1 /* 2 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016, 2017, SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "registerSaver_s390.hpp" 29 #include "gc/shared/barrierSet.hpp" 30 #include "gc/shared/barrierSetAssembler.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "interpreter/interp_masm.hpp" 33 #include "nativeInst_s390.hpp" 34 #include "oops/instanceOop.hpp" 35 #include "oops/objArrayKlass.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/methodHandles.hpp" 38 #include "runtime/frame.inline.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/sharedRuntime.hpp" 41 #include "runtime/stubCodeGenerator.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/thread.inline.hpp" 44 45 // Declaration and definition of StubGenerator (no .hpp file). 46 // For a more detailed description of the stub routine structure 47 // see the comment in stubRoutines.hpp. 48 49 #ifdef PRODUCT 50 #define __ _masm-> 51 #else 52 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 53 #endif 54 55 #define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str) 56 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 57 58 // ----------------------------------------------------------------------- 59 // Stub Code definitions 60 61 class StubGenerator: public StubCodeGenerator { 62 private: 63 64 //---------------------------------------------------------------------- 65 // Call stubs are used to call Java from C. 66 67 // 68 // Arguments: 69 // 70 // R2 - call wrapper address : address 71 // R3 - result : intptr_t* 72 // R4 - result type : BasicType 73 // R5 - method : method 74 // R6 - frame mgr entry point : address 75 // [SP+160] - parameter block : intptr_t* 76 // [SP+172] - parameter count in words : int 77 // [SP+176] - thread : Thread* 78 // 79 address generate_call_stub(address& return_address) { 80 // Set up a new C frame, copy Java arguments, call frame manager 81 // or native_entry, and process result. 82 83 StubCodeMark mark(this, "StubRoutines", "call_stub"); 84 address start = __ pc(); 85 86 Register r_arg_call_wrapper_addr = Z_ARG1; 87 Register r_arg_result_addr = Z_ARG2; 88 Register r_arg_result_type = Z_ARG3; 89 Register r_arg_method = Z_ARG4; 90 Register r_arg_entry = Z_ARG5; 91 92 // offsets to fp 93 #define d_arg_thread 176 94 #define d_arg_argument_addr 160 95 #define d_arg_argument_count 168+4 96 97 Register r_entryframe_fp = Z_tmp_1; 98 Register r_top_of_arguments_addr = Z_ARG4; 99 Register r_new_arg_entry = Z_R14; 100 101 // macros for frame offsets 102 #define call_wrapper_address_offset \ 103 _z_entry_frame_locals_neg(call_wrapper_address) 104 #define result_address_offset \ 105 _z_entry_frame_locals_neg(result_address) 106 #define result_type_offset \ 107 _z_entry_frame_locals_neg(result_type) 108 #define arguments_tos_address_offset \ 109 _z_entry_frame_locals_neg(arguments_tos_address) 110 111 { 112 // 113 // STACK on entry to call_stub: 114 // 115 // F1 [C_FRAME] 116 // ... 117 // 118 119 Register r_argument_addr = Z_tmp_3; 120 Register r_argumentcopy_addr = Z_tmp_4; 121 Register r_argument_size_in_bytes = Z_ARG5; 122 Register r_frame_size = Z_R1; 123 124 Label arguments_copied; 125 126 // Save non-volatile registers to ABI of caller frame. 127 BLOCK_COMMENT("save registers, push frame {"); 128 __ z_stmg(Z_R6, Z_R14, 16, Z_SP); 129 __ z_std(Z_F8, 96, Z_SP); 130 __ z_std(Z_F9, 104, Z_SP); 131 __ z_std(Z_F10, 112, Z_SP); 132 __ z_std(Z_F11, 120, Z_SP); 133 __ z_std(Z_F12, 128, Z_SP); 134 __ z_std(Z_F13, 136, Z_SP); 135 __ z_std(Z_F14, 144, Z_SP); 136 __ z_std(Z_F15, 152, Z_SP); 137 138 // 139 // Push ENTRY_FRAME including arguments: 140 // 141 // F0 [TOP_IJAVA_FRAME_ABI] 142 // [outgoing Java arguments] 143 // [ENTRY_FRAME_LOCALS] 144 // F1 [C_FRAME] 145 // ... 146 // 147 148 // Calculate new frame size and push frame. 149 #define abi_plus_locals_size \ 150 (frame::z_top_ijava_frame_abi_size + frame::z_entry_frame_locals_size) 151 if (abi_plus_locals_size % BytesPerWord == 0) { 152 // Preload constant part of frame size. 153 __ load_const_optimized(r_frame_size, -abi_plus_locals_size/BytesPerWord); 154 // Keep copy of our frame pointer (caller's SP). 155 __ z_lgr(r_entryframe_fp, Z_SP); 156 // Add space required by arguments to frame size. 157 __ z_slgf(r_frame_size, d_arg_argument_count, Z_R0, Z_SP); 158 // Move Z_ARG5 early, it will be used as a local. 159 __ z_lgr(r_new_arg_entry, r_arg_entry); 160 // Convert frame size from words to bytes. 161 __ z_sllg(r_frame_size, r_frame_size, LogBytesPerWord); 162 __ push_frame(r_frame_size, r_entryframe_fp, 163 false/*don't copy SP*/, true /*frame size sign inverted*/); 164 } else { 165 guarantee(false, "frame sizes should be multiples of word size (BytesPerWord)"); 166 } 167 BLOCK_COMMENT("} save, push"); 168 169 // Load argument registers for call. 170 BLOCK_COMMENT("prepare/copy arguments {"); 171 __ z_lgr(Z_method, r_arg_method); 172 __ z_lg(Z_thread, d_arg_thread, r_entryframe_fp); 173 174 // Calculate top_of_arguments_addr which will be tos (not prepushed) later. 175 // Wimply use SP + frame::top_ijava_frame_size. 176 __ add2reg(r_top_of_arguments_addr, 177 frame::z_top_ijava_frame_abi_size - BytesPerWord, Z_SP); 178 179 // Initialize call_stub locals (step 1). 180 if ((call_wrapper_address_offset + BytesPerWord == result_address_offset) && 181 (result_address_offset + BytesPerWord == result_type_offset) && 182 (result_type_offset + BytesPerWord == arguments_tos_address_offset)) { 183 184 __ z_stmg(r_arg_call_wrapper_addr, r_top_of_arguments_addr, 185 call_wrapper_address_offset, r_entryframe_fp); 186 } else { 187 __ z_stg(r_arg_call_wrapper_addr, 188 call_wrapper_address_offset, r_entryframe_fp); 189 __ z_stg(r_arg_result_addr, 190 result_address_offset, r_entryframe_fp); 191 __ z_stg(r_arg_result_type, 192 result_type_offset, r_entryframe_fp); 193 __ z_stg(r_top_of_arguments_addr, 194 arguments_tos_address_offset, r_entryframe_fp); 195 } 196 197 // Copy Java arguments. 198 199 // Any arguments to copy? 200 __ load_and_test_int2long(Z_R1, Address(r_entryframe_fp, d_arg_argument_count)); 201 __ z_bre(arguments_copied); 202 203 // Prepare loop and copy arguments in reverse order. 204 { 205 // Calculate argument size in bytes. 206 __ z_sllg(r_argument_size_in_bytes, Z_R1, LogBytesPerWord); 207 208 // Get addr of first incoming Java argument. 209 __ z_lg(r_argument_addr, d_arg_argument_addr, r_entryframe_fp); 210 211 // Let r_argumentcopy_addr point to last outgoing Java argument. 212 __ add2reg(r_argumentcopy_addr, BytesPerWord, r_top_of_arguments_addr); // = Z_SP+160 effectively. 213 214 // Let r_argument_addr point to last incoming Java argument. 215 __ add2reg_with_index(r_argument_addr, -BytesPerWord, 216 r_argument_size_in_bytes, r_argument_addr); 217 218 // Now loop while Z_R1 > 0 and copy arguments. 219 { 220 Label next_argument; 221 __ bind(next_argument); 222 // Mem-mem move. 223 __ z_mvc(0, BytesPerWord-1, r_argumentcopy_addr, 0, r_argument_addr); 224 __ add2reg(r_argument_addr, -BytesPerWord); 225 __ add2reg(r_argumentcopy_addr, BytesPerWord); 226 __ z_brct(Z_R1, next_argument); 227 } 228 } // End of argument copy loop. 229 230 __ bind(arguments_copied); 231 } 232 BLOCK_COMMENT("} arguments"); 233 234 BLOCK_COMMENT("call {"); 235 { 236 // Call frame manager or native entry. 237 238 // 239 // Register state on entry to frame manager / native entry: 240 // 241 // Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed) 242 // Lesp = (SP) + copied_arguments_offset - 8 243 // Z_method - method 244 // Z_thread - JavaThread* 245 // 246 247 // Here, the usual SP is the initial_caller_sp. 248 __ z_lgr(Z_R10, Z_SP); 249 250 // Z_esp points to the slot below the last argument. 251 __ z_lgr(Z_esp, r_top_of_arguments_addr); 252 253 // 254 // Stack on entry to frame manager / native entry: 255 // 256 // F0 [TOP_IJAVA_FRAME_ABI] 257 // [outgoing Java arguments] 258 // [ENTRY_FRAME_LOCALS] 259 // F1 [C_FRAME] 260 // ... 261 // 262 263 // Do a light-weight C-call here, r_new_arg_entry holds the address 264 // of the interpreter entry point (frame manager or native entry) 265 // and save runtime-value of return_pc in return_address 266 // (call by reference argument). 267 return_address = __ call_stub(r_new_arg_entry); 268 } 269 BLOCK_COMMENT("} call"); 270 271 { 272 BLOCK_COMMENT("restore registers {"); 273 // Returned from frame manager or native entry. 274 // Now pop frame, process result, and return to caller. 275 276 // 277 // Stack on exit from frame manager / native entry: 278 // 279 // F0 [ABI] 280 // ... 281 // [ENTRY_FRAME_LOCALS] 282 // F1 [C_FRAME] 283 // ... 284 // 285 // Just pop the topmost frame ... 286 // 287 288 Label ret_is_object; 289 Label ret_is_long; 290 Label ret_is_float; 291 Label ret_is_double; 292 293 // Restore frame pointer. 294 __ z_lg(r_entryframe_fp, _z_abi(callers_sp), Z_SP); 295 // Pop frame. Done here to minimize stalls. 296 __ pop_frame(); 297 298 // Reload some volatile registers which we've spilled before the call 299 // to frame manager / native entry. 300 // Access all locals via frame pointer, because we know nothing about 301 // the topmost frame's size. 302 __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp); 303 __ z_lg(r_arg_result_type, result_type_offset, r_entryframe_fp); 304 305 // Restore non-volatiles. 306 __ z_lmg(Z_R6, Z_R14, 16, Z_SP); 307 __ z_ld(Z_F8, 96, Z_SP); 308 __ z_ld(Z_F9, 104, Z_SP); 309 __ z_ld(Z_F10, 112, Z_SP); 310 __ z_ld(Z_F11, 120, Z_SP); 311 __ z_ld(Z_F12, 128, Z_SP); 312 __ z_ld(Z_F13, 136, Z_SP); 313 __ z_ld(Z_F14, 144, Z_SP); 314 __ z_ld(Z_F15, 152, Z_SP); 315 BLOCK_COMMENT("} restore"); 316 317 // 318 // Stack on exit from call_stub: 319 // 320 // 0 [C_FRAME] 321 // ... 322 // 323 // No call_stub frames left. 324 // 325 326 // All non-volatiles have been restored at this point!! 327 328 //------------------------------------------------------------------------ 329 // The following code makes some assumptions on the T_<type> enum values. 330 // The enum is defined in globalDefinitions.hpp. 331 // The validity of the assumptions is tested as far as possible. 332 // The assigned values should not be shuffled 333 // T_BOOLEAN==4 - lowest used enum value 334 // T_NARROWOOP==16 - largest used enum value 335 //------------------------------------------------------------------------ 336 BLOCK_COMMENT("process result {"); 337 Label firstHandler; 338 int handlerLen= 8; 339 #ifdef ASSERT 340 char assertMsg[] = "check BasicType definition in globalDefinitions.hpp"; 341 __ z_chi(r_arg_result_type, T_BOOLEAN); 342 __ asm_assert_low(assertMsg, 0x0234); 343 __ z_chi(r_arg_result_type, T_NARROWOOP); 344 __ asm_assert_high(assertMsg, 0x0235); 345 #endif 346 __ add2reg(r_arg_result_type, -T_BOOLEAN); // Remove offset. 347 __ z_larl(Z_R1, firstHandler); // location of first handler 348 __ z_sllg(r_arg_result_type, r_arg_result_type, 3); // Each handler is 8 bytes long. 349 __ z_bc(MacroAssembler::bcondAlways, 0, r_arg_result_type, Z_R1); 350 351 __ align(handlerLen); 352 __ bind(firstHandler); 353 // T_BOOLEAN: 354 guarantee(T_BOOLEAN == 4, "check BasicType definition in globalDefinitions.hpp"); 355 __ z_st(Z_RET, 0, r_arg_result_addr); 356 __ z_br(Z_R14); // Return to caller. 357 __ align(handlerLen); 358 // T_CHAR: 359 guarantee(T_CHAR == T_BOOLEAN+1, "check BasicType definition in globalDefinitions.hpp"); 360 __ z_st(Z_RET, 0, r_arg_result_addr); 361 __ z_br(Z_R14); // Return to caller. 362 __ align(handlerLen); 363 // T_FLOAT: 364 guarantee(T_FLOAT == T_CHAR+1, "check BasicType definition in globalDefinitions.hpp"); 365 __ z_ste(Z_FRET, 0, r_arg_result_addr); 366 __ z_br(Z_R14); // Return to caller. 367 __ align(handlerLen); 368 // T_DOUBLE: 369 guarantee(T_DOUBLE == T_FLOAT+1, "check BasicType definition in globalDefinitions.hpp"); 370 __ z_std(Z_FRET, 0, r_arg_result_addr); 371 __ z_br(Z_R14); // Return to caller. 372 __ align(handlerLen); 373 // T_BYTE: 374 guarantee(T_BYTE == T_DOUBLE+1, "check BasicType definition in globalDefinitions.hpp"); 375 __ z_st(Z_RET, 0, r_arg_result_addr); 376 __ z_br(Z_R14); // Return to caller. 377 __ align(handlerLen); 378 // T_SHORT: 379 guarantee(T_SHORT == T_BYTE+1, "check BasicType definition in globalDefinitions.hpp"); 380 __ z_st(Z_RET, 0, r_arg_result_addr); 381 __ z_br(Z_R14); // Return to caller. 382 __ align(handlerLen); 383 // T_INT: 384 guarantee(T_INT == T_SHORT+1, "check BasicType definition in globalDefinitions.hpp"); 385 __ z_st(Z_RET, 0, r_arg_result_addr); 386 __ z_br(Z_R14); // Return to caller. 387 __ align(handlerLen); 388 // T_LONG: 389 guarantee(T_LONG == T_INT+1, "check BasicType definition in globalDefinitions.hpp"); 390 __ z_stg(Z_RET, 0, r_arg_result_addr); 391 __ z_br(Z_R14); // Return to caller. 392 __ align(handlerLen); 393 // T_OBJECT: 394 guarantee(T_OBJECT == T_LONG+1, "check BasicType definition in globalDefinitions.hpp"); 395 __ z_stg(Z_RET, 0, r_arg_result_addr); 396 __ z_br(Z_R14); // Return to caller. 397 __ align(handlerLen); 398 // T_ARRAY: 399 guarantee(T_ARRAY == T_OBJECT+1, "check BasicType definition in globalDefinitions.hpp"); 400 __ z_stg(Z_RET, 0, r_arg_result_addr); 401 __ z_br(Z_R14); // Return to caller. 402 __ align(handlerLen); 403 // T_VOID: 404 guarantee(T_VOID == T_ARRAY+1, "check BasicType definition in globalDefinitions.hpp"); 405 __ z_stg(Z_RET, 0, r_arg_result_addr); 406 __ z_br(Z_R14); // Return to caller. 407 __ align(handlerLen); 408 // T_ADDRESS: 409 guarantee(T_ADDRESS == T_VOID+1, "check BasicType definition in globalDefinitions.hpp"); 410 __ z_stg(Z_RET, 0, r_arg_result_addr); 411 __ z_br(Z_R14); // Return to caller. 412 __ align(handlerLen); 413 // T_NARROWOOP: 414 guarantee(T_NARROWOOP == T_ADDRESS+1, "check BasicType definition in globalDefinitions.hpp"); 415 __ z_st(Z_RET, 0, r_arg_result_addr); 416 __ z_br(Z_R14); // Return to caller. 417 __ align(handlerLen); 418 BLOCK_COMMENT("} process result"); 419 } 420 return start; 421 } 422 423 // Return point for a Java call if there's an exception thrown in 424 // Java code. The exception is caught and transformed into a 425 // pending exception stored in JavaThread that can be tested from 426 // within the VM. 427 address generate_catch_exception() { 428 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 429 430 address start = __ pc(); 431 432 // 433 // Registers alive 434 // 435 // Z_thread 436 // Z_ARG1 - address of pending exception 437 // Z_ARG2 - return address in call stub 438 // 439 440 const Register exception_file = Z_R0; 441 const Register exception_line = Z_R1; 442 443 __ load_const_optimized(exception_file, (void*)__FILE__); 444 __ load_const_optimized(exception_line, (void*)__LINE__); 445 446 __ z_stg(Z_ARG1, thread_(pending_exception)); 447 // Store into `char *'. 448 __ z_stg(exception_file, thread_(exception_file)); 449 // Store into `int'. 450 __ z_st(exception_line, thread_(exception_line)); 451 452 // Complete return to VM. 453 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 454 455 // Continue in call stub. 456 __ z_br(Z_ARG2); 457 458 return start; 459 } 460 461 // Continuation point for runtime calls returning with a pending 462 // exception. The pending exception check happened in the runtime 463 // or native call stub. The pending exception in Thread is 464 // converted into a Java-level exception. 465 // 466 // Read: 467 // Z_R14: pc the runtime library callee wants to return to. 468 // Since the exception occurred in the callee, the return pc 469 // from the point of view of Java is the exception pc. 470 // 471 // Invalidate: 472 // Volatile registers (except below). 473 // 474 // Update: 475 // Z_ARG1: exception 476 // (Z_R14 is unchanged and is live out). 477 // 478 address generate_forward_exception() { 479 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 480 address start = __ pc(); 481 482 #define pending_exception_offset in_bytes(Thread::pending_exception_offset()) 483 #ifdef ASSERT 484 // Get pending exception oop. 485 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 486 487 // Make sure that this code is only executed if there is a pending exception. 488 { 489 Label L; 490 __ z_ltgr(Z_ARG1, Z_ARG1); 491 __ z_brne(L); 492 __ stop("StubRoutines::forward exception: no pending exception (1)"); 493 __ bind(L); 494 } 495 496 __ verify_oop(Z_ARG1, "StubRoutines::forward exception: not an oop"); 497 #endif 498 499 __ z_lgr(Z_ARG2, Z_R14); // Copy exception pc into Z_ARG2. 500 __ save_return_pc(); 501 __ push_frame_abi160(0); 502 // Find exception handler. 503 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 504 Z_thread, 505 Z_ARG2); 506 // Copy handler's address. 507 __ z_lgr(Z_R1, Z_RET); 508 __ pop_frame(); 509 __ restore_return_pc(); 510 511 // Set up the arguments for the exception handler: 512 // - Z_ARG1: exception oop 513 // - Z_ARG2: exception pc 514 515 // Load pending exception oop. 516 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 517 518 // The exception pc is the return address in the caller, 519 // must load it into Z_ARG2 520 __ z_lgr(Z_ARG2, Z_R14); 521 522 #ifdef ASSERT 523 // Make sure exception is set. 524 { Label L; 525 __ z_ltgr(Z_ARG1, Z_ARG1); 526 __ z_brne(L); 527 __ stop("StubRoutines::forward exception: no pending exception (2)"); 528 __ bind(L); 529 } 530 #endif 531 // Clear the pending exception. 532 __ clear_mem(Address(Z_thread, pending_exception_offset), sizeof(void *)); 533 // Jump to exception handler 534 __ z_br(Z_R1 /*handler address*/); 535 536 return start; 537 538 #undef pending_exception_offset 539 } 540 541 // Continuation point for throwing of implicit exceptions that are 542 // not handled in the current activation. Fabricates an exception 543 // oop and initiates normal exception dispatching in this 544 // frame. Only callee-saved registers are preserved (through the 545 // normal RegisterMap handling). If the compiler 546 // needs all registers to be preserved between the fault point and 547 // the exception handler then it must assume responsibility for that 548 // in AbstractCompiler::continuation_for_implicit_null_exception or 549 // continuation_for_implicit_division_by_zero_exception. All other 550 // implicit exceptions (e.g., NullPointerException or 551 // AbstractMethodError on entry) are either at call sites or 552 // otherwise assume that stack unwinding will be initiated, so 553 // caller saved registers were assumed volatile in the compiler. 554 555 // Note that we generate only this stub into a RuntimeStub, because 556 // it needs to be properly traversed and ignored during GC, so we 557 // change the meaning of the "__" macro within this method. 558 559 // Note: the routine set_pc_not_at_call_for_caller in 560 // SharedRuntime.cpp requires that this code be generated into a 561 // RuntimeStub. 562 #undef __ 563 #define __ masm-> 564 565 address generate_throw_exception(const char* name, address runtime_entry, 566 bool restore_saved_exception_pc, 567 Register arg1 = noreg, Register arg2 = noreg) { 568 assert_different_registers(arg1, Z_R0_scratch); // would be destroyed by push_frame() 569 assert_different_registers(arg2, Z_R0_scratch); // would be destroyed by push_frame() 570 571 int insts_size = 256; 572 int locs_size = 0; 573 CodeBuffer code(name, insts_size, locs_size); 574 MacroAssembler* masm = new MacroAssembler(&code); 575 int framesize_in_bytes; 576 address start = __ pc(); 577 578 __ save_return_pc(); 579 framesize_in_bytes = __ push_frame_abi160(0); 580 581 address frame_complete_pc = __ pc(); 582 if (restore_saved_exception_pc) { 583 __ unimplemented("StubGenerator::throw_exception", 74); 584 } 585 586 // Note that we always have a runtime stub frame on the top of stack at this point. 587 __ get_PC(Z_R1); 588 __ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1); 589 590 // Do the call. 591 BLOCK_COMMENT("call runtime_entry"); 592 __ call_VM_leaf(runtime_entry, Z_thread, arg1, arg2); 593 594 __ reset_last_Java_frame(); 595 596 #ifdef ASSERT 597 // Make sure that this code is only executed if there is a pending exception. 598 { Label L; 599 __ z_lg(Z_R0, 600 in_bytes(Thread::pending_exception_offset()), 601 Z_thread); 602 __ z_ltgr(Z_R0, Z_R0); 603 __ z_brne(L); 604 __ stop("StubRoutines::throw_exception: no pending exception"); 605 __ bind(L); 606 } 607 #endif 608 609 __ pop_frame(); 610 __ restore_return_pc(); 611 612 __ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry()); 613 __ z_br(Z_R1); 614 615 RuntimeStub* stub = 616 RuntimeStub::new_runtime_stub(name, &code, 617 frame_complete_pc - start, 618 framesize_in_bytes/wordSize, 619 NULL /*oop_maps*/, false); 620 621 return stub->entry_point(); 622 } 623 624 #undef __ 625 #ifdef PRODUCT 626 #define __ _masm-> 627 #else 628 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 629 #endif 630 631 // Support for uint StubRoutine::zarch::partial_subtype_check(Klass 632 // sub, Klass super); 633 // 634 // Arguments: 635 // ret : Z_RET, returned 636 // sub : Z_ARG2, argument, not changed 637 // super: Z_ARG3, argument, not changed 638 // 639 // raddr: Z_R14, blown by call 640 // 641 address generate_partial_subtype_check() { 642 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 643 Label miss; 644 645 address start = __ pc(); 646 647 const Register Rsubklass = Z_ARG2; // subklass 648 const Register Rsuperklass = Z_ARG3; // superklass 649 650 // No args, but tmp registers that are killed. 651 const Register Rlength = Z_ARG4; // cache array length 652 const Register Rarray_ptr = Z_ARG5; // Current value from cache array. 653 654 if (UseCompressedOops) { 655 assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub"); 656 } 657 658 // Always take the slow path (see SPARC). 659 __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, 660 Rarray_ptr, Rlength, NULL, &miss); 661 662 // Match falls through here. 663 __ clear_reg(Z_RET); // Zero indicates a match. Set EQ flag in CC. 664 __ z_br(Z_R14); 665 666 __ BIND(miss); 667 __ load_const_optimized(Z_RET, 1); // One indicates a miss. 668 __ z_ltgr(Z_RET, Z_RET); // Set NE flag in CR. 669 __ z_br(Z_R14); 670 671 return start; 672 } 673 674 // Return address of code to be called from code generated by 675 // MacroAssembler::verify_oop. 676 // 677 // Don't generate, rather use C++ code. 678 address generate_verify_oop_subroutine() { 679 // Don't generate a StubCodeMark, because no code is generated! 680 // Generating the mark triggers notifying the oprofile jvmti agent 681 // about the dynamic code generation, but the stub without 682 // code (code_size == 0) confuses opjitconv 683 // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 684 685 address start = 0; 686 return start; 687 } 688 689 // This is to test that the count register contains a positive int value. 690 // Required because C2 does not respect int to long conversion for stub calls. 691 void assert_positive_int(Register count) { 692 #ifdef ASSERT 693 __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0. 694 __ asm_assert_eq("missing zero extend", 0xAFFE); 695 #endif 696 } 697 698 // Generate overlap test for array copy stubs. 699 // If no actual overlap is detected, control is transferred to the 700 // "normal" copy stub (entry address passed in disjoint_copy_target). 701 // Otherwise, execution continues with the code generated by the 702 // caller of array_overlap_test. 703 // 704 // Input: 705 // Z_ARG1 - from 706 // Z_ARG2 - to 707 // Z_ARG3 - element count 708 void array_overlap_test(address disjoint_copy_target, int log2_elem_size) { 709 __ MacroAssembler::compare_and_branch_optimized(Z_ARG2, Z_ARG1, Assembler::bcondNotHigh, 710 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 711 712 Register index = Z_ARG3; 713 if (log2_elem_size > 0) { 714 __ z_sllg(Z_R1, Z_ARG3, log2_elem_size); // byte count 715 index = Z_R1; 716 } 717 __ add2reg_with_index(Z_R1, 0, index, Z_ARG1); // First byte after "from" range. 718 719 __ MacroAssembler::compare_and_branch_optimized(Z_R1, Z_ARG2, Assembler::bcondNotHigh, 720 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 721 722 // Destructive overlap: let caller generate code for that. 723 } 724 725 // Generate stub for disjoint array copy. If "aligned" is true, the 726 // "from" and "to" addresses are assumed to be heapword aligned. 727 // 728 // Arguments for generated stub: 729 // from: Z_ARG1 730 // to: Z_ARG2 731 // count: Z_ARG3 treated as signed 732 void generate_disjoint_copy(bool aligned, int element_size, 733 bool branchToEnd, 734 bool restoreArgs) { 735 // This is the zarch specific stub generator for general array copy tasks. 736 // It has the following prereqs and features: 737 // 738 // - No destructive overlap allowed (else unpredictable results). 739 // - Destructive overlap does not exist if the leftmost byte of the target 740 // does not coincide with any of the source bytes (except the leftmost). 741 // 742 // Register usage upon entry: 743 // Z_ARG1 == Z_R2 : address of source array 744 // Z_ARG2 == Z_R3 : address of target array 745 // Z_ARG3 == Z_R4 : length of operands (# of elements on entry) 746 // 747 // Register usage within the generator: 748 // - Z_R0 and Z_R1 are KILLed by the stub routine (target addr/len). 749 // Used as pair register operand in complex moves, scratch registers anyway. 750 // - Z_R5 is KILLed by the stub routine (source register pair addr/len) (even/odd reg). 751 // Same as R0/R1, but no scratch register. 752 // - Z_ARG1, Z_ARG2, Z_ARG3 are USEd but preserved by the stub routine, 753 // but they might get temporarily overwritten. 754 755 Register save_reg = Z_ARG4; // (= Z_R5), holds original target operand address for restore. 756 757 { 758 Register llen_reg = Z_R1; // Holds left operand len (odd reg). 759 Register laddr_reg = Z_R0; // Holds left operand addr (even reg), overlaps with data_reg. 760 Register rlen_reg = Z_R5; // Holds right operand len (odd reg), overlaps with save_reg. 761 Register raddr_reg = Z_R4; // Holds right operand addr (even reg), overlaps with len_reg. 762 763 Register data_reg = Z_R0; // Holds copied data chunk in alignment process and copy loop. 764 Register len_reg = Z_ARG3; // Holds operand len (#elements at entry, #bytes shortly after). 765 Register dst_reg = Z_ARG2; // Holds left (target) operand addr. 766 Register src_reg = Z_ARG1; // Holds right (source) operand addr. 767 768 Label doMVCLOOP, doMVCLOOPcount, doMVCLOOPiterate; 769 Label doMVCUnrolled; 770 NearLabel doMVC, doMVCgeneral, done; 771 Label MVC_template; 772 address pcMVCblock_b, pcMVCblock_e; 773 774 bool usedMVCLE = true; 775 bool usedMVCLOOP = true; 776 bool usedMVCUnrolled = false; 777 bool usedMVC = false; 778 bool usedMVCgeneral = false; 779 780 int stride; 781 Register stride_reg; 782 Register ix_reg; 783 784 assert((element_size<=256) && (256%element_size == 0), "element size must be <= 256, power of 2"); 785 unsigned int log2_size = exact_log2(element_size); 786 787 switch (element_size) { 788 case 1: BLOCK_COMMENT("ARRAYCOPY DISJOINT byte {"); break; 789 case 2: BLOCK_COMMENT("ARRAYCOPY DISJOINT short {"); break; 790 case 4: BLOCK_COMMENT("ARRAYCOPY DISJOINT int {"); break; 791 case 8: BLOCK_COMMENT("ARRAYCOPY DISJOINT long {"); break; 792 default: BLOCK_COMMENT("ARRAYCOPY DISJOINT {"); break; 793 } 794 795 assert_positive_int(len_reg); 796 797 BLOCK_COMMENT("preparation {"); 798 799 // No copying if len <= 0. 800 if (branchToEnd) { 801 __ compare64_and_branch(len_reg, (intptr_t) 0, Assembler::bcondNotHigh, done); 802 } else { 803 if (VM_Version::has_CompareBranch()) { 804 __ z_cgib(len_reg, 0, Assembler::bcondNotHigh, 0, Z_R14); 805 } else { 806 __ z_ltgr(len_reg, len_reg); 807 __ z_bcr(Assembler::bcondNotPositive, Z_R14); 808 } 809 } 810 811 // Prefetch just one cache line. Speculative opt for short arrays. 812 // Do not use Z_R1 in prefetch. Is undefined here. 813 if (VM_Version::has_Prefetch()) { 814 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 815 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 816 } 817 818 BLOCK_COMMENT("} preparation"); 819 820 // Save args only if really needed. 821 // Keep len test local to branch. Is generated only once. 822 823 BLOCK_COMMENT("mode selection {"); 824 825 // Special handling for arrays with only a few elements. 826 // Nothing fancy: just an executed MVC. 827 if (log2_size > 0) { 828 __ z_sllg(Z_R1, len_reg, log2_size); // Remember #bytes in Z_R1. 829 } 830 if (element_size != 8) { 831 __ z_cghi(len_reg, 256/element_size); 832 __ z_brnh(doMVC); 833 usedMVC = true; 834 } 835 if (element_size == 8) { // Long and oop arrays are always aligned. 836 __ z_cghi(len_reg, 256/element_size); 837 __ z_brnh(doMVCUnrolled); 838 usedMVCUnrolled = true; 839 } 840 841 // Prefetch another cache line. We, for sure, have more than one line to copy. 842 if (VM_Version::has_Prefetch()) { 843 __ z_pfd(0x01, 256, Z_R0, src_reg); // Fetch access. 844 __ z_pfd(0x02, 256, Z_R0, dst_reg); // Store access. 845 } 846 847 if (restoreArgs) { 848 // Remember entry value of ARG2 to restore all arguments later from that knowledge. 849 __ z_lgr(save_reg, dst_reg); 850 } 851 852 __ z_cghi(len_reg, 4096/element_size); 853 if (log2_size == 0) { 854 __ z_lgr(Z_R1, len_reg); // Init Z_R1 with #bytes 855 } 856 __ z_brnh(doMVCLOOP); 857 858 // Fall through to MVCLE case. 859 860 BLOCK_COMMENT("} mode selection"); 861 862 // MVCLE: for long arrays 863 // DW aligned: Best performance for sizes > 4kBytes. 864 // unaligned: Least complex for sizes > 256 bytes. 865 if (usedMVCLE) { 866 BLOCK_COMMENT("mode MVCLE {"); 867 868 // Setup registers for mvcle. 869 //__ z_lgr(llen_reg, len_reg);// r1 <- r4 #bytes already in Z_R1, aka llen_reg. 870 __ z_lgr(laddr_reg, dst_reg); // r0 <- r3 871 __ z_lgr(raddr_reg, src_reg); // r4 <- r2 872 __ z_lgr(rlen_reg, llen_reg); // r5 <- r1 873 874 __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb0); // special: bypass cache 875 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb8); // special: Hold data in cache. 876 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0); 877 878 if (restoreArgs) { 879 // MVCLE updates the source (Z_R4,Z_R5) and target (Z_R0,Z_R1) register pairs. 880 // Dst_reg (Z_ARG2) and src_reg (Z_ARG1) are left untouched. No restore required. 881 // Len_reg (Z_ARG3) is destroyed and must be restored. 882 __ z_slgr(laddr_reg, dst_reg); // copied #bytes 883 if (log2_size > 0) { 884 __ z_srag(Z_ARG3, laddr_reg, log2_size); // Convert back to #elements. 885 } else { 886 __ z_lgr(Z_ARG3, laddr_reg); 887 } 888 } 889 if (branchToEnd) { 890 __ z_bru(done); 891 } else { 892 __ z_br(Z_R14); 893 } 894 BLOCK_COMMENT("} mode MVCLE"); 895 } 896 // No fallthru possible here. 897 898 // MVCUnrolled: for short, aligned arrays. 899 900 if (usedMVCUnrolled) { 901 BLOCK_COMMENT("mode MVC unrolled {"); 902 stride = 8; 903 904 // Generate unrolled MVC instructions. 905 for (int ii = 32; ii > 1; ii--) { 906 __ z_mvc(0, ii * stride-1, dst_reg, 0, src_reg); // ii*8 byte copy 907 if (branchToEnd) { 908 __ z_bru(done); 909 } else { 910 __ z_br(Z_R14); 911 } 912 } 913 914 pcMVCblock_b = __ pc(); 915 __ z_mvc(0, 1 * stride-1, dst_reg, 0, src_reg); // 8 byte copy 916 if (branchToEnd) { 917 __ z_bru(done); 918 } else { 919 __ z_br(Z_R14); 920 } 921 922 pcMVCblock_e = __ pc(); 923 Label MVC_ListEnd; 924 __ bind(MVC_ListEnd); 925 926 // This is an absolute fast path: 927 // - Array len in bytes must be not greater than 256. 928 // - Array len in bytes must be an integer mult of DW 929 // to save expensive handling of trailing bytes. 930 // - Argument restore is not done, 931 // i.e. previous code must not alter arguments (this code doesn't either). 932 933 __ bind(doMVCUnrolled); 934 935 // Avoid mul, prefer shift where possible. 936 // Combine shift right (for #DW) with shift left (for block size). 937 // Set CC for zero test below (asm_assert). 938 // Note: #bytes comes in Z_R1, #DW in len_reg. 939 unsigned int MVCblocksize = pcMVCblock_e - pcMVCblock_b; 940 unsigned int logMVCblocksize = 0xffffffffU; // Pacify compiler ("used uninitialized" warning). 941 942 if (log2_size > 0) { // Len was scaled into Z_R1. 943 switch (MVCblocksize) { 944 945 case 8: logMVCblocksize = 3; 946 __ z_ltgr(Z_R0, Z_R1); // #bytes is index 947 break; // reasonable size, use shift 948 949 case 16: logMVCblocksize = 4; 950 __ z_slag(Z_R0, Z_R1, logMVCblocksize-log2_size); 951 break; // reasonable size, use shift 952 953 default: logMVCblocksize = 0; 954 __ z_ltgr(Z_R0, len_reg); // #DW for mul 955 break; // all other sizes: use mul 956 } 957 } else { 958 guarantee(log2_size, "doMVCUnrolled: only for DW entities"); 959 } 960 961 // This test (and branch) is redundant. Previous code makes sure that 962 // - element count > 0 963 // - element size == 8. 964 // Thus, len reg should never be zero here. We insert an asm_assert() here, 965 // just to double-check and to be on the safe side. 966 __ asm_assert(false, "zero len cannot occur", 99); 967 968 __ z_larl(Z_R1, MVC_ListEnd); // Get addr of last instr block. 969 // Avoid mul, prefer shift where possible. 970 if (logMVCblocksize == 0) { 971 __ z_mghi(Z_R0, MVCblocksize); 972 } 973 __ z_slgr(Z_R1, Z_R0); 974 __ z_br(Z_R1); 975 BLOCK_COMMENT("} mode MVC unrolled"); 976 } 977 // No fallthru possible here. 978 979 // MVC execute template 980 // Must always generate. Usage may be switched on below. 981 // There is no suitable place after here to put the template. 982 __ bind(MVC_template); 983 __ z_mvc(0,0,dst_reg,0,src_reg); // Instr template, never exec directly! 984 985 986 // MVC Loop: for medium-sized arrays 987 988 // Only for DW aligned arrays (src and dst). 989 // #bytes to copy must be at least 256!!! 990 // Non-aligned cases handled separately. 991 stride = 256; 992 stride_reg = Z_R1; // Holds #bytes when control arrives here. 993 ix_reg = Z_ARG3; // Alias for len_reg. 994 995 996 if (usedMVCLOOP) { 997 BLOCK_COMMENT("mode MVC loop {"); 998 __ bind(doMVCLOOP); 999 1000 __ z_lcgr(ix_reg, Z_R1); // Ix runs from -(n-2)*stride to 1*stride (inclusive). 1001 __ z_llill(stride_reg, stride); 1002 __ add2reg(ix_reg, 2*stride); // Thus: increment ix by 2*stride. 1003 1004 __ bind(doMVCLOOPiterate); 1005 __ z_mvc(0, stride-1, dst_reg, 0, src_reg); 1006 __ add2reg(dst_reg, stride); 1007 __ add2reg(src_reg, stride); 1008 __ bind(doMVCLOOPcount); 1009 __ z_brxlg(ix_reg, stride_reg, doMVCLOOPiterate); 1010 1011 // Don 't use add2reg() here, since we must set the condition code! 1012 __ z_aghi(ix_reg, -2*stride); // Compensate incr from above: zero diff means "all copied". 1013 1014 if (restoreArgs) { 1015 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1016 __ z_brnz(doMVCgeneral); // We're not done yet, ix_reg is not zero. 1017 1018 // ARG1, ARG2, and ARG3 were altered by the code above, so restore them building on save_reg. 1019 __ z_slgr(dst_reg, save_reg); // copied #bytes 1020 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored) 1021 if (log2_size) { 1022 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3. 1023 } else { 1024 __ z_lgr(Z_ARG3, dst_reg); 1025 } 1026 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 1027 1028 if (branchToEnd) { 1029 __ z_bru(done); 1030 } else { 1031 __ z_br(Z_R14); 1032 } 1033 1034 } else { 1035 if (branchToEnd) { 1036 __ z_brz(done); // CC set by aghi instr. 1037 } else { 1038 __ z_bcr(Assembler::bcondZero, Z_R14); // We're all done if zero. 1039 } 1040 1041 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1042 // __ z_bru(doMVCgeneral); // fallthru 1043 } 1044 usedMVCgeneral = true; 1045 BLOCK_COMMENT("} mode MVC loop"); 1046 } 1047 // Fallthru to doMVCgeneral 1048 1049 // MVCgeneral: for short, unaligned arrays, after other copy operations 1050 1051 // Somewhat expensive due to use of EX instruction, but simple. 1052 if (usedMVCgeneral) { 1053 BLOCK_COMMENT("mode MVC general {"); 1054 __ bind(doMVCgeneral); 1055 1056 __ add2reg(len_reg, -1, Z_R1); // Get #bytes-1 for EXECUTE. 1057 if (VM_Version::has_ExecuteExtensions()) { 1058 __ z_exrl(len_reg, MVC_template); // Execute MVC with variable length. 1059 } else { 1060 __ z_larl(Z_R1, MVC_template); // Get addr of instr template. 1061 __ z_ex(len_reg, 0, Z_R0, Z_R1); // Execute MVC with variable length. 1062 } // penalty: 9 ticks 1063 1064 if (restoreArgs) { 1065 // ARG1, ARG2, and ARG3 were altered by code executed before, so restore them building on save_reg 1066 __ z_slgr(dst_reg, save_reg); // Copied #bytes without the "doMVCgeneral" chunk 1067 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored), was not advanced for "doMVCgeneral" chunk 1068 __ add2reg_with_index(dst_reg, 1, len_reg, dst_reg); // Len of executed MVC was not accounted for, yet. 1069 if (log2_size) { 1070 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3 1071 } else { 1072 __ z_lgr(Z_ARG3, dst_reg); 1073 } 1074 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 1075 } 1076 1077 if (usedMVC) { 1078 if (branchToEnd) { 1079 __ z_bru(done); 1080 } else { 1081 __ z_br(Z_R14); 1082 } 1083 } else { 1084 if (!branchToEnd) __ z_br(Z_R14); 1085 } 1086 BLOCK_COMMENT("} mode MVC general"); 1087 } 1088 // Fallthru possible if following block not generated. 1089 1090 // MVC: for short, unaligned arrays 1091 1092 // Somewhat expensive due to use of EX instruction, but simple. penalty: 9 ticks. 1093 // Differs from doMVCgeneral in reconstruction of ARG2, ARG3, and ARG4. 1094 if (usedMVC) { 1095 BLOCK_COMMENT("mode MVC {"); 1096 __ bind(doMVC); 1097 1098 // get #bytes-1 for EXECUTE 1099 if (log2_size) { 1100 __ add2reg(Z_R1, -1); // Length was scaled into Z_R1. 1101 } else { 1102 __ add2reg(Z_R1, -1, len_reg); // Length was not scaled. 1103 } 1104 1105 if (VM_Version::has_ExecuteExtensions()) { 1106 __ z_exrl(Z_R1, MVC_template); // Execute MVC with variable length. 1107 } else { 1108 __ z_lgr(Z_R0, Z_R5); // Save ARG4, may be unnecessary. 1109 __ z_larl(Z_R5, MVC_template); // Get addr of instr template. 1110 __ z_ex(Z_R1, 0, Z_R0, Z_R5); // Execute MVC with variable length. 1111 __ z_lgr(Z_R5, Z_R0); // Restore ARG4, may be unnecessary. 1112 } 1113 1114 if (!branchToEnd) { 1115 __ z_br(Z_R14); 1116 } 1117 BLOCK_COMMENT("} mode MVC"); 1118 } 1119 1120 __ bind(done); 1121 1122 switch (element_size) { 1123 case 1: BLOCK_COMMENT("} ARRAYCOPY DISJOINT byte "); break; 1124 case 2: BLOCK_COMMENT("} ARRAYCOPY DISJOINT short"); break; 1125 case 4: BLOCK_COMMENT("} ARRAYCOPY DISJOINT int "); break; 1126 case 8: BLOCK_COMMENT("} ARRAYCOPY DISJOINT long "); break; 1127 default: BLOCK_COMMENT("} ARRAYCOPY DISJOINT "); break; 1128 } 1129 } 1130 } 1131 1132 // Generate stub for conjoint array copy. If "aligned" is true, the 1133 // "from" and "to" addresses are assumed to be heapword aligned. 1134 // 1135 // Arguments for generated stub: 1136 // from: Z_ARG1 1137 // to: Z_ARG2 1138 // count: Z_ARG3 treated as signed 1139 void generate_conjoint_copy(bool aligned, int element_size, bool branchToEnd) { 1140 1141 // This is the zarch specific stub generator for general array copy tasks. 1142 // It has the following prereqs and features: 1143 // 1144 // - Destructive overlap exists and is handled by reverse copy. 1145 // - Destructive overlap exists if the leftmost byte of the target 1146 // does coincide with any of the source bytes (except the leftmost). 1147 // - Z_R0 and Z_R1 are KILLed by the stub routine (data and stride) 1148 // - Z_ARG1 and Z_ARG2 are USEd but preserved by the stub routine. 1149 // - Z_ARG3 is USED but preserved by the stub routine. 1150 // - Z_ARG4 is used as index register and is thus KILLed. 1151 // 1152 { 1153 Register stride_reg = Z_R1; // Stride & compare value in loop (negative element_size). 1154 Register data_reg = Z_R0; // Holds value of currently processed element. 1155 Register ix_reg = Z_ARG4; // Holds byte index of currently processed element. 1156 Register len_reg = Z_ARG3; // Holds length (in #elements) of arrays. 1157 Register dst_reg = Z_ARG2; // Holds left operand addr. 1158 Register src_reg = Z_ARG1; // Holds right operand addr. 1159 1160 assert(256%element_size == 0, "Element size must be power of 2."); 1161 assert(element_size <= 8, "Can't handle more than DW units."); 1162 1163 switch (element_size) { 1164 case 1: BLOCK_COMMENT("ARRAYCOPY CONJOINT byte {"); break; 1165 case 2: BLOCK_COMMENT("ARRAYCOPY CONJOINT short {"); break; 1166 case 4: BLOCK_COMMENT("ARRAYCOPY CONJOINT int {"); break; 1167 case 8: BLOCK_COMMENT("ARRAYCOPY CONJOINT long {"); break; 1168 default: BLOCK_COMMENT("ARRAYCOPY CONJOINT {"); break; 1169 } 1170 1171 assert_positive_int(len_reg); 1172 1173 if (VM_Version::has_Prefetch()) { 1174 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 1175 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 1176 } 1177 1178 unsigned int log2_size = exact_log2(element_size); 1179 if (log2_size) { 1180 __ z_sllg(ix_reg, len_reg, log2_size); 1181 } else { 1182 __ z_lgr(ix_reg, len_reg); 1183 } 1184 1185 // Optimize reverse copy loop. 1186 // Main loop copies DW units which may be unaligned. Unaligned access adds some penalty ticks. 1187 // Unaligned DW access (neither fetch nor store) is DW-atomic, but should be alignment-atomic. 1188 // Preceding the main loop, some bytes are copied to obtain a DW-multiple remaining length. 1189 1190 Label countLoop1; 1191 Label copyLoop1; 1192 Label skipBY; 1193 Label skipHW; 1194 int stride = -8; 1195 1196 __ load_const_optimized(stride_reg, stride); // Prepare for DW copy loop. 1197 1198 if (element_size == 8) // Nothing to do here. 1199 __ z_bru(countLoop1); 1200 else { // Do not generate dead code. 1201 __ z_tmll(ix_reg, 7); // Check the "odd" bits. 1202 __ z_bre(countLoop1); // There are none, very good! 1203 } 1204 1205 if (log2_size == 0) { // Handle leftover Byte. 1206 __ z_tmll(ix_reg, 1); 1207 __ z_bre(skipBY); 1208 __ z_lb(data_reg, -1, ix_reg, src_reg); 1209 __ z_stcy(data_reg, -1, ix_reg, dst_reg); 1210 __ add2reg(ix_reg, -1); // Decrement delayed to avoid AGI. 1211 __ bind(skipBY); 1212 // fallthru 1213 } 1214 if (log2_size <= 1) { // Handle leftover HW. 1215 __ z_tmll(ix_reg, 2); 1216 __ z_bre(skipHW); 1217 __ z_lhy(data_reg, -2, ix_reg, src_reg); 1218 __ z_sthy(data_reg, -2, ix_reg, dst_reg); 1219 __ add2reg(ix_reg, -2); // Decrement delayed to avoid AGI. 1220 __ bind(skipHW); 1221 __ z_tmll(ix_reg, 4); 1222 __ z_bre(countLoop1); 1223 // fallthru 1224 } 1225 if (log2_size <= 2) { // There are just 4 bytes (left) that need to be copied. 1226 __ z_ly(data_reg, -4, ix_reg, src_reg); 1227 __ z_sty(data_reg, -4, ix_reg, dst_reg); 1228 __ add2reg(ix_reg, -4); // Decrement delayed to avoid AGI. 1229 __ z_bru(countLoop1); 1230 } 1231 1232 // Control can never get to here. Never! Never ever! 1233 __ z_illtrap(0x99); 1234 __ bind(copyLoop1); 1235 __ z_lg(data_reg, 0, ix_reg, src_reg); 1236 __ z_stg(data_reg, 0, ix_reg, dst_reg); 1237 __ bind(countLoop1); 1238 __ z_brxhg(ix_reg, stride_reg, copyLoop1); 1239 1240 if (!branchToEnd) 1241 __ z_br(Z_R14); 1242 1243 switch (element_size) { 1244 case 1: BLOCK_COMMENT("} ARRAYCOPY CONJOINT byte "); break; 1245 case 2: BLOCK_COMMENT("} ARRAYCOPY CONJOINT short"); break; 1246 case 4: BLOCK_COMMENT("} ARRAYCOPY CONJOINT int "); break; 1247 case 8: BLOCK_COMMENT("} ARRAYCOPY CONJOINT long "); break; 1248 default: BLOCK_COMMENT("} ARRAYCOPY CONJOINT "); break; 1249 } 1250 } 1251 } 1252 1253 // Generate stub for disjoint byte copy. If "aligned" is true, the 1254 // "from" and "to" addresses are assumed to be heapword aligned. 1255 address generate_disjoint_byte_copy(bool aligned, const char * name) { 1256 StubCodeMark mark(this, "StubRoutines", name); 1257 1258 // This is the zarch specific stub generator for byte array copy. 1259 // Refer to generate_disjoint_copy for a list of prereqs and features: 1260 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1261 generate_disjoint_copy(aligned, 1, false, false); 1262 return __ addr_at(start_off); 1263 } 1264 1265 1266 address generate_disjoint_short_copy(bool aligned, const char * name) { 1267 StubCodeMark mark(this, "StubRoutines", name); 1268 // This is the zarch specific stub generator for short array copy. 1269 // Refer to generate_disjoint_copy for a list of prereqs and features: 1270 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1271 generate_disjoint_copy(aligned, 2, false, false); 1272 return __ addr_at(start_off); 1273 } 1274 1275 1276 address generate_disjoint_int_copy(bool aligned, const char * name) { 1277 StubCodeMark mark(this, "StubRoutines", name); 1278 // This is the zarch specific stub generator for int array copy. 1279 // Refer to generate_disjoint_copy for a list of prereqs and features: 1280 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1281 generate_disjoint_copy(aligned, 4, false, false); 1282 return __ addr_at(start_off); 1283 } 1284 1285 1286 address generate_disjoint_long_copy(bool aligned, const char * name) { 1287 StubCodeMark mark(this, "StubRoutines", name); 1288 // This is the zarch specific stub generator for long array copy. 1289 // Refer to generate_disjoint_copy for a list of prereqs and features: 1290 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1291 generate_disjoint_copy(aligned, 8, false, false); 1292 return __ addr_at(start_off); 1293 } 1294 1295 1296 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1297 StubCodeMark mark(this, "StubRoutines", name); 1298 // This is the zarch specific stub generator for oop array copy. 1299 // Refer to generate_disjoint_copy for a list of prereqs and features. 1300 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1301 unsigned int size = UseCompressedOops ? 4 : 8; 1302 1303 DecoratorSet decorators = ARRAYCOPY_DISJOINT; 1304 if (dest_uninitialized) { 1305 decorators |= AS_DEST_NOT_INITIALIZED; 1306 } 1307 if (aligned) { 1308 decorators |= ARRAYCOPY_ALIGNED; 1309 } 1310 1311 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1312 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); 1313 1314 generate_disjoint_copy(aligned, size, true, true); 1315 1316 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); 1317 1318 return __ addr_at(start_off); 1319 } 1320 1321 1322 address generate_conjoint_byte_copy(bool aligned, const char * name) { 1323 StubCodeMark mark(this, "StubRoutines", name); 1324 // This is the zarch specific stub generator for overlapping byte array copy. 1325 // Refer to generate_conjoint_copy for a list of prereqs and features: 1326 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1327 address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy() 1328 : StubRoutines::jbyte_disjoint_arraycopy(); 1329 1330 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint. 1331 generate_conjoint_copy(aligned, 1, false); 1332 1333 return __ addr_at(start_off); 1334 } 1335 1336 1337 address generate_conjoint_short_copy(bool aligned, const char * name) { 1338 StubCodeMark mark(this, "StubRoutines", name); 1339 // This is the zarch specific stub generator for overlapping short array copy. 1340 // Refer to generate_conjoint_copy for a list of prereqs and features: 1341 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1342 address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy() 1343 : StubRoutines::jshort_disjoint_arraycopy(); 1344 1345 array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint. 1346 generate_conjoint_copy(aligned, 2, false); 1347 1348 return __ addr_at(start_off); 1349 } 1350 1351 address generate_conjoint_int_copy(bool aligned, const char * name) { 1352 StubCodeMark mark(this, "StubRoutines", name); 1353 // This is the zarch specific stub generator for overlapping int array copy. 1354 // Refer to generate_conjoint_copy for a list of prereqs and features: 1355 1356 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1357 address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy() 1358 : StubRoutines::jint_disjoint_arraycopy(); 1359 1360 array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint. 1361 generate_conjoint_copy(aligned, 4, false); 1362 1363 return __ addr_at(start_off); 1364 } 1365 1366 address generate_conjoint_long_copy(bool aligned, const char * name) { 1367 StubCodeMark mark(this, "StubRoutines", name); 1368 // This is the zarch specific stub generator for overlapping long array copy. 1369 // Refer to generate_conjoint_copy for a list of prereqs and features: 1370 1371 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1372 address nooverlap_target = aligned ? StubRoutines::arrayof_jlong_disjoint_arraycopy() 1373 : StubRoutines::jlong_disjoint_arraycopy(); 1374 1375 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint. 1376 generate_conjoint_copy(aligned, 8, false); 1377 1378 return __ addr_at(start_off); 1379 } 1380 1381 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1382 StubCodeMark mark(this, "StubRoutines", name); 1383 // This is the zarch specific stub generator for overlapping oop array copy. 1384 // Refer to generate_conjoint_copy for a list of prereqs and features. 1385 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1386 unsigned int size = UseCompressedOops ? 4 : 8; 1387 unsigned int shift = UseCompressedOops ? 2 : 3; 1388 1389 address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized) 1390 : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized); 1391 1392 // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier. 1393 array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint. 1394 1395 DecoratorSet decorators = 0; 1396 if (dest_uninitialized) { 1397 decorators |= AS_DEST_NOT_INITIALIZED; 1398 } 1399 if (aligned) { 1400 decorators |= ARRAYCOPY_ALIGNED; 1401 } 1402 1403 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1404 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); 1405 1406 generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3. 1407 1408 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); 1409 1410 return __ addr_at(start_off); 1411 } 1412 1413 1414 void generate_arraycopy_stubs() { 1415 1416 // Note: the disjoint stubs must be generated first, some of 1417 // the conjoint stubs use them. 1418 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy"); 1419 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 1420 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy"); 1421 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy"); 1422 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false); 1423 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true); 1424 1425 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy"); 1426 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); 1427 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy"); 1428 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy"); 1429 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy", false); 1430 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy_uninit", true); 1431 1432 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy (false, "jbyte_arraycopy"); 1433 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 1434 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy (false, "jint_arraycopy"); 1435 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy (false, "jlong_arraycopy"); 1436 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy (false, "oop_arraycopy", false); 1437 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy (false, "oop_arraycopy_uninit", true); 1438 1439 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy"); 1440 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 1441 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy"); 1442 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy"); 1443 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false); 1444 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true); 1445 } 1446 1447 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) { 1448 1449 // safefetch signatures: 1450 // int SafeFetch32(int* adr, int errValue); 1451 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 1452 // 1453 // arguments: 1454 // Z_ARG1 = adr 1455 // Z_ARG2 = errValue 1456 // 1457 // result: 1458 // Z_RET = *adr or errValue 1459 1460 StubCodeMark mark(this, "StubRoutines", name); 1461 1462 // entry point 1463 // Load *adr into Z_ARG2, may fault. 1464 *entry = *fault_pc = __ pc(); 1465 switch (size) { 1466 case 4: 1467 // Sign extended int32_t. 1468 __ z_lgf(Z_ARG2, 0, Z_ARG1); 1469 break; 1470 case 8: 1471 // int64_t 1472 __ z_lg(Z_ARG2, 0, Z_ARG1); 1473 break; 1474 default: 1475 ShouldNotReachHere(); 1476 } 1477 1478 // Return errValue or *adr. 1479 *continuation_pc = __ pc(); 1480 __ z_lgr(Z_RET, Z_ARG2); 1481 __ z_br(Z_R14); 1482 1483 } 1484 1485 // Call interface for AES_encryptBlock, AES_decryptBlock stubs. 1486 // 1487 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed. 1488 // Z_ARG2 - destination data block. Ptr to leftmost byte to be stored. 1489 // For in-place encryption/decryption, ARG1 and ARG2 can point 1490 // to the same piece of storage. 1491 // Z_ARG3 - Crypto key address (expanded key). The first n bits of 1492 // the expanded key constitute the original AES-<n> key (see below). 1493 // 1494 // Z_RET - return value. First unprocessed byte offset in src buffer. 1495 // 1496 // Some remarks: 1497 // The crypto key, as passed from the caller to these encryption stubs, 1498 // is a so-called expanded key. It is derived from the original key 1499 // by the Rijndael key schedule, see http://en.wikipedia.org/wiki/Rijndael_key_schedule 1500 // With the expanded key, the cipher/decipher task is decomposed in 1501 // multiple, less complex steps, called rounds. Sun SPARC and Intel 1502 // processors obviously implement support for those less complex steps. 1503 // z/Architecture provides instructions for full cipher/decipher complexity. 1504 // Therefore, we need the original, not the expanded key here. 1505 // Luckily, the first n bits of an AES-<n> expanded key are formed 1506 // by the original key itself. That takes us out of trouble. :-) 1507 // The key length (in bytes) relation is as follows: 1508 // original expanded rounds key bit keylen 1509 // key bytes key bytes length in words 1510 // 16 176 11 128 44 1511 // 24 208 13 192 52 1512 // 32 240 15 256 60 1513 // 1514 // The crypto instructions used in the AES* stubs have some specific register requirements. 1515 // Z_R0 holds the crypto function code. Please refer to the KM/KMC instruction 1516 // description in the "z/Architecture Principles of Operation" manual for details. 1517 // Z_R1 holds the parameter block address. The parameter block contains the cryptographic key 1518 // (KM instruction) and the chaining value (KMC instruction). 1519 // dst must designate an even-numbered register, holding the address of the output message. 1520 // src must designate an even/odd register pair, holding the address/length of the original message 1521 1522 // Helper function which generates code to 1523 // - load the function code in register fCode (== Z_R0). 1524 // - load the data block length (depends on cipher function) into register srclen if requested. 1525 // - is_decipher switches between cipher/decipher function codes 1526 // - set_len requests (if true) loading the data block length in register srclen 1527 void generate_load_AES_fCode(Register keylen, Register fCode, Register srclen, bool is_decipher) { 1528 1529 BLOCK_COMMENT("Set fCode {"); { 1530 Label fCode_set; 1531 int mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher; 1532 bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) 1533 && (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk); 1534 // Expanded key length is 44/52/60 * 4 bytes for AES-128/AES-192/AES-256. 1535 __ z_cghi(keylen, 52); // Check only once at the beginning. keylen and fCode may share the same register. 1536 1537 __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode); 1538 if (!identical_dataBlk_len) { 1539 __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk); 1540 } 1541 __ z_brl(fCode_set); // keyLen < 52: AES128 1542 1543 __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode); 1544 if (!identical_dataBlk_len) { 1545 __ z_lghi(srclen, VM_Version::Cipher::_AES192_dataBlk); 1546 } 1547 __ z_bre(fCode_set); // keyLen == 52: AES192 1548 1549 __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode); 1550 if (!identical_dataBlk_len) { 1551 __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk); 1552 } 1553 // __ z_brh(fCode_set); // keyLen < 52: AES128 // fallthru 1554 1555 __ bind(fCode_set); 1556 if (identical_dataBlk_len) { 1557 __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk); 1558 } 1559 } 1560 BLOCK_COMMENT("} Set fCode"); 1561 } 1562 1563 // Push a parameter block for the cipher/decipher instruction on the stack. 1564 // Layout of the additional stack space allocated for AES_cipherBlockChaining: 1565 // 1566 // | | 1567 // +--------+ <-- SP before expansion 1568 // | | 1569 // : : alignment loss, 0..(AES_parmBlk_align-8) bytes 1570 // | | 1571 // +--------+ 1572 // | | 1573 // : : space for parameter block, size VM_Version::Cipher::_AES*_parmBlk_C 1574 // | | 1575 // +--------+ <-- parmBlk, octoword-aligned, start of parameter block 1576 // | | 1577 // : : additional stack space for spills etc., size AES_parmBlk_addspace, DW @ Z_SP not usable!!! 1578 // | | 1579 // +--------+ <-- Z_SP after expansion 1580 1581 void generate_push_Block(int dataBlk_len, int parmBlk_len, int crypto_fCode, 1582 Register parmBlk, Register keylen, Register fCode, Register cv, Register key) { 1583 const int AES_parmBlk_align = 32; // octoword alignment. 1584 const int AES_parmBlk_addspace = 24; // Must be sufficiently large to hold all spilled registers 1585 // (currently 2) PLUS 1 DW for the frame pointer. 1586 1587 const int cv_len = dataBlk_len; 1588 const int key_len = parmBlk_len - cv_len; 1589 // This len must be known at JIT compile time. Only then are we able to recalc the SP before resize. 1590 // We buy this knowledge by wasting some (up to AES_parmBlk_align) bytes of stack space. 1591 const int resize_len = cv_len + key_len + AES_parmBlk_align + AES_parmBlk_addspace; 1592 1593 // Use parmBlk as temp reg here to hold the frame pointer. 1594 __ resize_frame(-resize_len, parmBlk, true); 1595 1596 // calculate parmBlk address from updated (resized) SP. 1597 __ add2reg(parmBlk, resize_len - (cv_len + key_len), Z_SP); 1598 __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // Align parameter block. 1599 1600 // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace+8, parmBlk). 1601 __ z_stg(keylen, -8, parmBlk); // Spill keylen for later use. 1602 1603 // calculate (SP before resize) from updated SP. 1604 __ add2reg(keylen, resize_len, Z_SP); // keylen holds prev SP for now. 1605 __ z_stg(keylen, -16, parmBlk); // Spill prev SP for easy revert. 1606 1607 __ z_mvc(0, cv_len-1, parmBlk, 0, cv); // Copy cv. 1608 __ z_mvc(cv_len, key_len-1, parmBlk, 0, key); // Copy key. 1609 __ z_lghi(fCode, crypto_fCode); 1610 } 1611 1612 // NOTE: 1613 // Before returning, the stub has to copy the chaining value from 1614 // the parmBlk, where it was updated by the crypto instruction, back 1615 // to the chaining value array the address of which was passed in the cv argument. 1616 // As all the available registers are used and modified by KMC, we need to save 1617 // the key length across the KMC instruction. We do so by spilling it to the stack, 1618 // just preceding the parmBlk (at (parmBlk - 8)). 1619 void generate_push_parmBlk(Register keylen, Register fCode, Register parmBlk, Register key, Register cv, bool is_decipher) { 1620 int mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher; 1621 Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set; 1622 1623 BLOCK_COMMENT("push parmBlk {"); 1624 if (VM_Version::has_Crypto_AES() ) { __ z_cghi(keylen, 52); } 1625 if (VM_Version::has_Crypto_AES128()) { __ z_brl(parmBlk_128); } // keyLen < 52: AES128 1626 if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); } // keyLen == 52: AES192 1627 if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); } // keyLen > 52: AES256 1628 1629 // Security net: requested AES function not available on this CPU. 1630 // NOTE: 1631 // As of now (March 2015), this safety net is not required. JCE policy files limit the 1632 // cryptographic strength of the keys used to 128 bit. If we have AES hardware support 1633 // at all, we have at least AES-128. 1634 __ stop_static("AES key strength not supported by CPU. Use -XX:-UseAES as remedy.", 0); 1635 1636 if (VM_Version::has_Crypto_AES256()) { 1637 __ bind(parmBlk_256); 1638 generate_push_Block(VM_Version::Cipher::_AES256_dataBlk, 1639 VM_Version::Cipher::_AES256_parmBlk_C, 1640 VM_Version::Cipher::_AES256 + mode, 1641 parmBlk, keylen, fCode, cv, key); 1642 if (VM_Version::has_Crypto_AES128() || VM_Version::has_Crypto_AES192()) { 1643 __ z_bru(parmBlk_set); // Fallthru otherwise. 1644 } 1645 } 1646 1647 if (VM_Version::has_Crypto_AES192()) { 1648 __ bind(parmBlk_192); 1649 generate_push_Block(VM_Version::Cipher::_AES192_dataBlk, 1650 VM_Version::Cipher::_AES192_parmBlk_C, 1651 VM_Version::Cipher::_AES192 + mode, 1652 parmBlk, keylen, fCode, cv, key); 1653 if (VM_Version::has_Crypto_AES128()) { 1654 __ z_bru(parmBlk_set); // Fallthru otherwise. 1655 } 1656 } 1657 1658 if (VM_Version::has_Crypto_AES128()) { 1659 __ bind(parmBlk_128); 1660 generate_push_Block(VM_Version::Cipher::_AES128_dataBlk, 1661 VM_Version::Cipher::_AES128_parmBlk_C, 1662 VM_Version::Cipher::_AES128 + mode, 1663 parmBlk, keylen, fCode, cv, key); 1664 // Fallthru 1665 } 1666 1667 __ bind(parmBlk_set); 1668 BLOCK_COMMENT("} push parmBlk"); 1669 } 1670 1671 // Pop a parameter block from the stack. The chaining value portion of the parameter block 1672 // is copied back to the cv array as it is needed for subsequent cipher steps. 1673 // The keylen value as well as the original SP (before resizing) was pushed to the stack 1674 // when pushing the parameter block. 1675 void generate_pop_parmBlk(Register keylen, Register parmBlk, Register key, Register cv) { 1676 1677 BLOCK_COMMENT("pop parmBlk {"); 1678 bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) && 1679 (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk); 1680 if (identical_dataBlk_len) { 1681 int cv_len = VM_Version::Cipher::_AES128_dataBlk; 1682 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1683 } else { 1684 int cv_len; 1685 Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set; 1686 __ z_lg(keylen, -8, parmBlk); // restore keylen 1687 __ z_cghi(keylen, 52); 1688 if (VM_Version::has_Crypto_AES256()) __ z_brh(parmBlk_256); // keyLen > 52: AES256 1689 if (VM_Version::has_Crypto_AES192()) __ z_bre(parmBlk_192); // keyLen == 52: AES192 1690 // if (VM_Version::has_Crypto_AES128()) __ z_brl(parmBlk_128); // keyLen < 52: AES128 // fallthru 1691 1692 // Security net: there is no one here. If we would need it, we should have 1693 // fallen into it already when pushing the parameter block. 1694 if (VM_Version::has_Crypto_AES128()) { 1695 __ bind(parmBlk_128); 1696 cv_len = VM_Version::Cipher::_AES128_dataBlk; 1697 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1698 if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) { 1699 __ z_bru(parmBlk_set); 1700 } 1701 } 1702 1703 if (VM_Version::has_Crypto_AES192()) { 1704 __ bind(parmBlk_192); 1705 cv_len = VM_Version::Cipher::_AES192_dataBlk; 1706 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1707 if (VM_Version::has_Crypto_AES256()) { 1708 __ z_bru(parmBlk_set); 1709 } 1710 } 1711 1712 if (VM_Version::has_Crypto_AES256()) { 1713 __ bind(parmBlk_256); 1714 cv_len = VM_Version::Cipher::_AES256_dataBlk; 1715 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1716 // __ z_bru(parmBlk_set); // fallthru 1717 } 1718 __ bind(parmBlk_set); 1719 } 1720 __ z_lg(Z_SP, -16, parmBlk); // Revert resize_frame_absolute. Z_SP saved by push_parmBlk. 1721 BLOCK_COMMENT("} pop parmBlk"); 1722 } 1723 1724 // Compute AES encrypt/decrypt function. 1725 void generate_AES_cipherBlock(bool is_decipher) { 1726 // Incoming arguments. 1727 Register from = Z_ARG1; // source byte array 1728 Register to = Z_ARG2; // destination byte array 1729 Register key = Z_ARG3; // expanded key array 1730 1731 const Register keylen = Z_R0; // Temporarily (until fCode is set) holds the expanded key array length. 1732 1733 // Register definitions as required by KM instruction. 1734 const Register fCode = Z_R0; // crypto function code 1735 const Register parmBlk = Z_R1; // parameter block address (points to crypto key) 1736 const Register src = Z_ARG1; // Must be even reg (KM requirement). 1737 const Register srclen = Z_ARG2; // Must be odd reg and pair with src. Overwrites destination address. 1738 const Register dst = Z_ARG3; // Must be even reg (KM requirement). Overwrites expanded key address. 1739 1740 // Read key len of expanded key (in 4-byte words). 1741 __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1742 1743 // Copy arguments to registers as required by crypto instruction. 1744 __ z_lgr(parmBlk, key); // crypto key (in T_INT array). 1745 __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical. 1746 __ z_lgr(dst, to); // Copy dst address, even register required. 1747 1748 // Construct function code into fCode(Z_R0), data block length into srclen(Z_ARG2). 1749 generate_load_AES_fCode(keylen, fCode, srclen, is_decipher); 1750 1751 __ km(dst, src); // Cipher the message. 1752 1753 __ z_br(Z_R14); 1754 } 1755 1756 // Compute AES encrypt function. 1757 address generate_AES_encryptBlock(const char* name) { 1758 __ align(CodeEntryAlignment); 1759 StubCodeMark mark(this, "StubRoutines", name); 1760 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1761 1762 generate_AES_cipherBlock(false); 1763 1764 return __ addr_at(start_off); 1765 } 1766 1767 // Compute AES decrypt function. 1768 address generate_AES_decryptBlock(const char* name) { 1769 __ align(CodeEntryAlignment); 1770 StubCodeMark mark(this, "StubRoutines", name); 1771 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1772 1773 generate_AES_cipherBlock(true); 1774 1775 return __ addr_at(start_off); 1776 } 1777 1778 // These stubs receive the addresses of the cryptographic key and of the chaining value as two separate 1779 // arguments (registers "key" and "cv", respectively). The KMC instruction, on the other hand, requires 1780 // chaining value and key to be, in this sequence, adjacent in storage. Thus, we need to allocate some 1781 // thread-local working storage. Using heap memory incurs all the hassles of allocating/freeing. 1782 // Stack space, on the contrary, is deallocated automatically when we return from the stub to the caller. 1783 // *** WARNING *** 1784 // Please note that we do not formally allocate stack space, nor do we 1785 // update the stack pointer. Therefore, no function calls are allowed 1786 // and nobody else must use the stack range where the parameter block 1787 // is located. 1788 // We align the parameter block to the next available octoword. 1789 // 1790 // Compute chained AES encrypt function. 1791 void generate_AES_cipherBlockChaining(bool is_decipher) { 1792 1793 Register from = Z_ARG1; // source byte array (clear text) 1794 Register to = Z_ARG2; // destination byte array (ciphered) 1795 Register key = Z_ARG3; // expanded key array. 1796 Register cv = Z_ARG4; // chaining value 1797 const Register msglen = Z_ARG5; // Total length of the msg to be encrypted. Value must be returned 1798 // in Z_RET upon completion of this stub. Is 32-bit integer. 1799 1800 const Register keylen = Z_R0; // Expanded key length, as read from key array. Temp only. 1801 const Register fCode = Z_R0; // crypto function code 1802 const Register parmBlk = Z_R1; // parameter block address (points to crypto key) 1803 const Register src = Z_ARG1; // is Z_R2 1804 const Register srclen = Z_ARG2; // Overwrites destination address. 1805 const Register dst = Z_ARG3; // Overwrites key address. 1806 1807 // Read key len of expanded key (in 4-byte words). 1808 __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1809 1810 // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block. 1811 // Construct function code in fCode (Z_R0). 1812 generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, is_decipher); 1813 1814 // Prepare other registers for instruction. 1815 __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical. 1816 __ z_lgr(dst, to); 1817 __ z_llgfr(srclen, msglen); // We pass the offsets as ints, not as longs as required. 1818 1819 __ kmc(dst, src); // Cipher the message. 1820 1821 generate_pop_parmBlk(keylen, parmBlk, key, cv); 1822 1823 __ z_llgfr(Z_RET, msglen); // We pass the offsets as ints, not as longs as required. 1824 __ z_br(Z_R14); 1825 } 1826 1827 // Compute chained AES encrypt function. 1828 address generate_cipherBlockChaining_AES_encrypt(const char* name) { 1829 __ align(CodeEntryAlignment); 1830 StubCodeMark mark(this, "StubRoutines", name); 1831 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1832 1833 generate_AES_cipherBlockChaining(false); 1834 1835 return __ addr_at(start_off); 1836 } 1837 1838 // Compute chained AES encrypt function. 1839 address generate_cipherBlockChaining_AES_decrypt(const char* name) { 1840 __ align(CodeEntryAlignment); 1841 StubCodeMark mark(this, "StubRoutines", name); 1842 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1843 1844 generate_AES_cipherBlockChaining(true); 1845 1846 return __ addr_at(start_off); 1847 } 1848 1849 1850 // Call interface for all SHA* stubs. 1851 // 1852 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed. 1853 // Z_ARG2 - current SHA state. Ptr to state area. This area serves as 1854 // parameter block as required by the crypto instruction. 1855 // Z_ARG3 - current byte offset in source data block. 1856 // Z_ARG4 - last byte offset in source data block. 1857 // (Z_ARG4 - Z_ARG3) gives the #bytes remaining to be processed. 1858 // 1859 // Z_RET - return value. First unprocessed byte offset in src buffer. 1860 // 1861 // A few notes on the call interface: 1862 // - All stubs, whether they are single-block or multi-block, are assumed to 1863 // digest an integer multiple of the data block length of data. All data 1864 // blocks are digested using the intermediate message digest (KIMD) instruction. 1865 // Special end processing, as done by the KLMD instruction, seems to be 1866 // emulated by the calling code. 1867 // 1868 // - Z_ARG1 addresses the first byte of source data. The offset (Z_ARG3) is 1869 // already accounted for. 1870 // 1871 // - The current SHA state (the intermediate message digest value) is contained 1872 // in an area addressed by Z_ARG2. The area size depends on the SHA variant 1873 // and is accessible via the enum VM_Version::MsgDigest::_SHA<n>_parmBlk_I 1874 // 1875 // - The single-block stub is expected to digest exactly one data block, starting 1876 // at the address passed in Z_ARG1. 1877 // 1878 // - The multi-block stub is expected to digest all data blocks which start in 1879 // the offset interval [srcOff(Z_ARG3), srcLimit(Z_ARG4)). The exact difference 1880 // (srcLimit-srcOff), rounded up to the next multiple of the data block length, 1881 // gives the number of blocks to digest. It must be assumed that the calling code 1882 // provides for a large enough source data buffer. 1883 // 1884 // Compute SHA-1 function. 1885 address generate_SHA1_stub(bool multiBlock, const char* name) { 1886 __ align(CodeEntryAlignment); 1887 StubCodeMark mark(this, "StubRoutines", name); 1888 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1889 1890 const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added). 1891 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter for kimd register pairs. 1892 const Register srcOff = Z_ARG3; // int 1893 const Register srcLimit = Z_ARG4; // Only passed in multiBlock case. int 1894 1895 const Register SHAState_local = Z_R1; 1896 const Register SHAState_save = Z_ARG3; 1897 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before. 1898 Label useKLMD, rtn; 1899 1900 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA1); // function code 1901 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block 1902 1903 if (multiBlock) { // Process everything from offset to limit. 1904 1905 // The following description is valid if we get a raw (unpimped) source data buffer, 1906 // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailled above, 1907 // the calling convention for these stubs is different. We leave the description in 1908 // to inform the reader what must be happening hidden in the calling code. 1909 // 1910 // The data block to be processed can have arbitrary length, i.e. its length does not 1911 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement 1912 // two different paths. If the length is an integer multiple, we use KIMD, saving us 1913 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state 1914 // to the stack, execute a KLMD instruction on it and copy the result back to the 1915 // caller's SHA state location. 1916 1917 // Total #srcBuff blocks to process. 1918 if (VM_Version::has_DistinctOpnds()) { 1919 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference 1920 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); // round up 1921 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff); 1922 __ z_ark(srcLimit, srcOff, srcBufLen); // Srclimit temporarily holds return value. 1923 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit. 1924 } else { 1925 __ z_lgfr(srcBufLen, srcLimit); // Exact difference. srcLimit passed as int. 1926 __ z_sgfr(srcBufLen, srcOff); // SrcOff passed as int, now properly casted to long. 1927 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); // round up 1928 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff); 1929 __ z_lgr(srcLimit, srcOff); // SrcLimit temporarily holds return value. 1930 __ z_agr(srcLimit, srcBufLen); 1931 } 1932 1933 // Integral #blocks to digest? 1934 // As a result of the calculations above, srcBufLen MUST be an integer 1935 // multiple of _SHA1_dataBlk, or else we are in big trouble. 1936 // We insert an asm_assert into the KLMD case to guard against that. 1937 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); 1938 __ z_brc(Assembler::bcondNotAllZero, useKLMD); 1939 1940 // Process all full blocks. 1941 __ kimd(srcBuff); 1942 1943 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer. 1944 } else { // Process one data block only. 1945 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA1_dataBlk); // #srcBuff bytes to process 1946 __ kimd(srcBuff); 1947 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA1_dataBlk, srcOff); // Offset of first unprocessed byte in buffer. No 32 to 64 bit extension needed. 1948 } 1949 1950 __ bind(rtn); 1951 __ z_br(Z_R14); 1952 1953 if (multiBlock) { 1954 __ bind(useKLMD); 1955 1956 #if 1 1957 // Security net: this stub is believed to be called for full-sized data blocks only 1958 // NOTE: The following code is believed to be correct, but is is not tested. 1959 __ stop_static("SHA128 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0); 1960 #endif 1961 } 1962 1963 return __ addr_at(start_off); 1964 } 1965 1966 // Compute SHA-256 function. 1967 address generate_SHA256_stub(bool multiBlock, const char* name) { 1968 __ align(CodeEntryAlignment); 1969 StubCodeMark mark(this, "StubRoutines", name); 1970 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1971 1972 const Register srcBuff = Z_ARG1; 1973 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter. 1974 const Register SHAState_local = Z_R1; 1975 const Register SHAState_save = Z_ARG3; 1976 const Register srcOff = Z_ARG3; 1977 const Register srcLimit = Z_ARG4; 1978 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before. 1979 Label useKLMD, rtn; 1980 1981 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA256); // function code 1982 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block 1983 1984 if (multiBlock) { // Process everything from offset to limit. 1985 // The following description is valid if we get a raw (unpimped) source data buffer, 1986 // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailled above, 1987 // the calling convention for these stubs is different. We leave the description in 1988 // to inform the reader what must be happening hidden in the calling code. 1989 // 1990 // The data block to be processed can have arbitrary length, i.e. its length does not 1991 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement 1992 // two different paths. If the length is an integer multiple, we use KIMD, saving us 1993 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state 1994 // to the stack, execute a KLMD instruction on it and copy the result back to the 1995 // caller's SHA state location. 1996 1997 // total #srcBuff blocks to process 1998 if (VM_Version::has_DistinctOpnds()) { 1999 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference 2000 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); // round up 2001 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff); 2002 __ z_ark(srcLimit, srcOff, srcBufLen); // Srclimit temporarily holds return value. 2003 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit. 2004 } else { 2005 __ z_lgfr(srcBufLen, srcLimit); // exact difference 2006 __ z_sgfr(srcBufLen, srcOff); 2007 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); // round up 2008 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff); 2009 __ z_lgr(srcLimit, srcOff); // Srclimit temporarily holds return value. 2010 __ z_agr(srcLimit, srcBufLen); 2011 } 2012 2013 // Integral #blocks to digest? 2014 // As a result of the calculations above, srcBufLen MUST be an integer 2015 // multiple of _SHA1_dataBlk, or else we are in big trouble. 2016 // We insert an asm_assert into the KLMD case to guard against that. 2017 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); 2018 __ z_brc(Assembler::bcondNotAllZero, useKLMD); 2019 2020 // Process all full blocks. 2021 __ kimd(srcBuff); 2022 2023 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer. 2024 } else { // Process one data block only. 2025 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA256_dataBlk); // #srcBuff bytes to process 2026 __ kimd(srcBuff); 2027 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA256_dataBlk, srcOff); // Offset of first unprocessed byte in buffer. 2028 } 2029 2030 __ bind(rtn); 2031 __ z_br(Z_R14); 2032 2033 if (multiBlock) { 2034 __ bind(useKLMD); 2035 #if 1 2036 // Security net: this stub is believed to be called for full-sized data blocks only. 2037 // NOTE: 2038 // The following code is believed to be correct, but is is not tested. 2039 __ stop_static("SHA256 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0); 2040 #endif 2041 } 2042 2043 return __ addr_at(start_off); 2044 } 2045 2046 // Compute SHA-512 function. 2047 address generate_SHA512_stub(bool multiBlock, const char* name) { 2048 __ align(CodeEntryAlignment); 2049 StubCodeMark mark(this, "StubRoutines", name); 2050 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2051 2052 const Register srcBuff = Z_ARG1; 2053 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter. 2054 const Register SHAState_local = Z_R1; 2055 const Register SHAState_save = Z_ARG3; 2056 const Register srcOff = Z_ARG3; 2057 const Register srcLimit = Z_ARG4; 2058 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before. 2059 Label useKLMD, rtn; 2060 2061 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA512); // function code 2062 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block 2063 2064 if (multiBlock) { // Process everything from offset to limit. 2065 // The following description is valid if we get a raw (unpimped) source data buffer, 2066 // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailled above, 2067 // the calling convention for these stubs is different. We leave the description in 2068 // to inform the reader what must be happening hidden in the calling code. 2069 // 2070 // The data block to be processed can have arbitrary length, i.e. its length does not 2071 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement 2072 // two different paths. If the length is an integer multiple, we use KIMD, saving us 2073 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state 2074 // to the stack, execute a KLMD instruction on it and copy the result back to the 2075 // caller's SHA state location. 2076 2077 // total #srcBuff blocks to process 2078 if (VM_Version::has_DistinctOpnds()) { 2079 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference 2080 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); // round up 2081 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff); 2082 __ z_ark(srcLimit, srcOff, srcBufLen); // Srclimit temporarily holds return value. 2083 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit. 2084 } else { 2085 __ z_lgfr(srcBufLen, srcLimit); // exact difference 2086 __ z_sgfr(srcBufLen, srcOff); 2087 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); // round up 2088 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff); 2089 __ z_lgr(srcLimit, srcOff); // Srclimit temporarily holds return value. 2090 __ z_agr(srcLimit, srcBufLen); 2091 } 2092 2093 // integral #blocks to digest? 2094 // As a result of the calculations above, srcBufLen MUST be an integer 2095 // multiple of _SHA1_dataBlk, or else we are in big trouble. 2096 // We insert an asm_assert into the KLMD case to guard against that. 2097 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); 2098 __ z_brc(Assembler::bcondNotAllZero, useKLMD); 2099 2100 // Process all full blocks. 2101 __ kimd(srcBuff); 2102 2103 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer. 2104 } else { // Process one data block only. 2105 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA512_dataBlk); // #srcBuff bytes to process 2106 __ kimd(srcBuff); 2107 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA512_dataBlk, srcOff); // Offset of first unprocessed byte in buffer. 2108 } 2109 2110 __ bind(rtn); 2111 __ z_br(Z_R14); 2112 2113 if (multiBlock) { 2114 __ bind(useKLMD); 2115 #if 1 2116 // Security net: this stub is believed to be called for full-sized data blocks only 2117 // NOTE: 2118 // The following code is believed to be correct, but is is not tested. 2119 __ stop_static("SHA512 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0); 2120 #endif 2121 } 2122 2123 return __ addr_at(start_off); 2124 } 2125 2126 2127 /** 2128 * Arguments: 2129 * 2130 * Inputs: 2131 * Z_ARG1 - int crc 2132 * Z_ARG2 - byte* buf 2133 * Z_ARG3 - int length (of buffer) 2134 * 2135 * Result: 2136 * Z_RET - int crc result 2137 **/ 2138 // Compute CRC function (generic, for all polynomials). 2139 void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) { 2140 2141 // arguments to kernel_crc32: 2142 Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int. 2143 Register data = Z_ARG2; // source byte array 2144 Register dataLen = Z_ARG3; // #bytes to process, int 2145 // Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller. 2146 const Register t0 = Z_R10; // work reg for kernel* emitters 2147 const Register t1 = Z_R11; // work reg for kernel* emitters 2148 const Register t2 = Z_R12; // work reg for kernel* emitters 2149 const Register t3 = Z_R13; // work reg for kernel* emitters 2150 2151 assert_different_registers(crc, data, dataLen, table); 2152 2153 // We pass these values as ints, not as longs as required by C calling convention. 2154 // Crc used as int. 2155 __ z_llgfr(dataLen, dataLen); 2156 2157 __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. 2158 __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 to make them available as work registers. 2159 __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC); 2160 __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 back from stack. 2161 __ resize_frame(+(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. 2162 2163 __ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits. 2164 __ z_br(Z_R14); // Result already in Z_RET == Z_ARG1. 2165 } 2166 2167 2168 // Compute CRC32 function. 2169 address generate_CRC32_updateBytes(const char* name) { 2170 __ align(CodeEntryAlignment); 2171 StubCodeMark mark(this, "StubRoutines", name); 2172 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2173 2174 assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name); 2175 2176 BLOCK_COMMENT("CRC32_updateBytes {"); 2177 Register table = Z_ARG4; // crc32 table address. 2178 StubRoutines::zarch::generate_load_crc_table_addr(_masm, table); 2179 2180 generate_CRC_updateBytes(name, table, true); 2181 BLOCK_COMMENT("} CRC32_updateBytes"); 2182 2183 return __ addr_at(start_off); 2184 } 2185 2186 2187 // Compute CRC32C function. 2188 address generate_CRC32C_updateBytes(const char* name) { 2189 __ align(CodeEntryAlignment); 2190 StubCodeMark mark(this, "StubRoutines", name); 2191 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2192 2193 assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name); 2194 2195 BLOCK_COMMENT("CRC32C_updateBytes {"); 2196 Register table = Z_ARG4; // crc32c table address. 2197 StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table); 2198 2199 generate_CRC_updateBytes(name, table, false); 2200 BLOCK_COMMENT("} CRC32C_updateBytes"); 2201 2202 return __ addr_at(start_off); 2203 } 2204 2205 2206 // Arguments: 2207 // Z_ARG1 - x address 2208 // Z_ARG2 - x length 2209 // Z_ARG3 - y address 2210 // Z_ARG4 - y length 2211 // Z_ARG5 - z address 2212 // 160[Z_SP] - z length 2213 address generate_multiplyToLen() { 2214 __ align(CodeEntryAlignment); 2215 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 2216 2217 address start = __ pc(); 2218 2219 const Register x = Z_ARG1; 2220 const Register xlen = Z_ARG2; 2221 const Register y = Z_ARG3; 2222 const Register ylen = Z_ARG4; 2223 const Register z = Z_ARG5; 2224 // zlen is passed on the stack: 2225 // Address zlen(Z_SP, _z_abi(remaining_cargs)); 2226 2227 // Next registers will be saved on stack in multiply_to_len(). 2228 const Register tmp1 = Z_tmp_1; 2229 const Register tmp2 = Z_tmp_2; 2230 const Register tmp3 = Z_tmp_3; 2231 const Register tmp4 = Z_tmp_4; 2232 const Register tmp5 = Z_R9; 2233 2234 BLOCK_COMMENT("Entry:"); 2235 2236 __ z_llgfr(xlen, xlen); 2237 __ z_llgfr(ylen, ylen); 2238 2239 __ multiply_to_len(x, xlen, y, ylen, z, tmp1, tmp2, tmp3, tmp4, tmp5); 2240 2241 __ z_br(Z_R14); // Return to caller. 2242 2243 return start; 2244 } 2245 2246 void generate_initial() { 2247 // Generates all stubs and initializes the entry points. 2248 2249 // Entry points that exist in all platforms. 2250 // Note: This is code that could be shared among different 2251 // platforms - however the benefit seems to be smaller than the 2252 // disadvantage of having a much more complicated generator 2253 // structure. See also comment in stubRoutines.hpp. 2254 StubRoutines::_forward_exception_entry = generate_forward_exception(); 2255 2256 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 2257 StubRoutines::_catch_exception_entry = generate_catch_exception(); 2258 2259 // Build this early so it's available for the interpreter. 2260 StubRoutines::_throw_StackOverflowError_entry = 2261 generate_throw_exception("StackOverflowError throw_exception", 2262 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); 2263 StubRoutines::_throw_delayed_StackOverflowError_entry = 2264 generate_throw_exception("delayed StackOverflowError throw_exception", 2265 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false); 2266 2267 //---------------------------------------------------------------------- 2268 // Entry points that are platform specific. 2269 2270 if (UseCRC32Intrinsics) { 2271 StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table; 2272 StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes"); 2273 } 2274 2275 if (UseCRC32CIntrinsics) { 2276 StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table; 2277 StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes"); 2278 } 2279 2280 // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. 2281 StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table; 2282 } 2283 2284 2285 void generate_all() { 2286 // Generates all stubs and initializes the entry points. 2287 2288 StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check(); 2289 2290 // These entry points require SharedInfo::stack0 to be set up in non-core builds. 2291 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); 2292 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false); 2293 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); 2294 2295 // Support for verify_oop (must happen after universe_init). 2296 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); 2297 2298 // Arraycopy stubs used by compilers. 2299 generate_arraycopy_stubs(); 2300 2301 // safefetch stubs 2302 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, &StubRoutines::_safefetch32_fault_pc, &StubRoutines::_safefetch32_continuation_pc); 2303 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, &StubRoutines::_safefetchN_fault_pc, &StubRoutines::_safefetchN_continuation_pc); 2304 2305 // Generate AES intrinsics code. 2306 if (UseAESIntrinsics) { 2307 StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock"); 2308 StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock"); 2309 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining"); 2310 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining"); 2311 } 2312 2313 // Generate SHA1/SHA256/SHA512 intrinsics code. 2314 if (UseSHA1Intrinsics) { 2315 StubRoutines::_sha1_implCompress = generate_SHA1_stub(false, "SHA1_singleBlock"); 2316 StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true, "SHA1_multiBlock"); 2317 } 2318 if (UseSHA256Intrinsics) { 2319 StubRoutines::_sha256_implCompress = generate_SHA256_stub(false, "SHA256_singleBlock"); 2320 StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true, "SHA256_multiBlock"); 2321 } 2322 if (UseSHA512Intrinsics) { 2323 StubRoutines::_sha512_implCompress = generate_SHA512_stub(false, "SHA512_singleBlock"); 2324 StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true, "SHA512_multiBlock"); 2325 } 2326 2327 #ifdef COMPILER2 2328 if (UseMultiplyToLenIntrinsic) { 2329 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 2330 } 2331 if (UseMontgomeryMultiplyIntrinsic) { 2332 StubRoutines::_montgomeryMultiply 2333 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 2334 } 2335 if (UseMontgomerySquareIntrinsic) { 2336 StubRoutines::_montgomerySquare 2337 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 2338 } 2339 #endif 2340 } 2341 2342 public: 2343 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 2344 // Replace the standard masm with a special one: 2345 _masm = new MacroAssembler(code); 2346 2347 _stub_count = !all ? 0x100 : 0x200; 2348 if (all) { 2349 generate_all(); 2350 } else { 2351 generate_initial(); 2352 } 2353 } 2354 2355 private: 2356 int _stub_count; 2357 void stub_prolog(StubCodeDesc* cdesc) { 2358 #ifdef ASSERT 2359 // Put extra information in the stub code, to make it more readable. 2360 // Write the high part of the address. 2361 // [RGV] Check if there is a dependency on the size of this prolog. 2362 __ emit_32((intptr_t)cdesc >> 32); 2363 __ emit_32((intptr_t)cdesc); 2364 __ emit_32(++_stub_count); 2365 #endif 2366 align(true); 2367 } 2368 2369 void align(bool at_header = false) { 2370 // z/Architecture cache line size is 256 bytes. 2371 // There is no obvious benefit in aligning stub 2372 // code to cache lines. Use CodeEntryAlignment instead. 2373 const unsigned int icache_line_size = CodeEntryAlignment; 2374 const unsigned int icache_half_line_size = MIN2<unsigned int>(32, CodeEntryAlignment); 2375 2376 if (at_header) { 2377 while ((intptr_t)(__ pc()) % icache_line_size != 0) { 2378 __ emit_16(0); 2379 } 2380 } else { 2381 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { 2382 __ z_nop(); 2383 } 2384 } 2385 } 2386 2387 }; 2388 2389 void StubGenerator_generate(CodeBuffer* code, bool all) { 2390 StubGenerator g(code, all); 2391 }