1 /* 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "registerSaver_s390.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interp_masm.hpp" 31 #include "nativeInst_s390.hpp" 32 #include "oops/instanceOop.hpp" 33 #include "oops/objArrayKlass.hpp" 34 #include "oops/oop.inline.hpp" 35 #include "prims/methodHandles.hpp" 36 #include "runtime/frame.inline.hpp" 37 #include "runtime/handles.inline.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/stubCodeGenerator.hpp" 40 #include "runtime/stubRoutines.hpp" 41 #include "runtime/thread.inline.hpp" 42 43 // Declaration and definition of StubGenerator (no .hpp file). 44 // For a more detailed description of the stub routine structure 45 // see the comment in stubRoutines.hpp. 46 47 #ifdef PRODUCT 48 #define __ _masm-> 49 #else 50 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 51 #endif 52 53 #define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str) 54 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 55 56 // ----------------------------------------------------------------------- 57 // Stub Code definitions 58 59 class StubGenerator: public StubCodeGenerator { 60 private: 61 62 //---------------------------------------------------------------------- 63 // Call stubs are used to call Java from C. 64 65 // 66 // Arguments: 67 // 68 // R2 - call wrapper address : address 69 // R3 - result : intptr_t* 70 // R4 - result type : BasicType 71 // R5 - method : method 72 // R6 - frame mgr entry point : address 73 // [SP+160] - parameter block : intptr_t* 74 // [SP+172] - parameter count in words : int 75 // [SP+176] - thread : Thread* 76 // 77 address generate_call_stub(address& return_address) { 78 // Set up a new C frame, copy Java arguments, call frame manager 79 // or native_entry, and process result. 
80 81 StubCodeMark mark(this, "StubRoutines", "call_stub"); 82 address start = __ pc(); 83 84 Register r_arg_call_wrapper_addr = Z_ARG1; 85 Register r_arg_result_addr = Z_ARG2; 86 Register r_arg_result_type = Z_ARG3; 87 Register r_arg_method = Z_ARG4; 88 Register r_arg_entry = Z_ARG5; 89 90 // offsets to fp 91 #define d_arg_thread 176 92 #define d_arg_argument_addr 160 93 #define d_arg_argument_count 168+4 94 95 Register r_entryframe_fp = Z_tmp_1; 96 Register r_top_of_arguments_addr = Z_ARG4; 97 Register r_new_arg_entry = Z_R14; 98 99 // macros for frame offsets 100 #define call_wrapper_address_offset \ 101 _z_entry_frame_locals_neg(call_wrapper_address) 102 #define result_address_offset \ 103 _z_entry_frame_locals_neg(result_address) 104 #define result_type_offset \ 105 _z_entry_frame_locals_neg(result_type) 106 #define arguments_tos_address_offset \ 107 _z_entry_frame_locals_neg(arguments_tos_address) 108 109 { 110 // 111 // STACK on entry to call_stub: 112 // 113 // F1 [C_FRAME] 114 // ... 115 // 116 117 Register r_argument_addr = Z_tmp_3; 118 Register r_argumentcopy_addr = Z_tmp_4; 119 Register r_argument_size_in_bytes = Z_ARG5; 120 Register r_frame_size = Z_R1; 121 122 Label arguments_copied; 123 124 // Save non-volatile registers to ABI of caller frame. 125 BLOCK_COMMENT("save registers, push frame {"); 126 __ z_stmg(Z_R6, Z_R14, 16, Z_SP); 127 __ z_std(Z_F8, 96, Z_SP); 128 __ z_std(Z_F9, 104, Z_SP); 129 __ z_std(Z_F10, 112, Z_SP); 130 __ z_std(Z_F11, 120, Z_SP); 131 __ z_std(Z_F12, 128, Z_SP); 132 __ z_std(Z_F13, 136, Z_SP); 133 __ z_std(Z_F14, 144, Z_SP); 134 __ z_std(Z_F15, 152, Z_SP); 135 136 // 137 // Push ENTRY_FRAME including arguments: 138 // 139 // F0 [TOP_IJAVA_FRAME_ABI] 140 // [outgoing Java arguments] 141 // [ENTRY_FRAME_LOCALS] 142 // F1 [C_FRAME] 143 // ... 144 // 145 146 // Calculate new frame size and push frame. 147 #define abi_plus_locals_size \ 148 (frame::z_top_ijava_frame_abi_size + frame::z_entry_frame_locals_size) 149 if (abi_plus_locals_size % BytesPerWord == 0) { 150 // Preload constant part of frame size. 151 __ load_const_optimized(r_frame_size, -abi_plus_locals_size/BytesPerWord); 152 // Keep copy of our frame pointer (caller's SP). 153 __ z_lgr(r_entryframe_fp, Z_SP); 154 // Add space required by arguments to frame size. 155 __ z_slgf(r_frame_size, d_arg_argument_count, Z_R0, Z_SP); 156 // Move Z_ARG5 early, it will be used as a local. 157 __ z_lgr(r_new_arg_entry, r_arg_entry); 158 // Convert frame size from words to bytes. 159 __ z_sllg(r_frame_size, r_frame_size, LogBytesPerWord); 160 __ push_frame(r_frame_size, r_entryframe_fp, 161 false/*don't copy SP*/, true /*frame size sign inverted*/); 162 } else { 163 guarantee(false, "frame sizes should be multiples of word size (BytesPerWord)"); 164 } 165 BLOCK_COMMENT("} save, push"); 166 167 // Load argument registers for call. 168 BLOCK_COMMENT("prepare/copy arguments {"); 169 __ z_lgr(Z_method, r_arg_method); 170 __ z_lg(Z_thread, d_arg_thread, r_entryframe_fp); 171 172 // Calculate top_of_arguments_addr which will be tos (not prepushed) later. 173 // Wimply use SP + frame::top_ijava_frame_size. 174 __ add2reg(r_top_of_arguments_addr, 175 frame::z_top_ijava_frame_abi_size - BytesPerWord, Z_SP); 176 177 // Initialize call_stub locals (step 1). 
178 if ((call_wrapper_address_offset + BytesPerWord == result_address_offset) && 179 (result_address_offset + BytesPerWord == result_type_offset) && 180 (result_type_offset + BytesPerWord == arguments_tos_address_offset)) { 181 182 __ z_stmg(r_arg_call_wrapper_addr, r_top_of_arguments_addr, 183 call_wrapper_address_offset, r_entryframe_fp); 184 } else { 185 __ z_stg(r_arg_call_wrapper_addr, 186 call_wrapper_address_offset, r_entryframe_fp); 187 __ z_stg(r_arg_result_addr, 188 result_address_offset, r_entryframe_fp); 189 __ z_stg(r_arg_result_type, 190 result_type_offset, r_entryframe_fp); 191 __ z_stg(r_top_of_arguments_addr, 192 arguments_tos_address_offset, r_entryframe_fp); 193 } 194 195 // Copy Java arguments. 196 197 // Any arguments to copy? 198 __ load_and_test_int2long(Z_R1, Address(r_entryframe_fp, d_arg_argument_count)); 199 __ z_bre(arguments_copied); 200 201 // Prepare loop and copy arguments in reverse order. 202 { 203 // Calculate argument size in bytes. 204 __ z_sllg(r_argument_size_in_bytes, Z_R1, LogBytesPerWord); 205 206 // Get addr of first incoming Java argument. 207 __ z_lg(r_argument_addr, d_arg_argument_addr, r_entryframe_fp); 208 209 // Let r_argumentcopy_addr point to last outgoing Java argument. 210 __ add2reg(r_argumentcopy_addr, BytesPerWord, r_top_of_arguments_addr); // = Z_SP+160 effectively. 211 212 // Let r_argument_addr point to last incoming Java argument. 213 __ add2reg_with_index(r_argument_addr, -BytesPerWord, 214 r_argument_size_in_bytes, r_argument_addr); 215 216 // Now loop while Z_R1 > 0 and copy arguments. 217 { 218 Label next_argument; 219 __ bind(next_argument); 220 // Mem-mem move. 221 __ z_mvc(0, BytesPerWord-1, r_argumentcopy_addr, 0, r_argument_addr); 222 __ add2reg(r_argument_addr, -BytesPerWord); 223 __ add2reg(r_argumentcopy_addr, BytesPerWord); 224 __ z_brct(Z_R1, next_argument); 225 } 226 } // End of argument copy loop. 227 228 __ bind(arguments_copied); 229 } 230 BLOCK_COMMENT("} arguments"); 231 232 BLOCK_COMMENT("call {"); 233 { 234 // Call frame manager or native entry. 235 236 // 237 // Register state on entry to frame manager / native entry: 238 // 239 // Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed) 240 // Lesp = (SP) + copied_arguments_offset - 8 241 // Z_method - method 242 // Z_thread - JavaThread* 243 // 244 245 // Here, the usual SP is the initial_caller_sp. 246 __ z_lgr(Z_R10, Z_SP); 247 248 // Z_esp points to the slot below the last argument. 249 __ z_lgr(Z_esp, r_top_of_arguments_addr); 250 251 // 252 // Stack on entry to frame manager / native entry: 253 // 254 // F0 [TOP_IJAVA_FRAME_ABI] 255 // [outgoing Java arguments] 256 // [ENTRY_FRAME_LOCALS] 257 // F1 [C_FRAME] 258 // ... 259 // 260 261 // Do a light-weight C-call here, r_new_arg_entry holds the address 262 // of the interpreter entry point (frame manager or native entry) 263 // and save runtime-value of return_pc in return_address 264 // (call by reference argument). 265 return_address = __ call_stub(r_new_arg_entry); 266 } 267 BLOCK_COMMENT("} call"); 268 269 { 270 BLOCK_COMMENT("restore registers {"); 271 // Returned from frame manager or native entry. 272 // Now pop frame, process result, and return to caller. 273 274 // 275 // Stack on exit from frame manager / native entry: 276 // 277 // F0 [ABI] 278 // ... 279 // [ENTRY_FRAME_LOCALS] 280 // F1 [C_FRAME] 281 // ... 282 // 283 // Just pop the topmost frame ... 
284 // 285 286 Label ret_is_object; 287 Label ret_is_long; 288 Label ret_is_float; 289 Label ret_is_double; 290 291 // Restore frame pointer. 292 __ z_lg(r_entryframe_fp, _z_abi(callers_sp), Z_SP); 293 // Pop frame. Done here to minimize stalls. 294 __ z_lg(Z_SP, _z_abi(callers_sp), Z_SP); 295 296 // Reload some volatile registers which we've spilled before the call 297 // to frame manager / native entry. 298 // Access all locals via frame pointer, because we know nothing about 299 // the topmost frame's size. 300 __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp); 301 __ z_lg(r_arg_result_type, result_type_offset, r_entryframe_fp); 302 303 // Restore non-volatiles. 304 __ z_lmg(Z_R6, Z_R14, 16, Z_SP); 305 __ z_ld(Z_F8, 96, Z_SP); 306 __ z_ld(Z_F9, 104, Z_SP); 307 __ z_ld(Z_F10, 112, Z_SP); 308 __ z_ld(Z_F11, 120, Z_SP); 309 __ z_ld(Z_F12, 128, Z_SP); 310 __ z_ld(Z_F13, 136, Z_SP); 311 __ z_ld(Z_F14, 144, Z_SP); 312 __ z_ld(Z_F15, 152, Z_SP); 313 BLOCK_COMMENT("} restore"); 314 315 // 316 // Stack on exit from call_stub: 317 // 318 // 0 [C_FRAME] 319 // ... 320 // 321 // No call_stub frames left. 322 // 323 324 // All non-volatiles have been restored at this point!! 325 326 //------------------------------------------------------------------------ 327 // The following code makes some assumptions on the T_<type> enum values. 328 // The enum is defined in globalDefinitions.hpp. 329 // The validity of the assumptions is tested as far as possible. 330 // The assigned values should not be shuffled 331 // T_BOOLEAN==4 - lowest used enum value 332 // T_NARROWOOP==16 - largest used enum value 333 //------------------------------------------------------------------------ 334 BLOCK_COMMENT("process result {"); 335 Label firstHandler; 336 int handlerLen= 8; 337 #ifdef ASSERT 338 char assertMsg[] = "check BasicType definition in globalDefinitions.hpp"; 339 __ z_chi(r_arg_result_type, T_BOOLEAN); 340 __ asm_assert_low(assertMsg, 0x0234); 341 __ z_chi(r_arg_result_type, T_NARROWOOP); 342 __ asm_assert_high(assertMsg, 0x0235); 343 #endif 344 __ add2reg(r_arg_result_type, -T_BOOLEAN); // Remove offset. 345 __ z_larl(Z_R1, firstHandler); // location of first handler 346 __ z_sllg(r_arg_result_type, r_arg_result_type, 3); // Each handler is 8 bytes long. 347 __ z_bc(MacroAssembler::bcondAlways, 0, r_arg_result_type, Z_R1); 348 349 __ align(handlerLen); 350 __ bind(firstHandler); 351 // T_BOOLEAN: 352 guarantee(T_BOOLEAN == 4, "check BasicType definition in globalDefinitions.hpp"); 353 __ z_st(Z_RET, 0, r_arg_result_addr); 354 __ z_br(Z_R14); // Return to caller. 355 __ align(handlerLen); 356 // T_CHAR: 357 guarantee(T_CHAR == T_BOOLEAN+1, "check BasicType definition in globalDefinitions.hpp"); 358 __ z_st(Z_RET, 0, r_arg_result_addr); 359 __ z_br(Z_R14); // Return to caller. 360 __ align(handlerLen); 361 // T_FLOAT: 362 guarantee(T_FLOAT == T_CHAR+1, "check BasicType definition in globalDefinitions.hpp"); 363 __ z_ste(Z_FRET, 0, r_arg_result_addr); 364 __ z_br(Z_R14); // Return to caller. 365 __ align(handlerLen); 366 // T_DOUBLE: 367 guarantee(T_DOUBLE == T_FLOAT+1, "check BasicType definition in globalDefinitions.hpp"); 368 __ z_std(Z_FRET, 0, r_arg_result_addr); 369 __ z_br(Z_R14); // Return to caller. 370 __ align(handlerLen); 371 // T_BYTE: 372 guarantee(T_BYTE == T_DOUBLE+1, "check BasicType definition in globalDefinitions.hpp"); 373 __ z_st(Z_RET, 0, r_arg_result_addr); 374 __ z_br(Z_R14); // Return to caller. 
375 __ align(handlerLen); 376 // T_SHORT: 377 guarantee(T_SHORT == T_BYTE+1, "check BasicType definition in globalDefinitions.hpp"); 378 __ z_st(Z_RET, 0, r_arg_result_addr); 379 __ z_br(Z_R14); // Return to caller. 380 __ align(handlerLen); 381 // T_INT: 382 guarantee(T_INT == T_SHORT+1, "check BasicType definition in globalDefinitions.hpp"); 383 __ z_st(Z_RET, 0, r_arg_result_addr); 384 __ z_br(Z_R14); // Return to caller. 385 __ align(handlerLen); 386 // T_LONG: 387 guarantee(T_LONG == T_INT+1, "check BasicType definition in globalDefinitions.hpp"); 388 __ z_stg(Z_RET, 0, r_arg_result_addr); 389 __ z_br(Z_R14); // Return to caller. 390 __ align(handlerLen); 391 // T_OBJECT: 392 guarantee(T_OBJECT == T_LONG+1, "check BasicType definition in globalDefinitions.hpp"); 393 __ z_stg(Z_RET, 0, r_arg_result_addr); 394 __ z_br(Z_R14); // Return to caller. 395 __ align(handlerLen); 396 // T_ARRAY: 397 guarantee(T_ARRAY == T_OBJECT+1, "check BasicType definition in globalDefinitions.hpp"); 398 __ z_stg(Z_RET, 0, r_arg_result_addr); 399 __ z_br(Z_R14); // Return to caller. 400 __ align(handlerLen); 401 // T_VOID: 402 guarantee(T_VOID == T_ARRAY+1, "check BasicType definition in globalDefinitions.hpp"); 403 __ z_stg(Z_RET, 0, r_arg_result_addr); 404 __ z_br(Z_R14); // Return to caller. 405 __ align(handlerLen); 406 // T_ADDRESS: 407 guarantee(T_ADDRESS == T_VOID+1, "check BasicType definition in globalDefinitions.hpp"); 408 __ z_stg(Z_RET, 0, r_arg_result_addr); 409 __ z_br(Z_R14); // Return to caller. 410 __ align(handlerLen); 411 // T_NARROWOOP: 412 guarantee(T_NARROWOOP == T_ADDRESS+1, "check BasicType definition in globalDefinitions.hpp"); 413 __ z_st(Z_RET, 0, r_arg_result_addr); 414 __ z_br(Z_R14); // Return to caller. 415 __ align(handlerLen); 416 BLOCK_COMMENT("} process result"); 417 } 418 return start; 419 } 420 421 // Return point for a Java call if there's an exception thrown in 422 // Java code. The exception is caught and transformed into a 423 // pending exception stored in JavaThread that can be tested from 424 // within the VM. 425 address generate_catch_exception() { 426 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 427 428 address start = __ pc(); 429 430 // 431 // Registers alive 432 // 433 // Z_thread 434 // Z_ARG1 - address of pending exception 435 // Z_ARG2 - return address in call stub 436 // 437 438 const Register exception_file = Z_R0; 439 const Register exception_line = Z_R1; 440 441 __ load_const_optimized(exception_file, (void*)__FILE__); 442 __ load_const_optimized(exception_line, (void*)__LINE__); 443 444 __ z_stg(Z_ARG1, thread_(pending_exception)); 445 // Store into `char *'. 446 __ z_stg(exception_file, thread_(exception_file)); 447 // Store into `int'. 448 __ z_st(exception_line, thread_(exception_line)); 449 450 // Complete return to VM. 451 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 452 453 // Continue in call stub. 454 __ z_br(Z_ARG2); 455 456 return start; 457 } 458 459 // Continuation point for runtime calls returning with a pending 460 // exception. The pending exception check happened in the runtime 461 // or native call stub. The pending exception in Thread is 462 // converted into a Java-level exception. 463 // 464 // Read: 465 // Z_R14: pc the runtime library callee wants to return to. 466 // Since the exception occurred in the callee, the return pc 467 // from the point of view of Java is the exception pc. 468 // 469 // Invalidate: 470 // Volatile registers (except below). 
471 // 472 // Update: 473 // Z_ARG1: exception 474 // (Z_R14 is unchanged and is live out). 475 // 476 address generate_forward_exception() { 477 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 478 address start = __ pc(); 479 480 #define pending_exception_offset in_bytes(Thread::pending_exception_offset()) 481 #ifdef ASSERT 482 // Get pending exception oop. 483 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 484 485 // Make sure that this code is only executed if there is a pending exception. 486 { 487 Label L; 488 __ z_ltgr(Z_ARG1, Z_ARG1); 489 __ z_brne(L); 490 __ stop("StubRoutines::forward exception: no pending exception (1)"); 491 __ bind(L); 492 } 493 494 __ verify_oop(Z_ARG1, "StubRoutines::forward exception: not an oop"); 495 #endif 496 497 __ z_lgr(Z_ARG2, Z_R14); // Copy exception pc into Z_ARG2. 498 __ save_return_pc(); 499 __ push_frame_abi160(0); 500 // Find exception handler. 501 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 502 Z_thread, 503 Z_ARG2); 504 // Copy handler's address. 505 __ z_lgr(Z_R1, Z_RET); 506 __ pop_frame(); 507 __ restore_return_pc(); 508 509 // Set up the arguments for the exception handler: 510 // - Z_ARG1: exception oop 511 // - Z_ARG2: exception pc 512 513 // Load pending exception oop. 514 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 515 516 // The exception pc is the return address in the caller, 517 // must load it into Z_ARG2 518 __ z_lgr(Z_ARG2, Z_R14); 519 520 #ifdef ASSERT 521 // Make sure exception is set. 522 { Label L; 523 __ z_ltgr(Z_ARG1, Z_ARG1); 524 __ z_brne(L); 525 __ stop("StubRoutines::forward exception: no pending exception (2)"); 526 __ bind(L); 527 } 528 #endif 529 // Clear the pending exception. 530 __ clear_mem(Address(Z_thread, pending_exception_offset), sizeof(void *)); 531 // Jump to exception handler 532 __ z_br(Z_R1 /*handler address*/); 533 534 return start; 535 536 #undef pending_exception_offset 537 } 538 539 // Continuation point for throwing of implicit exceptions that are 540 // not handled in the current activation. Fabricates an exception 541 // oop and initiates normal exception dispatching in this 542 // frame. Only callee-saved registers are preserved (through the 543 // normal RegisterMap handling). If the compiler 544 // needs all registers to be preserved between the fault point and 545 // the exception handler then it must assume responsibility for that 546 // in AbstractCompiler::continuation_for_implicit_null_exception or 547 // continuation_for_implicit_division_by_zero_exception. All other 548 // implicit exceptions (e.g., NullPointerException or 549 // AbstractMethodError on entry) are either at call sites or 550 // otherwise assume that stack unwinding will be initiated, so 551 // caller saved registers were assumed volatile in the compiler. 552 553 // Note that we generate only this stub into a RuntimeStub, because 554 // it needs to be properly traversed and ignored during GC, so we 555 // change the meaning of the "__" macro within this method. 556 557 // Note: the routine set_pc_not_at_call_for_caller in 558 // SharedRuntime.cpp requires that this code be generated into a 559 // RuntimeStub. 
560 #undef __ 561 #define __ masm-> 562 563 address generate_throw_exception(const char* name, address runtime_entry, 564 bool restore_saved_exception_pc, 565 Register arg1 = noreg, Register arg2 = noreg) { 566 int insts_size = 256; 567 int locs_size = 0; 568 CodeBuffer code(name, insts_size, locs_size); 569 MacroAssembler* masm = new MacroAssembler(&code); 570 int framesize_in_bytes; 571 address start = __ pc(); 572 573 __ save_return_pc(); 574 framesize_in_bytes = __ push_frame_abi160(0); 575 576 address frame_complete_pc = __ pc(); 577 if (restore_saved_exception_pc) { 578 __ unimplemented("StubGenerator::throw_exception", 74); 579 } 580 581 // Note that we always have a runtime stub frame on the top of stack at this point. 582 __ get_PC(Z_R1); 583 __ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1); 584 585 // Do the call. 586 BLOCK_COMMENT("call runtime_entry"); 587 __ call_VM_leaf(runtime_entry, Z_thread, arg1, arg2); 588 589 __ reset_last_Java_frame(); 590 591 #ifdef ASSERT 592 // Make sure that this code is only executed if there is a pending exception. 593 { Label L; 594 __ z_lg(Z_R0, 595 in_bytes(Thread::pending_exception_offset()), 596 Z_thread); 597 __ z_ltgr(Z_R0, Z_R0); 598 __ z_brne(L); 599 __ stop("StubRoutines::throw_exception: no pending exception"); 600 __ bind(L); 601 } 602 #endif 603 604 __ pop_frame(); 605 __ restore_return_pc(); 606 607 __ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry()); 608 __ z_br(Z_R1); 609 610 RuntimeStub* stub = 611 RuntimeStub::new_runtime_stub(name, &code, 612 frame_complete_pc - start, 613 framesize_in_bytes/wordSize, 614 NULL /*oop_maps*/, false); 615 616 return stub->entry_point(); 617 } 618 619 #undef __ 620 #ifdef PRODUCT 621 #define __ _masm-> 622 #else 623 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 624 #endif 625 626 // Support for uint StubRoutine::zarch::partial_subtype_check(Klass 627 // sub, Klass super); 628 // 629 // Arguments: 630 // ret : Z_RET, returned 631 // sub : Z_ARG2, argument, not changed 632 // super: Z_ARG3, argument, not changed 633 // 634 // raddr: Z_R14, blown by call 635 // 636 address generate_partial_subtype_check() { 637 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 638 Label miss; 639 640 address start = __ pc(); 641 642 const Register Rsubklass = Z_ARG2; // subklass 643 const Register Rsuperklass = Z_ARG3; // superklass 644 645 // No args, but tmp registers that are killed. 646 const Register Rlength = Z_ARG4; // cache array length 647 const Register Rarray_ptr = Z_ARG5; // Current value from cache array. 648 649 if (UseCompressedOops) { 650 assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub"); 651 } 652 653 // Always take the slow path (see SPARC). 654 __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, 655 Rarray_ptr, Rlength, NULL, &miss); 656 657 // Match falls through here. 658 __ clear_reg(Z_RET); // Zero indicates a match. Set EQ flag in CC. 659 __ z_br(Z_R14); 660 661 __ BIND(miss); 662 __ load_const_optimized(Z_RET, 1); // One indicates a miss. 663 __ z_ltgr(Z_RET, Z_RET); // Set NE flag in CR. 664 __ z_br(Z_R14); 665 666 return start; 667 } 668 669 // Return address of code to be called from code generated by 670 // MacroAssembler::verify_oop. 671 // 672 // Don't generate, rather use C++ code. 673 address generate_verify_oop_subroutine() { 674 // Don't generate a StubCodeMark, because no code is generated! 
675 // Generating the mark triggers notifying the oprofile jvmti agent 676 // about the dynamic code generation, but the stub without 677 // code (code_size == 0) confuses opjitconv 678 // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 679 680 address start = 0; 681 return start; 682 } 683 684 // Generate pre-write barrier for array. 685 // 686 // Input: 687 // addr - register containing starting address 688 // count - register containing element count 689 // 690 // The input registers are overwritten. 691 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 692 693 BarrierSet* const bs = Universe::heap()->barrier_set(); 694 switch (bs->kind()) { 695 case BarrierSet::G1SATBCTLogging: 696 // With G1, don't generate the call if we statically know that the target in uninitialized. 697 if (!dest_uninitialized) { 698 // Is marking active? 699 Label filtered; 700 Register Rtmp1 = Z_R0; 701 const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + 702 SATBMarkQueue::byte_offset_of_active()); 703 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 704 __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset)); 705 } else { 706 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); 707 __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset)); 708 } 709 __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently. 710 711 // __ push_frame_abi160(0); 712 (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers); 713 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count); 714 (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers); 715 // __ pop_frame(); 716 717 __ bind(filtered); 718 } 719 break; 720 case BarrierSet::CardTableForRS: 721 case BarrierSet::CardTableExtension: 722 case BarrierSet::ModRef: 723 break; 724 default: 725 ShouldNotReachHere(); 726 } 727 } 728 729 // Generate post-write barrier for array. 730 // 731 // Input: 732 // addr - register containing starting address 733 // count - register containing element count 734 // 735 // The input registers are overwritten. 736 void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) { 737 BarrierSet* const bs = Universe::heap()->barrier_set(); 738 switch (bs->kind()) { 739 case BarrierSet::G1SATBCTLogging: 740 { 741 if (branchToEnd) { 742 // __ push_frame_abi160(0); 743 (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers); 744 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); 745 (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers); 746 // __ pop_frame(); 747 } else { 748 // Tail call: call c and return to stub caller. 749 address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post); 750 if (Z_ARG1 != addr) __ z_lgr(Z_ARG1, addr); 751 if (Z_ARG2 != count) __ z_lgr(Z_ARG2, count); 752 __ load_const(Z_R1, entry_point); 753 __ z_br(Z_R1); // Branch without linking, callee will return to stub caller. 754 } 755 } 756 break; 757 case BarrierSet::CardTableForRS: 758 case BarrierSet::CardTableExtension: 759 // These cases formerly known as 760 // void array_store_check(Register addr, Register count, bool branchToEnd). 
761 { 762 NearLabel doXC, done; 763 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 764 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 765 assert_different_registers(Z_R0, Z_R1, addr, count); 766 767 // Nothing to do if count <= 0. 768 if (branchToEnd) { 769 __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done); 770 } else { 771 __ z_ltgr(count, count); 772 __ z_bcr(Assembler::bcondNotPositive, Z_R14); 773 } 774 775 // Note: We can't combine the shifts. We could lose a carry 776 // from calculating the array end address. 777 // count = (count-1)*BytesPerHeapOop + addr 778 // Count holds addr of last oop in array then. 779 __ z_sllg(count, count, LogBytesPerHeapOop); 780 __ add2reg_with_index(count, -BytesPerHeapOop, count, addr); 781 782 // Get base address of card table. 783 __ load_const_optimized(Z_R1, (address)ct->byte_map_base); 784 785 // count = (count>>shift) - (addr>>shift) 786 __ z_srlg(addr, addr, CardTableModRefBS::card_shift); 787 __ z_srlg(count, count, CardTableModRefBS::card_shift); 788 789 // Prefetch first elements of card table for update. 790 if (VM_Version::has_Prefetch()) { 791 __ z_pfd(0x02, 0, addr, Z_R1); 792 } 793 794 // Special case: clear just one byte. 795 __ clear_reg(Z_R0, true, false); // Used for doOneByte. 796 __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below. 797 __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr. 798 if (branchToEnd) { 799 __ z_brz(done); 800 } else { 801 __ z_bcr(Assembler::bcondZero, Z_R14); 802 } 803 804 __ z_cghi(count, 255); 805 __ z_brnh(doXC); 806 807 // MVCLE: clear a long area. 808 // Start addr of card table range = base + addr. 809 // # bytes in card table range = (count + 1) 810 __ add2reg_with_index(Z_R0, 0, Z_R1, addr); 811 __ add2reg(Z_R1, 1, count); 812 813 // dirty hack: 814 // There are just two callers. Both pass 815 // count in Z_ARG3 = Z_R4 816 // addr in Z_ARG2 = Z_R3 817 // ==> use Z_ARG2 as src len reg = 0 818 // Z_ARG1 as src addr (ignored) 819 assert(count == Z_ARG3, "count: unexpected register number"); 820 assert(addr == Z_ARG2, "addr: unexpected register number"); 821 __ clear_reg(Z_ARG2, true, false); 822 823 __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0); 824 825 if (branchToEnd) { 826 __ z_bru(done); 827 } else { 828 __ z_bcr(Assembler::bcondAlways, Z_R14); 829 } 830 831 // XC: clear a short area. 832 Label XC_template; // Instr template, never exec directly! 833 __ bind(XC_template); 834 __ z_xc(0, 0, addr, 0, addr); 835 836 __ bind(doXC); 837 // start addr of card table range = base + addr 838 // end addr of card table range = base + addr + count 839 __ add2reg_with_index(addr, 0, Z_R1, addr); 840 841 if (VM_Version::has_ExecuteExtensions()) { 842 __ z_exrl(count, XC_template); // Execute XC with var. len. 843 } else { 844 __ z_larl(Z_R1, XC_template); 845 __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len. 846 } 847 if (!branchToEnd) { 848 __ z_br(Z_R14); 849 } 850 851 __ bind(done); 852 } 853 break; 854 case BarrierSet::ModRef: 855 if (!branchToEnd) { __ z_br(Z_R14); } 856 break; 857 default: 858 ShouldNotReachHere(); 859 } 860 } 861 862 863 // This is to test that the count register contains a positive int value. 864 // Required because C2 does not respect int to long conversion for stub calls. 865 void assert_positive_int(Register count) { 866 #ifdef ASSERT 867 __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0. 
868 __ asm_assert_eq("missing zero extend", 0xAFFE); 869 #endif 870 } 871 872 // Generate overlap test for array copy stubs. 873 // If no actual overlap is detected, control is transferred to the 874 // "normal" copy stub (entry address passed in disjoint_copy_target). 875 // Otherwise, execution continues with the code generated by the 876 // caller of array_overlap_test. 877 // 878 // Input: 879 // Z_ARG1 - from 880 // Z_ARG2 - to 881 // Z_ARG3 - element count 882 void array_overlap_test(address disjoint_copy_target, int log2_elem_size) { 883 __ MacroAssembler::compare_and_branch_optimized(Z_ARG2, Z_ARG1, Assembler::bcondNotHigh, 884 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 885 886 Register index = Z_ARG3; 887 if (log2_elem_size > 0) { 888 __ z_sllg(Z_R1, Z_ARG3, log2_elem_size); // byte count 889 index = Z_R1; 890 } 891 __ add2reg_with_index(Z_R1, 0, index, Z_ARG1); // First byte after "from" range. 892 893 __ MacroAssembler::compare_and_branch_optimized(Z_R1, Z_ARG2, Assembler::bcondNotHigh, 894 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 895 896 // Destructive overlap: let caller generate code for that. 897 } 898 899 // Generate stub for disjoint array copy. If "aligned" is true, the 900 // "from" and "to" addresses are assumed to be heapword aligned. 901 // 902 // Arguments for generated stub: 903 // from: Z_ARG1 904 // to: Z_ARG2 905 // count: Z_ARG3 treated as signed 906 void generate_disjoint_copy(bool aligned, int element_size, 907 bool branchToEnd, 908 bool restoreArgs) { 909 // This is the zarch specific stub generator for general array copy tasks. 910 // It has the following prereqs and features: 911 // 912 // - No destructive overlap allowed (else unpredictable results). 913 // - Destructive overlap does not exist if the leftmost byte of the target 914 // does not coincide with any of the source bytes (except the leftmost). 915 // 916 // Register usage upon entry: 917 // Z_ARG1 == Z_R2 : address of source array 918 // Z_ARG2 == Z_R3 : address of target array 919 // Z_ARG3 == Z_R4 : length of operands (# of elements on entry) 920 // 921 // Register usage within the generator: 922 // - Z_R0 and Z_R1 are KILLed by the stub routine (target addr/len). 923 // Used as pair register operand in complex moves, scratch registers anyway. 924 // - Z_R5 is KILLed by the stub routine (source register pair addr/len) (even/odd reg). 925 // Same as R0/R1, but no scratch register. 926 // - Z_ARG1, Z_ARG2, Z_ARG3 are USEd but preserved by the stub routine, 927 // but they might get temporarily overwritten. 928 929 Register save_reg = Z_ARG4; // (= Z_R5), holds original target operand address for restore. 930 931 { 932 Register llen_reg = Z_R1; // Holds left operand len (odd reg). 933 Register laddr_reg = Z_R0; // Holds left operand addr (even reg), overlaps with data_reg. 934 Register rlen_reg = Z_R5; // Holds right operand len (odd reg), overlaps with save_reg. 935 Register raddr_reg = Z_R4; // Holds right operand addr (even reg), overlaps with len_reg. 936 937 Register data_reg = Z_R0; // Holds copied data chunk in alignment process and copy loop. 938 Register len_reg = Z_ARG3; // Holds operand len (#elements at entry, #bytes shortly after). 939 Register dst_reg = Z_ARG2; // Holds left (target) operand addr. 940 Register src_reg = Z_ARG1; // Holds right (source) operand addr. 
941 942 Label doMVCLOOP, doMVCLOOPcount, doMVCLOOPiterate; 943 Label doMVCUnrolled; 944 NearLabel doMVC, doMVCgeneral, done; 945 Label MVC_template; 946 address pcMVCblock_b, pcMVCblock_e; 947 948 bool usedMVCLE = true; 949 bool usedMVCLOOP = true; 950 bool usedMVCUnrolled = false; 951 bool usedMVC = false; 952 bool usedMVCgeneral = false; 953 954 int stride; 955 Register stride_reg; 956 Register ix_reg; 957 958 assert((element_size<=256) && (256%element_size == 0), "element size must be <= 256, power of 2"); 959 unsigned int log2_size = exact_log2(element_size); 960 961 switch (element_size) { 962 case 1: BLOCK_COMMENT("ARRAYCOPY DISJOINT byte {"); break; 963 case 2: BLOCK_COMMENT("ARRAYCOPY DISJOINT short {"); break; 964 case 4: BLOCK_COMMENT("ARRAYCOPY DISJOINT int {"); break; 965 case 8: BLOCK_COMMENT("ARRAYCOPY DISJOINT long {"); break; 966 default: BLOCK_COMMENT("ARRAYCOPY DISJOINT {"); break; 967 } 968 969 assert_positive_int(len_reg); 970 971 BLOCK_COMMENT("preparation {"); 972 973 // No copying if len <= 0. 974 if (branchToEnd) { 975 __ compare64_and_branch(len_reg, (intptr_t) 0, Assembler::bcondNotHigh, done); 976 } else { 977 if (VM_Version::has_CompareBranch()) { 978 __ z_cgib(len_reg, 0, Assembler::bcondNotHigh, 0, Z_R14); 979 } else { 980 __ z_ltgr(len_reg, len_reg); 981 __ z_bcr(Assembler::bcondNotPositive, Z_R14); 982 } 983 } 984 985 // Prefetch just one cache line. Speculative opt for short arrays. 986 // Do not use Z_R1 in prefetch. Is undefined here. 987 if (VM_Version::has_Prefetch()) { 988 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 989 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 990 } 991 992 BLOCK_COMMENT("} preparation"); 993 994 // Save args only if really needed. 995 // Keep len test local to branch. Is generated only once. 996 997 BLOCK_COMMENT("mode selection {"); 998 999 // Special handling for arrays with only a few elements. 1000 // Nothing fancy: just an executed MVC. 1001 if (log2_size > 0) { 1002 __ z_sllg(Z_R1, len_reg, log2_size); // Remember #bytes in Z_R1. 1003 } 1004 if (element_size != 8) { 1005 __ z_cghi(len_reg, 256/element_size); 1006 __ z_brnh(doMVC); 1007 usedMVC = true; 1008 } 1009 if (element_size == 8) { // Long and oop arrays are always aligned. 1010 __ z_cghi(len_reg, 256/element_size); 1011 __ z_brnh(doMVCUnrolled); 1012 usedMVCUnrolled = true; 1013 } 1014 1015 // Prefetch another cache line. We, for sure, have more than one line to copy. 1016 if (VM_Version::has_Prefetch()) { 1017 __ z_pfd(0x01, 256, Z_R0, src_reg); // Fetch access. 1018 __ z_pfd(0x02, 256, Z_R0, dst_reg); // Store access. 1019 } 1020 1021 if (restoreArgs) { 1022 // Remember entry value of ARG2 to restore all arguments later from that knowledge. 1023 __ z_lgr(save_reg, dst_reg); 1024 } 1025 1026 __ z_cghi(len_reg, 4096/element_size); 1027 if (log2_size == 0) { 1028 __ z_lgr(Z_R1, len_reg); // Init Z_R1 with #bytes 1029 } 1030 __ z_brnh(doMVCLOOP); 1031 1032 // Fall through to MVCLE case. 1033 1034 BLOCK_COMMENT("} mode selection"); 1035 1036 // MVCLE: for long arrays 1037 // DW aligned: Best performance for sizes > 4kBytes. 1038 // unaligned: Least complex for sizes > 256 bytes. 1039 if (usedMVCLE) { 1040 BLOCK_COMMENT("mode MVCLE {"); 1041 1042 // Setup registers for mvcle. 1043 //__ z_lgr(llen_reg, len_reg);// r1 <- r4 #bytes already in Z_R1, aka llen_reg. 
1044 __ z_lgr(laddr_reg, dst_reg); // r0 <- r3 1045 __ z_lgr(raddr_reg, src_reg); // r4 <- r2 1046 __ z_lgr(rlen_reg, llen_reg); // r5 <- r1 1047 1048 __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb0); // special: bypass cache 1049 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb8); // special: Hold data in cache. 1050 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0); 1051 1052 if (restoreArgs) { 1053 // MVCLE updates the source (Z_R4,Z_R5) and target (Z_R0,Z_R1) register pairs. 1054 // Dst_reg (Z_ARG2) and src_reg (Z_ARG1) are left untouched. No restore required. 1055 // Len_reg (Z_ARG3) is destroyed and must be restored. 1056 __ z_slgr(laddr_reg, dst_reg); // copied #bytes 1057 if (log2_size > 0) { 1058 __ z_srag(Z_ARG3, laddr_reg, log2_size); // Convert back to #elements. 1059 } else { 1060 __ z_lgr(Z_ARG3, laddr_reg); 1061 } 1062 } 1063 if (branchToEnd) { 1064 __ z_bru(done); 1065 } else { 1066 __ z_br(Z_R14); 1067 } 1068 BLOCK_COMMENT("} mode MVCLE"); 1069 } 1070 // No fallthru possible here. 1071 1072 // MVCUnrolled: for short, aligned arrays. 1073 1074 if (usedMVCUnrolled) { 1075 BLOCK_COMMENT("mode MVC unrolled {"); 1076 stride = 8; 1077 1078 // Generate unrolled MVC instructions. 1079 for (int ii = 32; ii > 1; ii--) { 1080 __ z_mvc(0, ii * stride-1, dst_reg, 0, src_reg); // ii*8 byte copy 1081 if (branchToEnd) { 1082 __ z_bru(done); 1083 } else { 1084 __ z_br(Z_R14); 1085 } 1086 } 1087 1088 pcMVCblock_b = __ pc(); 1089 __ z_mvc(0, 1 * stride-1, dst_reg, 0, src_reg); // 8 byte copy 1090 if (branchToEnd) { 1091 __ z_bru(done); 1092 } else { 1093 __ z_br(Z_R14); 1094 } 1095 1096 pcMVCblock_e = __ pc(); 1097 Label MVC_ListEnd; 1098 __ bind(MVC_ListEnd); 1099 1100 // This is an absolute fast path: 1101 // - Array len in bytes must be not greater than 256. 1102 // - Array len in bytes must be an integer mult of DW 1103 // to save expensive handling of trailing bytes. 1104 // - Argument restore is not done, 1105 // i.e. previous code must not alter arguments (this code doesn't either). 1106 1107 __ bind(doMVCUnrolled); 1108 1109 // Avoid mul, prefer shift where possible. 1110 // Combine shift right (for #DW) with shift left (for block size). 1111 // Set CC for zero test below (asm_assert). 1112 // Note: #bytes comes in Z_R1, #DW in len_reg. 1113 unsigned int MVCblocksize = pcMVCblock_e - pcMVCblock_b; 1114 unsigned int logMVCblocksize = 0xffffffffU; // Pacify compiler ("used uninitialized" warning). 1115 1116 if (log2_size > 0) { // Len was scaled into Z_R1. 1117 switch (MVCblocksize) { 1118 1119 case 8: logMVCblocksize = 3; 1120 __ z_ltgr(Z_R0, Z_R1); // #bytes is index 1121 break; // reasonable size, use shift 1122 1123 case 16: logMVCblocksize = 4; 1124 __ z_slag(Z_R0, Z_R1, logMVCblocksize-log2_size); 1125 break; // reasonable size, use shift 1126 1127 default: logMVCblocksize = 0; 1128 __ z_ltgr(Z_R0, len_reg); // #DW for mul 1129 break; // all other sizes: use mul 1130 } 1131 } else { 1132 guarantee(log2_size, "doMVCUnrolled: only for DW entities"); 1133 } 1134 1135 // This test (and branch) is redundant. Previous code makes sure that 1136 // - element count > 0 1137 // - element size == 8. 1138 // Thus, len reg should never be zero here. We insert an asm_assert() here, 1139 // just to double-check and to be on the safe side. 1140 __ asm_assert(false, "zero len cannot occur", 99); 1141 1142 __ z_larl(Z_R1, MVC_ListEnd); // Get addr of last instr block. 1143 // Avoid mul, prefer shift where possible. 
1144 if (logMVCblocksize == 0) { 1145 __ z_mghi(Z_R0, MVCblocksize); 1146 } 1147 __ z_slgr(Z_R1, Z_R0); 1148 __ z_br(Z_R1); 1149 BLOCK_COMMENT("} mode MVC unrolled"); 1150 } 1151 // No fallthru possible here. 1152 1153 // MVC execute template 1154 // Must always generate. Usage may be switched on below. 1155 // There is no suitable place after here to put the template. 1156 __ bind(MVC_template); 1157 __ z_mvc(0,0,dst_reg,0,src_reg); // Instr template, never exec directly! 1158 1159 1160 // MVC Loop: for medium-sized arrays 1161 1162 // Only for DW aligned arrays (src and dst). 1163 // #bytes to copy must be at least 256!!! 1164 // Non-aligned cases handled separately. 1165 stride = 256; 1166 stride_reg = Z_R1; // Holds #bytes when control arrives here. 1167 ix_reg = Z_ARG3; // Alias for len_reg. 1168 1169 1170 if (usedMVCLOOP) { 1171 BLOCK_COMMENT("mode MVC loop {"); 1172 __ bind(doMVCLOOP); 1173 1174 __ z_lcgr(ix_reg, Z_R1); // Ix runs from -(n-2)*stride to 1*stride (inclusive). 1175 __ z_llill(stride_reg, stride); 1176 __ add2reg(ix_reg, 2*stride); // Thus: increment ix by 2*stride. 1177 1178 __ bind(doMVCLOOPiterate); 1179 __ z_mvc(0, stride-1, dst_reg, 0, src_reg); 1180 __ add2reg(dst_reg, stride); 1181 __ add2reg(src_reg, stride); 1182 __ bind(doMVCLOOPcount); 1183 __ z_brxlg(ix_reg, stride_reg, doMVCLOOPiterate); 1184 1185 // Don 't use add2reg() here, since we must set the condition code! 1186 __ z_aghi(ix_reg, -2*stride); // Compensate incr from above: zero diff means "all copied". 1187 1188 if (restoreArgs) { 1189 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1190 __ z_brnz(doMVCgeneral); // We're not done yet, ix_reg is not zero. 1191 1192 // ARG1, ARG2, and ARG3 were altered by the code above, so restore them building on save_reg. 1193 __ z_slgr(dst_reg, save_reg); // copied #bytes 1194 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored) 1195 if (log2_size) { 1196 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3. 1197 } else { 1198 __ z_lgr(Z_ARG3, dst_reg); 1199 } 1200 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 1201 1202 if (branchToEnd) { 1203 __ z_bru(done); 1204 } else { 1205 __ z_br(Z_R14); 1206 } 1207 1208 } else { 1209 if (branchToEnd) { 1210 __ z_brz(done); // CC set by aghi instr. 1211 } else { 1212 __ z_bcr(Assembler::bcondZero, Z_R14); // We're all done if zero. 1213 } 1214 1215 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1216 // __ z_bru(doMVCgeneral); // fallthru 1217 } 1218 usedMVCgeneral = true; 1219 BLOCK_COMMENT("} mode MVC loop"); 1220 } 1221 // Fallthru to doMVCgeneral 1222 1223 // MVCgeneral: for short, unaligned arrays, after other copy operations 1224 1225 // Somewhat expensive due to use of EX instruction, but simple. 1226 if (usedMVCgeneral) { 1227 BLOCK_COMMENT("mode MVC general {"); 1228 __ bind(doMVCgeneral); 1229 1230 __ add2reg(len_reg, -1, Z_R1); // Get #bytes-1 for EXECUTE. 1231 if (VM_Version::has_ExecuteExtensions()) { 1232 __ z_exrl(len_reg, MVC_template); // Execute MVC with variable length. 1233 } else { 1234 __ z_larl(Z_R1, MVC_template); // Get addr of instr template. 1235 __ z_ex(len_reg, 0, Z_R0, Z_R1); // Execute MVC with variable length. 
1236 } // penalty: 9 ticks 1237 1238 if (restoreArgs) { 1239 // ARG1, ARG2, and ARG3 were altered by code executed before, so restore them building on save_reg 1240 __ z_slgr(dst_reg, save_reg); // Copied #bytes without the "doMVCgeneral" chunk 1241 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored), was not advanced for "doMVCgeneral" chunk 1242 __ add2reg_with_index(dst_reg, 1, len_reg, dst_reg); // Len of executed MVC was not accounted for, yet. 1243 if (log2_size) { 1244 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3 1245 } else { 1246 __ z_lgr(Z_ARG3, dst_reg); 1247 } 1248 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 1249 } 1250 1251 if (usedMVC) { 1252 if (branchToEnd) { 1253 __ z_bru(done); 1254 } else { 1255 __ z_br(Z_R14); 1256 } 1257 } else { 1258 if (!branchToEnd) __ z_br(Z_R14); 1259 } 1260 BLOCK_COMMENT("} mode MVC general"); 1261 } 1262 // Fallthru possible if following block not generated. 1263 1264 // MVC: for short, unaligned arrays 1265 1266 // Somewhat expensive due to use of EX instruction, but simple. penalty: 9 ticks. 1267 // Differs from doMVCgeneral in reconstruction of ARG2, ARG3, and ARG4. 1268 if (usedMVC) { 1269 BLOCK_COMMENT("mode MVC {"); 1270 __ bind(doMVC); 1271 1272 // get #bytes-1 for EXECUTE 1273 if (log2_size) { 1274 __ add2reg(Z_R1, -1); // Length was scaled into Z_R1. 1275 } else { 1276 __ add2reg(Z_R1, -1, len_reg); // Length was not scaled. 1277 } 1278 1279 if (VM_Version::has_ExecuteExtensions()) { 1280 __ z_exrl(Z_R1, MVC_template); // Execute MVC with variable length. 1281 } else { 1282 __ z_lgr(Z_R0, Z_R5); // Save ARG4, may be unnecessary. 1283 __ z_larl(Z_R5, MVC_template); // Get addr of instr template. 1284 __ z_ex(Z_R1, 0, Z_R0, Z_R5); // Execute MVC with variable length. 1285 __ z_lgr(Z_R5, Z_R0); // Restore ARG4, may be unnecessary. 1286 } 1287 1288 if (!branchToEnd) { 1289 __ z_br(Z_R14); 1290 } 1291 BLOCK_COMMENT("} mode MVC"); 1292 } 1293 1294 __ bind(done); 1295 1296 switch (element_size) { 1297 case 1: BLOCK_COMMENT("} ARRAYCOPY DISJOINT byte "); break; 1298 case 2: BLOCK_COMMENT("} ARRAYCOPY DISJOINT short"); break; 1299 case 4: BLOCK_COMMENT("} ARRAYCOPY DISJOINT int "); break; 1300 case 8: BLOCK_COMMENT("} ARRAYCOPY DISJOINT long "); break; 1301 default: BLOCK_COMMENT("} ARRAYCOPY DISJOINT "); break; 1302 } 1303 } 1304 } 1305 1306 // Generate stub for conjoint array copy. If "aligned" is true, the 1307 // "from" and "to" addresses are assumed to be heapword aligned. 1308 // 1309 // Arguments for generated stub: 1310 // from: Z_ARG1 1311 // to: Z_ARG2 1312 // count: Z_ARG3 treated as signed 1313 void generate_conjoint_copy(bool aligned, int element_size, bool branchToEnd) { 1314 1315 // This is the zarch specific stub generator for general array copy tasks. 1316 // It has the following prereqs and features: 1317 // 1318 // - Destructive overlap exists and is handled by reverse copy. 1319 // - Destructive overlap exists if the leftmost byte of the target 1320 // does coincide with any of the source bytes (except the leftmost). 1321 // - Z_R0 and Z_R1 are KILLed by the stub routine (data and stride) 1322 // - Z_ARG1 and Z_ARG2 are USEd but preserved by the stub routine. 1323 // - Z_ARG3 is USED but preserved by the stub routine. 1324 // - Z_ARG4 is used as index register and is thus KILLed. 1325 // 1326 { 1327 Register stride_reg = Z_R1; // Stride & compare value in loop (negative element_size). 1328 Register data_reg = Z_R0; // Holds value of currently processed element. 
1329 Register ix_reg = Z_ARG4; // Holds byte index of currently processed element. 1330 Register len_reg = Z_ARG3; // Holds length (in #elements) of arrays. 1331 Register dst_reg = Z_ARG2; // Holds left operand addr. 1332 Register src_reg = Z_ARG1; // Holds right operand addr. 1333 1334 assert(256%element_size == 0, "Element size must be power of 2."); 1335 assert(element_size <= 8, "Can't handle more than DW units."); 1336 1337 switch (element_size) { 1338 case 1: BLOCK_COMMENT("ARRAYCOPY CONJOINT byte {"); break; 1339 case 2: BLOCK_COMMENT("ARRAYCOPY CONJOINT short {"); break; 1340 case 4: BLOCK_COMMENT("ARRAYCOPY CONJOINT int {"); break; 1341 case 8: BLOCK_COMMENT("ARRAYCOPY CONJOINT long {"); break; 1342 default: BLOCK_COMMENT("ARRAYCOPY CONJOINT {"); break; 1343 } 1344 1345 assert_positive_int(len_reg); 1346 1347 if (VM_Version::has_Prefetch()) { 1348 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 1349 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 1350 } 1351 1352 unsigned int log2_size = exact_log2(element_size); 1353 if (log2_size) { 1354 __ z_sllg(ix_reg, len_reg, log2_size); 1355 } else { 1356 __ z_lgr(ix_reg, len_reg); 1357 } 1358 1359 // Optimize reverse copy loop. 1360 // Main loop copies DW units which may be unaligned. Unaligned access adds some penalty ticks. 1361 // Unaligned DW access (neither fetch nor store) is DW-atomic, but should be alignment-atomic. 1362 // Preceding the main loop, some bytes are copied to obtain a DW-multiple remaining length. 1363 1364 Label countLoop1; 1365 Label copyLoop1; 1366 Label skipBY; 1367 Label skipHW; 1368 int stride = -8; 1369 1370 __ load_const_optimized(stride_reg, stride); // Prepare for DW copy loop. 1371 1372 if (element_size == 8) // Nothing to do here. 1373 __ z_bru(countLoop1); 1374 else { // Do not generate dead code. 1375 __ z_tmll(ix_reg, 7); // Check the "odd" bits. 1376 __ z_bre(countLoop1); // There are none, very good! 1377 } 1378 1379 if (log2_size == 0) { // Handle leftover Byte. 1380 __ z_tmll(ix_reg, 1); 1381 __ z_bre(skipBY); 1382 __ z_lb(data_reg, -1, ix_reg, src_reg); 1383 __ z_stcy(data_reg, -1, ix_reg, dst_reg); 1384 __ add2reg(ix_reg, -1); // Decrement delayed to avoid AGI. 1385 __ bind(skipBY); 1386 // fallthru 1387 } 1388 if (log2_size <= 1) { // Handle leftover HW. 1389 __ z_tmll(ix_reg, 2); 1390 __ z_bre(skipHW); 1391 __ z_lhy(data_reg, -2, ix_reg, src_reg); 1392 __ z_sthy(data_reg, -2, ix_reg, dst_reg); 1393 __ add2reg(ix_reg, -2); // Decrement delayed to avoid AGI. 1394 __ bind(skipHW); 1395 __ z_tmll(ix_reg, 4); 1396 __ z_bre(countLoop1); 1397 // fallthru 1398 } 1399 if (log2_size <= 2) { // There are just 4 bytes (left) that need to be copied. 1400 __ z_ly(data_reg, -4, ix_reg, src_reg); 1401 __ z_sty(data_reg, -4, ix_reg, dst_reg); 1402 __ add2reg(ix_reg, -4); // Decrement delayed to avoid AGI. 1403 __ z_bru(countLoop1); 1404 } 1405 1406 // Control can never get to here. Never! Never ever! 
1407 __ z_illtrap(0x99); 1408 __ bind(copyLoop1); 1409 __ z_lg(data_reg, 0, ix_reg, src_reg); 1410 __ z_stg(data_reg, 0, ix_reg, dst_reg); 1411 __ bind(countLoop1); 1412 __ z_brxhg(ix_reg, stride_reg, copyLoop1); 1413 1414 if (!branchToEnd) 1415 __ z_br(Z_R14); 1416 1417 switch (element_size) { 1418 case 1: BLOCK_COMMENT("} ARRAYCOPY CONJOINT byte "); break; 1419 case 2: BLOCK_COMMENT("} ARRAYCOPY CONJOINT short"); break; 1420 case 4: BLOCK_COMMENT("} ARRAYCOPY CONJOINT int "); break; 1421 case 8: BLOCK_COMMENT("} ARRAYCOPY CONJOINT long "); break; 1422 default: BLOCK_COMMENT("} ARRAYCOPY CONJOINT "); break; 1423 } 1424 } 1425 } 1426 1427 // Generate stub for disjoint byte copy. If "aligned" is true, the 1428 // "from" and "to" addresses are assumed to be heapword aligned. 1429 address generate_disjoint_byte_copy(bool aligned, const char * name) { 1430 StubCodeMark mark(this, "StubRoutines", name); 1431 1432 // This is the zarch specific stub generator for byte array copy. 1433 // Refer to generate_disjoint_copy for a list of prereqs and features: 1434 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1435 generate_disjoint_copy(aligned, 1, false, false); 1436 return __ addr_at(start_off); 1437 } 1438 1439 1440 address generate_disjoint_short_copy(bool aligned, const char * name) { 1441 StubCodeMark mark(this, "StubRoutines", name); 1442 // This is the zarch specific stub generator for short array copy. 1443 // Refer to generate_disjoint_copy for a list of prereqs and features: 1444 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1445 generate_disjoint_copy(aligned, 2, false, false); 1446 return __ addr_at(start_off); 1447 } 1448 1449 1450 address generate_disjoint_int_copy(bool aligned, const char * name) { 1451 StubCodeMark mark(this, "StubRoutines", name); 1452 // This is the zarch specific stub generator for int array copy. 1453 // Refer to generate_disjoint_copy for a list of prereqs and features: 1454 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1455 generate_disjoint_copy(aligned, 4, false, false); 1456 return __ addr_at(start_off); 1457 } 1458 1459 1460 address generate_disjoint_long_copy(bool aligned, const char * name) { 1461 StubCodeMark mark(this, "StubRoutines", name); 1462 // This is the zarch specific stub generator for long array copy. 1463 // Refer to generate_disjoint_copy for a list of prereqs and features: 1464 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1465 generate_disjoint_copy(aligned, 8, false, false); 1466 return __ addr_at(start_off); 1467 } 1468 1469 1470 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1471 StubCodeMark mark(this, "StubRoutines", name); 1472 // This is the zarch specific stub generator for oop array copy. 1473 // Refer to generate_disjoint_copy for a list of prereqs and features. 1474 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1475 unsigned int size = UseCompressedOops ? 
4 : 8; 1476 1477 gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized); 1478 1479 generate_disjoint_copy(aligned, size, true, true); 1480 1481 gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false); 1482 1483 return __ addr_at(start_off); 1484 } 1485 1486 1487 address generate_conjoint_byte_copy(bool aligned, const char * name) { 1488 StubCodeMark mark(this, "StubRoutines", name); 1489 // This is the zarch specific stub generator for overlapping byte array copy. 1490 // Refer to generate_conjoint_copy for a list of prereqs and features: 1491 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1492 address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy() 1493 : StubRoutines::jbyte_disjoint_arraycopy(); 1494 1495 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint. 1496 generate_conjoint_copy(aligned, 1, false); 1497 1498 return __ addr_at(start_off); 1499 } 1500 1501 1502 address generate_conjoint_short_copy(bool aligned, const char * name) { 1503 StubCodeMark mark(this, "StubRoutines", name); 1504 // This is the zarch specific stub generator for overlapping short array copy. 1505 // Refer to generate_conjoint_copy for a list of prereqs and features: 1506 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1507 address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy() 1508 : StubRoutines::jshort_disjoint_arraycopy(); 1509 1510 array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint. 1511 generate_conjoint_copy(aligned, 2, false); 1512 1513 return __ addr_at(start_off); 1514 } 1515 1516 address generate_conjoint_int_copy(bool aligned, const char * name) { 1517 StubCodeMark mark(this, "StubRoutines", name); 1518 // This is the zarch specific stub generator for overlapping int array copy. 1519 // Refer to generate_conjoint_copy for a list of prereqs and features: 1520 1521 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1522 address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy() 1523 : StubRoutines::jint_disjoint_arraycopy(); 1524 1525 array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint. 1526 generate_conjoint_copy(aligned, 4, false); 1527 1528 return __ addr_at(start_off); 1529 } 1530 1531 address generate_conjoint_long_copy(bool aligned, const char * name) { 1532 StubCodeMark mark(this, "StubRoutines", name); 1533 // This is the zarch specific stub generator for overlapping long array copy. 1534 // Refer to generate_conjoint_copy for a list of prereqs and features: 1535 1536 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1537 address nooverlap_target = aligned ? StubRoutines::arrayof_jlong_disjoint_arraycopy() 1538 : StubRoutines::jlong_disjoint_arraycopy(); 1539 1540 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint. 1541 generate_conjoint_copy(aligned, 8, false); 1542 1543 return __ addr_at(start_off); 1544 } 1545 1546 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1547 StubCodeMark mark(this, "StubRoutines", name); 1548 // This is the zarch specific stub generator for overlapping oop array copy. 1549 // Refer to generate_conjoint_copy for a list of prereqs and features. 1550 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 
    unsigned int size  = UseCompressedOops ? 4 : 8;
    unsigned int shift = UseCompressedOops ? 2 : 3;

    address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
                                       : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);

    // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
    array_overlap_test(nooverlap_target, shift);  // Branch away to nooverlap_target if disjoint.

    gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);

    generate_conjoint_copy(aligned, size, true);  // Must preserve ARG2, ARG3.

    gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);

    return __ addr_at(start_off);
  }


  void generate_arraycopy_stubs() {

    // Note: the disjoint stubs must be generated first, as some of
    // the conjoint stubs use them.
    StubRoutines::_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jint_disjoint_arraycopy       = generate_disjoint_int_copy  (false, "jint_disjoint_arraycopy");
    StubRoutines::_jlong_disjoint_arraycopy      = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
    StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_oop_copy  (false, "oop_disjoint_arraycopy", false);
    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy  (false, "oop_disjoint_arraycopy_uninit", true);

    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy  (true, "arrayof_jint_disjoint_arraycopy");
    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy");
    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy  (true, "arrayof_oop_disjoint_arraycopy", false);
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy  (true, "arrayof_oop_disjoint_arraycopy_uninit", true);

    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy (false, "jbyte_arraycopy");
    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy  (false, "jint_arraycopy");
    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy (false, "jlong_arraycopy");
    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy  (false, "oop_arraycopy", false);
    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy  (false, "oop_arraycopy_uninit", true);

    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy");
    StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
    StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy  (true, "arrayof_jint_arraycopy");
    StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy");
    StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy  (true, "arrayof_oop_arraycopy", false);
    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy  (true, "arrayof_oop_arraycopy_uninit", true);
  }

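  // SafeFetch is used to read from memory that may be concurrently unmapped,
  // without crashing the VM. A typical use on the C++ side looks like this
  // (sketch):
  //
  //   int v = SafeFetch32(addr, -1);  // yields *addr, or -1 if the access faults
  //
  // If the load at *fault_pc faults, the VM's signal handler is expected to
  // resume execution at *continuation_pc, where the preloaded errValue
  // becomes the result.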
  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {

    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   Z_ARG1 = adr
    //   Z_ARG2 = errValue
    //
    // result:
    //   Z_RET  = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // entry point
    // Load *adr into Z_ARG2, may fault.
    *entry = *fault_pc = __ pc();
    switch (size) {
      case 4:
        // Sign extended int32_t.
        __ z_lgf(Z_ARG2, 0, Z_ARG1);
        break;
      case 8:
        // int64_t
        __ z_lg(Z_ARG2, 0, Z_ARG1);
        break;
      default:
        ShouldNotReachHere();
    }

    // Return errValue or *adr.
    *continuation_pc = __ pc();
    __ z_lgr(Z_RET, Z_ARG2);
    __ z_br(Z_R14);
  }

  // Call interface for AES_encryptBlock, AES_decryptBlock stubs.
  //
  //   Z_ARG1 - source data block. Ptr to leftmost byte to be processed.
  //   Z_ARG2 - destination data block. Ptr to leftmost byte to be stored.
  //            For in-place encryption/decryption, ARG1 and ARG2 can point
  //            to the same piece of storage.
  //   Z_ARG3 - Crypto key address (expanded key). The first n bits of
  //            the expanded key constitute the original AES-<n> key (see below).
  //
  //   Z_RET  - return value. First unprocessed byte offset in src buffer.
  //
  // Some remarks:
  //   The crypto key, as passed from the caller to these encryption stubs,
  //   is a so-called expanded key. It is derived from the original key
  //   by the Rijndael key schedule, see http://en.wikipedia.org/wiki/Rijndael_key_schedule
  //   With the expanded key, the cipher/decipher task is decomposed into
  //   multiple, less complex steps, called rounds. Sun SPARC and Intel
  //   processors apparently implement support for those less complex steps.
  //   z/Architecture provides instructions for full cipher/decipher complexity.
  //   Therefore, we need the original key here, not the expanded key.
  //   Luckily, the first n bits of an AES-<n> expanded key are formed
  //   by the original key itself. That takes us out of trouble. :-)
  //   The key length (in bytes) relation is as follows:
  //     original    expanded   rounds   key bit   keylen
  //     key bytes   key bytes           length    in words
  //        16          176       11       128       44
  //        24          208       13       192       52
  //        32          240       15       256       60
  //
  // The crypto instructions used in the AES* stubs have some specific register requirements.
  //   Z_R0 holds the crypto function code. Please refer to the KM/KMC instruction
  //        description in the "z/Architecture Principles of Operation" manual for details.
  //   Z_R1 holds the parameter block address. The parameter block contains the cryptographic key
  //        (KM instruction) and the chaining value (KMC instruction).
  //   dst  must designate an even-numbered register, holding the address of the output message.
  //   src  must designate an even/odd register pair, holding the address/length of the original message.

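  // Example: for AES-192 the expanded key is passed as an int array of 52
  // words. The stubs below can therefore dispatch on a comparison of keylen
  // against 52: keylen < 52 selects AES-128 (44 words), keylen == 52 selects
  // AES-192, and keylen > 52 selects AES-256 (60 words).
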
  // Helper function which generates code to
  //  - load the function code into register fCode (== Z_R0),
  //  - load the data block length (depends on cipher function) into register srclen.
  // The is_decipher argument switches between the cipher and decipher function codes.
  void generate_load_AES_fCode(Register keylen, Register fCode, Register srclen, bool is_decipher) {

    BLOCK_COMMENT("Set fCode {"); {
      Label fCode_set;
      int   mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
      bool  identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk)
                                    && (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk);
      // Expanded key length is 44/52/60 * 4 bytes for AES-128/AES-192/AES-256.
      __ z_cghi(keylen, 52);
      __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk);
      }
      __ z_brh(fCode_set);  // keyLen >  52: AES256

      __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES192_dataBlk);
      }
      __ z_bre(fCode_set);  // keyLen == 52: AES192

      __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
      }
      // __ z_brl(fCode_set);  // keyLen <  52: AES128  // fallthru
      __ bind(fCode_set);
      if (identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
      }
    }
    BLOCK_COMMENT("} Set fCode");
  }

  // Push a parameter block for the cipher/decipher instruction on the stack.
  // NOTE:
  //   Before returning, the stub has to copy the chaining value from
  //   the parmBlk, where it was updated by the crypto instruction, back
  //   to the chaining value array the address of which was passed in the cv argument.
  //   As all the available registers are used and modified by KMC, we need to save
  //   the key length across the KMC instruction. We do so by spilling it to the stack,
  //   just preceding the parmBlk (at (parmBlk - 8)).
  void generate_push_parmBlk(Register keylen, Register fCode, Register parmBlk, Register key, Register cv, bool is_decipher) {
    const int AES_parmBlk_align    = 32;
    const int AES_parmBlk_addspace = AES_parmBlk_align;  // Must be a multiple of AES_parmBlk_align.
    int   cv_len, key_len;
    int   mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
    Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;

    BLOCK_COMMENT("push parmBlk {");
    if (VM_Version::has_Crypto_AES()   ) { __ z_cghi(keylen, 52); }
    if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); }  // keyLen >  52: AES256
    if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); }  // keyLen == 52: AES192
    if (VM_Version::has_Crypto_AES128()) { __ z_brl(parmBlk_128); }  // keyLen <  52: AES128

    // Security net: requested AES function not available on this CPU.
    // NOTE:
    //   As of now (March 2015), this safety net is not required. JCE policy files limit the
    //   cryptographic strength of the keys used to 128 bit. If we have AES hardware support
    //   at all, we have at least AES-128.
    __ stop_static("AES key strength not supported by CPU. Use -XX:-UseAES as remedy.", 0);
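
    // Common pattern of the key-length-specific blocks below (a sketch of the
    // stack layout they establish; offsets as used by the code):
    //
    //   parmBlk + cv_len + key_len - 1 ... last byte of parameter block (cv first, key adjacent)
    //   parmBlk                        ... 32-byte aligned block start
    //   parmBlk - 8                    ... spilled keylen
    //   parmBlk - 16                   ... spilled original SP (for easy revert)
    //   parmBlk - AES_parmBlk_addspace ... new SP while the crypto instruction executes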

    if (VM_Version::has_Crypto_AES128()) {
      __ bind(parmBlk_128);
      cv_len  = VM_Version::Cipher::_AES128_dataBlk;
      key_len = VM_Version::Cipher::_AES128_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES128_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                    // Spill keylen for later use.
      __ z_stg(Z_SP, -16, parmBlk);                     // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);        // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true);  // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);         // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);            // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);     // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
      if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) {
        __ z_bru(parmBlk_set);  // Fallthru otherwise.
      }
    }

    if (VM_Version::has_Crypto_AES192()) {
      __ bind(parmBlk_192);
      cv_len  = VM_Version::Cipher::_AES192_dataBlk;
      key_len = VM_Version::Cipher::_AES192_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES192_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                    // Spill keylen for later use.
      __ z_stg(Z_SP, -16, parmBlk);                     // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);        // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true);  // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);         // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);            // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);     // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
      if (VM_Version::has_Crypto_AES256()) {
        __ z_bru(parmBlk_set);  // Fallthru otherwise.
      }
    }

    if (VM_Version::has_Crypto_AES256()) {
      __ bind(parmBlk_256);
      cv_len  = VM_Version::Cipher::_AES256_dataBlk;
      key_len = VM_Version::Cipher::_AES256_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES256_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                    // Spill keylen for later use.
      __ z_stg(Z_SP, -16, parmBlk);                     // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);        // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true);  // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);         // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);            // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);     // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
      // __ z_bru(parmBlk_set);  // fallthru
    }

    __ bind(parmBlk_set);
    BLOCK_COMMENT("} push parmBlk");
  }

  // Pop a parameter block from the stack. The chaining value portion of the parameter block
  // is copied back to the cv array as it is needed for subsequent cipher steps.
  // The keylen value and the original SP (before resizing) were pushed to the stack
  // when pushing the parameter block.
  void generate_pop_parmBlk(Register keylen, Register parmBlk, Register key, Register cv) {

    BLOCK_COMMENT("pop parmBlk {");
    bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) &&
                                 (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk);
    if (identical_dataBlk_len) {
      int cv_len = VM_Version::Cipher::_AES128_dataBlk;
      __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
    } else {
      int cv_len;
      Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;
      __ z_lg(keylen, -8, parmBlk);  // Restore keylen.
      __ z_cghi(keylen, 52);
      if (VM_Version::has_Crypto_AES256()) __ z_brh(parmBlk_256);  // keyLen >  52: AES256
      if (VM_Version::has_Crypto_AES192()) __ z_bre(parmBlk_192);  // keyLen == 52: AES192
      // if (VM_Version::has_Crypto_AES128()) __ z_brl(parmBlk_128);  // keyLen <  52: AES128  // fallthru

      // Security net: there is none here. Had we needed one, we would already
      // have fallen into it when pushing the parameter block.
      if (VM_Version::has_Crypto_AES128()) {
        __ bind(parmBlk_128);
        cv_len = VM_Version::Cipher::_AES128_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) {
          __ z_bru(parmBlk_set);
        }
      }

      if (VM_Version::has_Crypto_AES192()) {
        __ bind(parmBlk_192);
        cv_len = VM_Version::Cipher::_AES192_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        if (VM_Version::has_Crypto_AES256()) {
          __ z_bru(parmBlk_set);
        }
      }

      if (VM_Version::has_Crypto_AES256()) {
        __ bind(parmBlk_256);
        cv_len = VM_Version::Cipher::_AES256_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        // __ z_bru(parmBlk_set);  // fallthru
      }
      __ bind(parmBlk_set);
    }
    __ z_lg(Z_SP, -16, parmBlk);  // Revert resize_frame_absolute.
    BLOCK_COMMENT("} pop parmBlk");
  }

  // Compute AES encrypt function.
  address generate_AES_encryptBlock(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register from = Z_ARG1;  // source byte array
    Register to   = Z_ARG2;  // destination byte array
    Register key  = Z_ARG3;  // expanded key array
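
    // KM register conventions (see the interface notes above): the function
    // code lives in Z_R0, the parameter block address in Z_R1, dst must be an
    // even register, and src an even/odd register pair. This is why the
    // argument registers are reused below.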
    const Register keylen  = Z_R0;    // Temporarily (until fCode is set) holds the expanded key array length.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites expanded key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Copy arguments to registers as required by crypto instruction.
    __ z_lgr(parmBlk, key);  // crypto key (in T_INT array)
    // __ z_lgr(src, from);  // Copy not needed, src/from are identical.
    __ z_lgr(dst, to);       // Copy destination address to even register.

    // Construct function code in Z_R0, data block length in Z_ARG2.
    generate_load_AES_fCode(keylen, fCode, srclen, false);

    __ km(dst, src);  // Cipher the message.

    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // Compute AES decrypt function.
  address generate_AES_decryptBlock(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register from = Z_ARG1;  // source byte array
    Register to   = Z_ARG2;  // destination byte array
    Register key  = Z_ARG3;  // expanded key array, not preset at entry!!!

    const Register keylen  = Z_R0;    // Temporarily (until fCode is set) holds the expanded key array length.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Copy arguments to registers as required by crypto instruction.
    __ z_lgr(parmBlk, key);  // Copy crypto key address.
    // __ z_lgr(src, from);  // Copy not needed, src/from are identical.
    __ z_lgr(dst, to);       // Copy destination address to even register.

    // Construct function code in Z_R0, data block length in Z_ARG2.
    generate_load_AES_fCode(keylen, fCode, srclen, true);

    __ km(dst, src);  // Decipher the message.

    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // These stubs receive the addresses of the cryptographic key and of the chaining value as two separate
  // arguments (registers "key" and "cv", respectively). The KMC instruction, on the other hand, requires
  // chaining value and key to be, in this sequence, adjacent in storage. Thus, we need to allocate some
  // thread-local working storage. Using heap memory incurs all the hassles of allocating/freeing.
  // Stack space, by contrast, is deallocated automatically when we return from the stub to the caller.
  // *** WARNING ***
  // Please note that we do not formally allocate stack space, nor do we
  // update the stack pointer. Therefore, no function calls are allowed
  // and nobody else must use the stack range where the parameter block
  // is located.
  // We align the parameter block to the next available octoword.
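  //
  // Processing sketch for the two CBC stubs below: cv and key are copied,
  // adjacent to each other, into the stack-allocated parameter block
  // (generate_push_parmBlk); a single KMC instruction then ciphers/deciphers
  // the entire message and updates the chaining value inside the parameter
  // block; finally, the updated chaining value is copied back to the caller's
  // cv array (generate_pop_parmBlk) so that a subsequent call can continue
  // the chain.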
  //
  // Compute chained AES encrypt function.
  address generate_cipherBlockChaining_AES_encrypt(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register       from   = Z_ARG1;  // source byte array (clear text)
    Register       to     = Z_ARG2;  // destination byte array (ciphered)
    Register       key    = Z_ARG3;  // expanded key array
    Register       cv     = Z_ARG4;  // chaining value
    const Register msglen = Z_ARG5;  // Total length of the msg to be encrypted. Value must be returned
                                     // in Z_RET upon completion of this stub. Is a 32-bit integer.

    const Register keylen  = Z_R0;    // Expanded key length, as read from key array. Temp only.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
    // Construct function code in Z_R0.
    generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, false);

    // Prepare other registers for instruction.
    // __ z_lgr(src, from);  // Not needed, registers are the same.
    __ z_lgr(dst, to);
    __ z_llgfr(srclen, msglen);  // msglen is passed as an int; zero-extend to the 64 bits the instruction expects.

    __ kmc(dst, src);  // Cipher the message.

    generate_pop_parmBlk(keylen, parmBlk, key, cv);

    __ z_llgfr(Z_RET, msglen);  // Return the msglen argument, zero-extended.
    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // Compute chained AES decrypt function.
  address generate_cipherBlockChaining_AES_decrypt(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register       from   = Z_ARG1;  // source byte array (ciphered)
    Register       to     = Z_ARG2;  // destination byte array (clear text)
    Register       key    = Z_ARG3;  // expanded key array, not preset at entry!!!
    Register       cv     = Z_ARG4;  // chaining value
    const Register msglen = Z_ARG5;  // Total length of the msg to be decrypted. Value must be returned
                                     // in Z_RET upon completion of this stub.

    const Register keylen  = Z_R0;    // Expanded key length, as read from key array. Temp only.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
    // Construct function code in Z_R0.
    generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, true);

    // Prepare other registers for instruction.
    // __ z_lgr(src, from);  // Not needed, registers are the same.
    __ z_lgr(dst, to);
    __ z_llgfr(srclen, msglen);  // msglen is passed as an int; zero-extend to the 64 bits the instruction expects.

    __ kmc(dst, src);  // Decipher the message.

    generate_pop_parmBlk(keylen, parmBlk, key, cv);

    __ z_llgfr(Z_RET, msglen);  // Return the msglen argument, zero-extended.
    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }


  // Call interface for all SHA* stubs.
  //
  //   Z_ARG1 - source data block. Ptr to leftmost byte to be processed.
  //   Z_ARG2 - current SHA state. Ptr to state area. This area serves as
  //            parameter block as required by the crypto instruction.
  //   Z_ARG3 - current byte offset in source data block.
  //   Z_ARG4 - last byte offset in source data block.
  //            (Z_ARG4 - Z_ARG3) gives the #bytes remaining to be processed.
  //
  //   Z_RET  - return value. First unprocessed byte offset in src buffer.
  //
  // A few notes on the call interface:
  //  - All stubs, whether they are single-block or multi-block, are assumed to
  //    digest data in integer multiples of the data block length. All data
  //    blocks are digested using the intermediate message digest (KIMD) instruction.
  //    Special end processing, as done by the KLMD instruction, seems to be
  //    emulated by the calling code.
  //
  //  - Z_ARG1 addresses the first byte of source data. The offset (Z_ARG3) is
  //    already accounted for.
  //
  //  - The current SHA state (the intermediate message digest value) is contained
  //    in an area addressed by Z_ARG2. The area size depends on the SHA variant
  //    and is accessible via the enum VM_Version::MsgDigest::_SHA<n>_parmBlk_I.
  //
  //  - The single-block stub is expected to digest exactly one data block, starting
  //    at the address passed in Z_ARG1.
  //
  //  - The multi-block stub is expected to digest all data blocks which start in
  //    the offset interval [srcOff(Z_ARG3), srcLimit(Z_ARG4)). The exact difference
  //    (srcLimit-srcOff), rounded up to the next multiple of the data block length,
  //    gives the number of blocks to digest. It must be assumed that the calling code
  //    provides a large enough source data buffer.
  //
  // Compute SHA-1 function.
  address generate_SHA1_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;  // Points to first block to process (offset already added).
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter for kimd register pairs.
    const Register srcOff         = Z_ARG3;  // int
    const Register srcLimit       = Z_ARG4;  // Only passed in multiBlock case. int

    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address, must be copied before.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA1);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.

      // The following description is valid if we get a raw (unpimped) source data buffer,
      // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
      // the calling convention for these stubs is different. We leave the description in
      // to inform the reader what must be happening hidden in the calling code.
      //
      // The data block to be processed can have arbitrary length, i.e. its length does not
      // need to be an integer multiple of SHA<n>_dataBlk. Therefore, we need to implement
      // two different paths. If the length is an integer multiple, we use KIMD, saving us
      // from copying the SHA state back and forth. If the length is not a multiple of the
      // data block size, we copy the SHA state to the stack, execute a KLMD instruction on
      // it, and copy the result back to the caller's SHA state location.

      // Total #srcBuff blocks to process.
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                          // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);    // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // SrcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // Exact difference. srcLimit passed as int.
        __ z_sgfr(srcBufLen, srcOff);           // SrcOff passed as int, now properly casted to long.
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);   // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // SrcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }

      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA1_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA1_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA1_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer. No 32 to 64 bit extension needed.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);

#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA1 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }

  // Compute SHA-256 function.
  address generate_SHA256_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter.
    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcOff         = Z_ARG3;
    const Register srcLimit       = Z_ARG4;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address, must be copied before.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA256);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.
      // The following description is valid if we get a raw (unpimped) source data buffer,
      // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
      // the calling convention for these stubs is different. We leave the description in
      // to inform the reader what must be happening hidden in the calling code.
      //
      // The data block to be processed can have arbitrary length, i.e. its length does not
      // need to be an integer multiple of SHA<n>_dataBlk. Therefore, we need to implement
      // two different paths. If the length is an integer multiple, we use KIMD, saving us
      // from copying the SHA state back and forth. If the length is not a multiple of the
      // data block size, we copy the SHA state to the stack, execute a KLMD instruction on
      // it, and copy the result back to the caller's SHA state location.

      // Total #srcBuff blocks to process.
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                          // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);  // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // SrcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // exact difference
        __ z_sgfr(srcBufLen, srcOff);
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // SrcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }

      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA256_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA256_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA256_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);
#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA256 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }

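  // Worked example for the multi-block length rounding used above and below
  // (the logic is identical in all SHA stubs): SHA-1 and SHA-256 use a
  // 64-byte data block. With srcOff = 0 and srcLimit = 150, srcBufLen is
  // rounded up to (150 + 63) & ~63 = 192, so KIMD digests three full blocks
  // and Z_RET is set to 192, the offset of the first unprocessed byte. The
  // calling code must therefore provide a buffer that large (see the
  // interface notes above).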
  // Compute SHA-512 function.
  address generate_SHA512_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter.
    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcOff         = Z_ARG3;
    const Register srcLimit       = Z_ARG4;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address, must be copied before.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA512);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.
      // The following description is valid if we get a raw (unpimped) source data buffer,
      // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
      // the calling convention for these stubs is different. We leave the description in
      // to inform the reader what must be happening hidden in the calling code.
      //
      // The data block to be processed can have arbitrary length, i.e. its length does not
      // need to be an integer multiple of SHA<n>_dataBlk. Therefore, we need to implement
      // two different paths. If the length is an integer multiple, we use KIMD, saving us
      // from copying the SHA state back and forth. If the length is not a multiple of the
      // data block size, we copy the SHA state to the stack, execute a KLMD instruction on
      // it, and copy the result back to the caller's SHA state location.

      // Total #srcBuff blocks to process.
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                          // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);  // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // SrcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // exact difference
        __ z_sgfr(srcBufLen, srcOff);
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // SrcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }

      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA512_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA512_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA512_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);
#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA512 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }



  // Arguments:
  //   Z_ARG1 - int   crc
  //   Z_ARG2 - byte* buf
  //   Z_ARG3 - int   length (of buffer)
  //
  // Result:
  //   Z_RET  - int   crc result
  //
  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    // arguments to kernel_crc32:
    Register       crc     = Z_ARG1;  // Current checksum, preset by caller or result from previous call, int.
    Register       data    = Z_ARG2;  // source byte array
    Register       dataLen = Z_ARG3;  // #bytes to process, int
    Register       table   = Z_ARG4;  // crc table address
    const Register t0      = Z_R10;   // work reg for kernel* emitters
    const Register t1      = Z_R11;   // work reg for kernel* emitters
    const Register t2      = Z_R12;   // work reg for kernel* emitters
    const Register t3      = Z_R13;   // work reg for kernel* emitters

    assert_different_registers(crc, data, dataLen, table);

    // dataLen is passed as an int; zero-extend it to the long expected by the
    // C calling convention. crc is used as an int throughout.
    __ z_llgfr(dataLen, dataLen);

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true);  // Resize frame to provide additional space for register spills.
    __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP);   // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
    __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP);    // Restore regs 10..13 from the stack.
    __ resize_frame(+(6*8), Z_R0, true);  // Remove the spill area again.

    __ z_llgfr(Z_RET, crc);  // Updated crc is function result. No copying required, just zero upper 32 bits.
    __ z_br(Z_R14);          // Result already in Z_RET == Z_ARG1.

    return __ addr_at(start_off);
  }


  // Arguments:
  //   Z_ARG1    - x address
  //   Z_ARG2    - x length
  //   Z_ARG3    - y address
  //   Z_ARG4    - y length
  //   Z_ARG5    - z address
  //   160[Z_SP] - z length
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();

    const Register x    = Z_ARG1;
    const Register xlen = Z_ARG2;
    const Register y    = Z_ARG3;
    const Register ylen = Z_ARG4;
    const Register z    = Z_ARG5;
    // zlen is passed on the stack:
    // Address zlen(Z_SP, _z_abi(remaining_cargs));
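    // Note: multiply_to_len() does not need zlen explicitly; the product of an
    // xlen-word and a ylen-word number occupies at most xlen + ylen words,
    // which is the size of the caller-allocated z array.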
    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1 = Z_tmp_1;
    const Register tmp2 = Z_tmp_2;
    const Register tmp3 = Z_tmp_3;
    const Register tmp4 = Z_tmp_4;
    const Register tmp5 = Z_R9;

    BLOCK_COMMENT("Entry:");

    __ z_llgfr(xlen, xlen);
    __ z_llgfr(ylen, ylen);

    __ multiply_to_len(x, xlen, y, ylen, z, tmp1, tmp2, tmp3, tmp4, tmp5);

    __ z_br(Z_R14);  // Return to caller.

    return start;
  }

  void generate_initial() {
    // Generates all stubs and initializes the entry points.

    // Entry points that exist in all platforms.
    // Note: This is code that could be shared among different
    // platforms - however the benefit seems to be smaller than the
    // disadvantage of having a much more complicated generator
    // structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);

    //----------------------------------------------------------------------
    // Entry points that are platform specific.

    if (UseCRC32Intrinsics) {
      // We have no CRC32 table on z/Architecture.
      StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
    }

    // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
    StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table;
  }


  void generate_all() {
    // Generates all stubs and initializes the entry points.

    StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();

    // These entry points require SharedInfo::stack0 to be set up in non-core builds.
    StubRoutines::_throw_AbstractMethodError_entry          = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    // Support for verify_oop (must happen after universe_init).
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

    // Arraycopy stubs used by compilers.
    generate_arraycopy_stubs();

    // safefetch stubs
    generate_safefetch("SafeFetch32", sizeof(int),      &StubRoutines::_safefetch32_entry, &StubRoutines::_safefetch32_fault_pc, &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN",  sizeof(intptr_t), &StubRoutines::_safefetchN_entry,  &StubRoutines::_safefetchN_fault_pc,  &StubRoutines::_safefetchN_continuation_pc);

    // Generate AES intrinsics code.
    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock");
      StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock");
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
    }

    // Generate SHA1/SHA256/SHA512 intrinsics code.
    if (UseSHA1Intrinsics) {
      StubRoutines::_sha1_implCompress   = generate_SHA1_stub(false, "SHA1_singleBlock");
      StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true,  "SHA1_multiBlock");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::_sha256_implCompress   = generate_SHA256_stub(false, "SHA256_singleBlock");
      StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true,  "SHA256_multiBlock");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::_sha512_implCompress   = generate_SHA512_stub(false, "SHA512_singleBlock");
      StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true,  "SHA512_multiBlock");
    }

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // Replace the standard masm with a special one.
    _masm = new MacroAssembler(code);

    _stub_count = !all ? 0x100 : 0x200;
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }

 private:
  int _stub_count;
  void stub_prolog(StubCodeDesc* cdesc) {
#ifdef ASSERT
    // Put extra information in the stub code, to make it more readable.
    // Write the high part of the address.
    // [RGV] Check if there is a dependency on the size of this prolog.
    __ emit_32((intptr_t)cdesc >> 32);
    __ emit_32((intptr_t)cdesc);
    __ emit_32(++_stub_count);
#endif
    align(true);
  }

  void align(bool at_header = false) {
    // z/Architecture cache line size is 256 bytes.
    // There is no obvious benefit in aligning stub
    // code to cache lines. Use CodeEntryAlignment instead.
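    // For example, if CodeEntryAlignment were 64, stub headers would be padded
    // with zero halfwords out to a 64-byte boundary, while intra-stub alignment
    // below would pad with nops to at most a 32-byte (half line) boundary.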
    const unsigned int icache_line_size      = CodeEntryAlignment;
    const unsigned int icache_half_line_size = MIN2<unsigned int>(32, CodeEntryAlignment);

    if (at_header) {
      while ((intptr_t)(__ pc()) % icache_line_size != 0) {
        __ emit_16(0);
      }
    } else {
      while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
        __ z_nop();
      }
    }
  }

};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
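
// Note: StubGenerator_generate is invoked twice during VM startup: an early
// call with all == false creates the initial stubs the interpreter depends on
// (generate_initial()), and a later call with all == true creates the
// remaining stubs (generate_all()). See the stub routine description in
// stubRoutines.hpp.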