/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)           \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

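    // In outline, the loop above behaves like the C sketch below
    // (illustrative only, not part of the generated stub).  parameters[0]
    // is pushed first and therefore ends up at the highest stack address,
    // which is the order the interpreter entry point expects:
    //
    //   for (int i = 0; i < parameter_size; i++) {
    //     push(parameters[i]);
    //   }
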
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_VALUETYPE, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(r13, result);
    Label is_long, is_float, is_double, is_value, exit;
    __ movl(rbx, result_type);
    __ cmpl(rbx, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rbx, T_VALUETYPE);
    __ jcc(Assembler::equal, is_value);
    __ cmpl(rbx, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rbx, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(rbx, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(r13, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_value);
    if (ValueTypeReturnedAsFields) {
      // Handle value type returned as fields
      __ store_value_type_fields_to_buf(NULL);
      __ movptr(r13, result);
    }
    __ BIND(is_long);
    __ movq(Address(r13, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(r13, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(r13, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee.  In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
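    //
    // Roughly, the code below does the following (an illustrative C
    // sketch only; the heavy lifting happens inside
    // SharedRuntime::exception_handler_for_return_address):
    //
    //   address pc      = *(address*)rsp;  // return address == throwing pc
    //   address handler = exception_handler_for_return_address(thread, pc);
    //   rdx = pop();                       // throwing pc
    //   rax = thread->pending_exception(); // exception oop
    //   thread->clear_pending_exception();
    //   goto handler;                      // rax/rdx set per the contract above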

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
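    // At this point the only thing above rsp is this stub's own return
    // address (the stub is reached via a call):
    //   [rsp + 0]  return address into the caller
    // so rsp + 8 is the caller's rsp just before the call instruction.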
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error, in_Java_heap;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr = 16 * wordSize,
      error_msg   = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::zero, in_Java_heap);
    // Not in Java heap, but could be valid if it's a bufferable value type
    __ load_klass(c_rarg2, rax);
    __ movbool(c_rarg2, Address(c_rarg2, InstanceKlass::extra_flags_offset()));
    __ andptr(c_rarg2, InstanceKlass::_extra_is_bufferable);
    __ testbool(c_rarg2);
    __ jcc(Assembler::zero, error);
    __ bind(in_Java_heap);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
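
  // In outline, the stub above performs these plausibility checks.  This
  // is an illustrative C-style sketch only; klass_of() and
  // is_bufferable_value_type() are sketch-only helper names standing in
  // for the load_klass()/extra_flags checks emitted above:
  //
  //   if (obj == NULL) return;                               // NULL oop is OK
  //   if (UseZGC && (obj & address_bad_mask) != 0) error();  // stale ZGC oop
  //   if ((obj & verify_oop_mask()) != verify_oop_bits() &&
  //       !is_bufferable_value_type(klass_of(obj))) error(); // outside heap
  //   if (klass_of(obj) == NULL) error();                    // broken klass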

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where the
  // latter are non-volatile.  r9 and r10 should not be used by the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4); // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array's address
  //   dest         - destination array's address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4); // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }


  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
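
    // Overall shape of the copy below, as an illustrative C sketch only
    // (the bulk qword loop is the vectorized copy_bytes_forward above):
    //
    //   size_t qwords = byte_count >> 3;
    //   copy 'qwords' 8-byte units from low to high addresses;
    //   if (byte_count & 4) copy one trailing dword;
    //   if (byte_count & 2) copy one trailing word;
    //   if (byte_count & 1) copy one trailing byte;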

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
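    // Indexing scheme used below (and by the other forward copies): the
    // end pointers address the last qword of each array and qword_count
    // is negated, so the copy loops walk Address(end, qword_count, times_8)
    // upward until qword_count reaches zero.  As an illustrative C sketch:
    //
    //   uint64_t* last_from = (uint64_t*)from + qwords - 1; // inclusive end
    //   uint64_t* last_to   = (uint64_t*)to   + qwords - 1;
    //   for (ptrdiff_t i = 1 - (ptrdiff_t)qwords; i <= 0; i++)
    //     last_to[i] = last_from[i];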
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // destination array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
1789 // 1790 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1791 address *entry, const char *name) { 1792 __ align(CodeEntryAlignment); 1793 StubCodeMark mark(this, "StubRoutines", name); 1794 address start = __ pc(); 1795 1796 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1797 const Register from = rdi; // source array address 1798 const Register to = rsi; // destination array address 1799 const Register count = rdx; // elements count 1800 const Register word_count = rcx; 1801 const Register qword_count = count; 1802 1803 __ enter(); // required for proper stackwalking of RuntimeStub frame 1804 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1805 1806 if (entry != NULL) { 1807 *entry = __ pc(); 1808 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1809 BLOCK_COMMENT("Entry:"); 1810 } 1811 1812 array_overlap_test(nooverlap_target, Address::times_2); 1813 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1814 // r9 and r10 may be used to save non-volatile registers 1815 1816 // 'from', 'to' and 'count' are now valid 1817 __ movptr(word_count, count); 1818 __ shrptr(count, 2); // count => qword_count 1819 1820 // Copy from high to low addresses. Use 'to' as scratch. 1821 1822 // Check for and copy trailing word 1823 __ testl(word_count, 1); 1824 __ jccb(Assembler::zero, L_copy_4_bytes); 1825 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1826 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1827 1828 // Check for and copy trailing dword 1829 __ BIND(L_copy_4_bytes); 1830 __ testl(word_count, 2); 1831 __ jcc(Assembler::zero, L_copy_bytes); 1832 __ movl(rax, Address(from, qword_count, Address::times_8)); 1833 __ movl(Address(to, qword_count, Address::times_8), rax); 1834 __ jmp(L_copy_bytes); 1835 1836 // Copy trailing qwords 1837 __ BIND(L_copy_8_bytes); 1838 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1839 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1840 __ decrement(qword_count); 1841 __ jcc(Assembler::notZero, L_copy_8_bytes); 1842 1843 restore_arg_regs(); 1844 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1845 __ xorptr(rax, rax); // return 0 1846 __ vzeroupper(); 1847 __ leave(); // required for proper stackwalking of RuntimeStub frame 1848 __ ret(0); 1849 1850 // Copy in multi-byte chunks 1851 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1852 1853 restore_arg_regs(); 1854 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1855 __ xorptr(rax, rax); // return 0 1856 __ vzeroupper(); 1857 __ leave(); // required for proper stackwalking of RuntimeStub frame 1858 __ ret(0); 1859 1860 return start; 1861 } 1862 1863 // Arguments: 1864 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1865 // ignored 1866 // is_oop - true => oop array, so generate store check code 1867 // name - stub name string 1868 // 1869 // Inputs: 1870 // c_rarg0 - source array address 1871 // c_rarg1 - destination array address 1872 // c_rarg2 - element count, treated as ssize_t, can be zero 1873 // 1874 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1875 // the hardware handle it. The two dwords within qwords that span 1876 // cache line boundaries will still be loaded and stored atomically.
1877 // 1878 // Side Effects: 1879 // disjoint_int_copy_entry is set to the no-overlap entry point 1880 // used by generate_conjoint_int_oop_copy(). 1881 // 1882 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1883 const char *name, bool dest_uninitialized = false) { 1884 __ align(CodeEntryAlignment); 1885 StubCodeMark mark(this, "StubRoutines", name); 1886 address start = __ pc(); 1887 1888 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1889 const Register from = rdi; // source array address 1890 const Register to = rsi; // destination array address 1891 const Register count = rdx; // elements count 1892 const Register dword_count = rcx; 1893 const Register qword_count = count; 1894 const Register end_from = from; // source array end address 1895 const Register end_to = to; // destination array end address 1896 // End pointers are inclusive, and if count is not zero they point 1897 // to the last unit copied: end_to[0] := end_from[0] 1898 1899 __ enter(); // required for proper stackwalking of RuntimeStub frame 1900 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1901 1902 if (entry != NULL) { 1903 *entry = __ pc(); 1904 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1905 BLOCK_COMMENT("Entry:"); 1906 } 1907 1908 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1909 // r9 is used to save r15_thread 1910 1911 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1912 if (dest_uninitialized) { 1913 decorators |= IS_DEST_UNINITIALIZED; 1914 } 1915 if (aligned) { 1916 decorators |= ARRAYCOPY_ALIGNED; 1917 } 1918 1919 BasicType type = is_oop ? T_OBJECT : T_INT; 1920 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1921 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1922 1923 // 'from', 'to' and 'count' are now valid 1924 __ movptr(dword_count, count); 1925 __ shrptr(count, 1); // count => qword_count 1926 1927 // Copy from low to high addresses. Use 'to' as scratch. 
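// The forward copy below uses the "negative index" idiom: end_from/end_to
// are biased to the last qword and qword_count is negated, so the tail loop
// is roughly (illustrative only):
//   for (ptrdiff_t i = -qwords; i != 0; i++)
//     *(jlong*)(end_to + 8*i + 8) = *(jlong*)(end_from + 8*i + 8);
// letting the increment of the index set the flags for the loop branch.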
1928 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1929 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1930 __ negptr(qword_count); 1931 __ jmp(L_copy_bytes); 1932 1933 // Copy trailing qwords 1934 __ BIND(L_copy_8_bytes); 1935 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1936 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1937 __ increment(qword_count); 1938 __ jcc(Assembler::notZero, L_copy_8_bytes); 1939 1940 // Check for and copy trailing dword 1941 __ BIND(L_copy_4_bytes); 1942 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1943 __ jccb(Assembler::zero, L_exit); 1944 __ movl(rax, Address(end_from, 8)); 1945 __ movl(Address(end_to, 8), rax); 1946 1947 __ BIND(L_exit); 1948 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1949 restore_arg_regs_using_thread(); 1950 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1951 __ vzeroupper(); 1952 __ xorptr(rax, rax); // return 0 1953 __ leave(); // required for proper stackwalking of RuntimeStub frame 1954 __ ret(0); 1955 1956 // Copy in multi-byte chunks 1957 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1958 __ jmp(L_copy_4_bytes); 1959 1960 return start; 1961 } 1962 1963 // Arguments: 1964 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1965 // ignored 1966 // is_oop - true => oop array, so generate store check code 1967 // name - stub name string 1968 // 1969 // Inputs: 1970 // c_rarg0 - source array address 1971 // c_rarg1 - destination array address 1972 // c_rarg2 - element count, treated as ssize_t, can be zero 1973 // 1974 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1975 // the hardware handle it. The two dwords within qwords that span 1976 // cache line boundaries will still be loaded and stored atomically. 1977 // 1978 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1979 address *entry, const char *name, 1980 bool dest_uninitialized = false) { 1981 __ align(CodeEntryAlignment); 1982 StubCodeMark mark(this, "StubRoutines", name); 1983 address start = __ pc(); 1984 1985 Label L_copy_bytes, L_copy_8_bytes, L_exit; 1986 const Register from = rdi; // source array address 1987 const Register to = rsi; // destination array address 1988 const Register count = rdx; // elements count 1989 const Register dword_count = rcx; 1990 const Register qword_count = count; 1991 1992 __ enter(); // required for proper stackwalking of RuntimeStub frame 1993 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1994 1995 if (entry != NULL) { 1996 *entry = __ pc(); 1997 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1998 BLOCK_COMMENT("Entry:"); 1999 } 2000 2001 array_overlap_test(nooverlap_target, Address::times_4); 2002 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2003 // r9 is used to save r15_thread 2004 2005 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2006 if (dest_uninitialized) { 2007 decorators |= IS_DEST_UNINITIALIZED; 2008 } 2009 if (aligned) { 2010 decorators |= ARRAYCOPY_ALIGNED; 2011 } 2012 2013 BasicType type = is_oop ?
T_OBJECT : T_INT; 2014 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2015 // no registers are destroyed by this call 2016 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2017 2018 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2019 // 'from', 'to' and 'count' are now valid 2020 __ movptr(dword_count, count); 2021 __ shrptr(count, 1); // count => qword_count 2022 2023 // Copy from high to low addresses. Use 'to' as scratch. 2024 2025 // Check for and copy trailing dword 2026 __ testl(dword_count, 1); 2027 __ jcc(Assembler::zero, L_copy_bytes); 2028 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2029 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2030 __ jmp(L_copy_bytes); 2031 2032 // Copy trailing qwords 2033 __ BIND(L_copy_8_bytes); 2034 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2035 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2036 __ decrement(qword_count); 2037 __ jcc(Assembler::notZero, L_copy_8_bytes); 2038 2039 if (is_oop) { 2040 __ jmp(L_exit); 2041 } 2042 restore_arg_regs_using_thread(); 2043 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2044 __ xorptr(rax, rax); // return 0 2045 __ vzeroupper(); 2046 __ leave(); // required for proper stackwalking of RuntimeStub frame 2047 __ ret(0); 2048 2049 // Copy in multi-byte chunks 2050 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2051 2052 __ BIND(L_exit); 2053 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2054 restore_arg_regs_using_thread(); 2055 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2056 __ xorptr(rax, rax); // return 0 2057 __ vzeroupper(); 2058 __ leave(); // required for proper stackwalking of RuntimeStub frame 2059 __ ret(0); 2060 2061 return start; 2062 } 2063 2064 // Arguments: 2065 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2066 // ignored 2067 // is_oop - true => oop array, so generate store check code 2068 // name - stub name string 2069 // 2070 // Inputs: 2071 // c_rarg0 - source array address 2072 // c_rarg1 - destination array address 2073 // c_rarg2 - element count, treated as ssize_t, can be zero 2074 // 2075 // Side Effects: 2076 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2077 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2078 // 2079 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2080 const char *name, bool dest_uninitialized = false) { 2081 __ align(CodeEntryAlignment); 2082 StubCodeMark mark(this, "StubRoutines", name); 2083 address start = __ pc(); 2084 2085 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2086 const Register from = rdi; // source array address 2087 const Register to = rsi; // destination array address 2088 const Register qword_count = rdx; // elements count 2089 const Register end_from = from; // source array end address 2090 const Register end_to = rcx; // destination array end address 2091 const Register saved_count = r11; 2092 // End pointers are inclusive, and if count is not zero they point 2093 // to the last unit copied: end_to[0] := end_from[0] 2094 2095 __ enter(); // required for proper stackwalking of RuntimeStub frame 2096 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2097 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
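// For oop arrays the BarrierSetAssembler hooks used below may expand into
// GC-specific pre/post barrier code around the copy (illustrative shape):
//   bs->arraycopy_prologue(...);   // e.g. a SATB pre-barrier, when required
//   ... qword copy loop ...
//   bs->arraycopy_epilogue(...);   // e.g. card marking for the copied range
// For primitive arrays both hooks are typically empty.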
2098 2099 if (entry != NULL) { 2100 *entry = __ pc(); 2101 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2102 BLOCK_COMMENT("Entry:"); 2103 } 2104 2105 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2106 // r9 is used to save r15_thread 2107 // 'from', 'to' and 'qword_count' are now valid 2108 2109 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2110 if (dest_uninitialized) { 2111 decorators |= IS_DEST_UNINITIALIZED; 2112 } 2113 if (aligned) { 2114 decorators |= ARRAYCOPY_ALIGNED; 2115 } 2116 2117 BasicType type = is_oop ? T_OBJECT : T_LONG; 2118 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2119 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2120 2121 // Copy from low to high addresses. Use 'to' as scratch. 2122 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2123 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2124 __ negptr(qword_count); 2125 __ jmp(L_copy_bytes); 2126 2127 // Copy trailing qwords 2128 __ BIND(L_copy_8_bytes); 2129 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2130 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2131 __ increment(qword_count); 2132 __ jcc(Assembler::notZero, L_copy_8_bytes); 2133 2134 if (is_oop) { 2135 __ jmp(L_exit); 2136 } else { 2137 restore_arg_regs_using_thread(); 2138 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2139 __ xorptr(rax, rax); // return 0 2140 __ vzeroupper(); 2141 __ leave(); // required for proper stackwalking of RuntimeStub frame 2142 __ ret(0); 2143 } 2144 2145 // Copy in multi-byte chunks 2146 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2147 2148 __ BIND(L_exit); 2149 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2150 restore_arg_regs_using_thread(); 2151 if (is_oop) { 2152 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2153 } else { 2154 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2155 } 2156 __ vzeroupper(); 2157 __ xorptr(rax, rax); // return 0 2158 __ leave(); // required for proper stackwalking of RuntimeStub frame 2159 __ ret(0); 2160 2161 return start; 2162 } 2163 2164 // Arguments: 2165 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2166 // ignored 2167 // is_oop - true => oop array, so generate store check code 2168 // name - stub name string 2169 // 2170 // Inputs: 2171 // c_rarg0 - source array address 2172 // c_rarg1 - destination array address 2173 // c_rarg2 - element count, treated as ssize_t, can be zero 2174 // 2175 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2176 address nooverlap_target, address *entry, 2177 const char *name, bool dest_uninitialized = false) { 2178 __ align(CodeEntryAlignment); 2179 StubCodeMark mark(this, "StubRoutines", name); 2180 address start = __ pc(); 2181 2182 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2183 const Register from = rdi; // source array address 2184 const Register to = rsi; // destination array address 2185 const Register qword_count = rdx; // elements count 2186 const Register saved_count = rcx; 2187 2188 __ enter(); // required for proper stackwalking of RuntimeStub frame 2189 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
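// Conjoint ("may overlap") copies have to run from high to low addresses
// whenever the destination overlaps the tail of the source; roughly
// (illustrative only):
//   for (size_t i = qwords; i != 0; i--)
//     to[i - 1] = from[i - 1];          // one qword per iteration
// The array_overlap_test below branches to nooverlap_target (the forward,
// disjoint copy) when the ranges cannot overlap.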
2190 2191 if (entry != NULL) { 2192 *entry = __ pc(); 2193 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2194 BLOCK_COMMENT("Entry:"); 2195 } 2196 2197 array_overlap_test(nooverlap_target, Address::times_8); 2198 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2199 // r9 is used to save r15_thread 2200 // 'from', 'to' and 'qword_count' are now valid 2201 2202 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2203 if (dest_uninitialized) { 2204 decorators |= IS_DEST_UNINITIALIZED; 2205 } 2206 if (aligned) { 2207 decorators |= ARRAYCOPY_ALIGNED; 2208 } 2209 2210 BasicType type = is_oop ? T_OBJECT : T_LONG; 2211 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2212 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2213 2214 __ jmp(L_copy_bytes); 2215 2216 // Copy trailing qwords 2217 __ BIND(L_copy_8_bytes); 2218 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2219 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2220 __ decrement(qword_count); 2221 __ jcc(Assembler::notZero, L_copy_8_bytes); 2222 2223 if (is_oop) { 2224 __ jmp(L_exit); 2225 } else { 2226 restore_arg_regs_using_thread(); 2227 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2228 __ xorptr(rax, rax); // return 0 2229 __ vzeroupper(); 2230 __ leave(); // required for proper stackwalking of RuntimeStub frame 2231 __ ret(0); 2232 } 2233 2234 // Copy in multi-byte chunks 2235 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2236 2237 __ BIND(L_exit); 2238 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2239 restore_arg_regs_using_thread(); 2240 if (is_oop) { 2241 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2242 } else { 2243 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2244 } 2245 __ vzeroupper(); 2246 __ xorptr(rax, rax); // return 0 2247 __ leave(); // required for proper stackwalking of RuntimeStub frame 2248 __ ret(0); 2249 2250 return start; 2251 } 2252 2253 2254 // Helper for generating a dynamic type check. 2255 // Smashes no registers. 2256 void generate_type_check(Register sub_klass, 2257 Register super_check_offset, 2258 Register super_klass, 2259 Label& L_success) { 2260 assert_different_registers(sub_klass, super_check_offset, super_klass); 2261 2262 BLOCK_COMMENT("type_check:"); 2263 2264 Label L_miss; 2265 2266 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2267 super_check_offset); 2268 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2269 2270 // Fall through on failure!
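// Conceptually (illustrative only), the fast and slow paths above do:
//   if (sub_klass == super_klass)                          goto L_success;
//   if (*(sub_klass + super_check_offset) == super_klass)  goto L_success;
//   if (secondary supers of sub_klass contain super_klass) goto L_success;
//   // otherwise fall through to L_miss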
2271 __ BIND(L_miss); 2272 } 2273 2274 // 2275 // Generate checkcasting array copy stub 2276 // 2277 // Input: 2278 // c_rarg0 - source array address 2279 // c_rarg1 - destination array address 2280 // c_rarg2 - element count, treated as ssize_t, can be zero 2281 // c_rarg3 - size_t ckoff (super_check_offset) 2282 // not Win64 2283 // c_rarg4 - oop ckval (super_klass) 2284 // Win64 2285 // rsp+40 - oop ckval (super_klass) 2286 // 2287 // Output: 2288 // rax == 0 - success 2289 // rax == -1^K - failure, where K is partial transfer count 2290 // 2291 address generate_checkcast_copy(const char *name, address *entry, 2292 bool dest_uninitialized = false) { 2293 2294 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2295 2296 // Input registers (after setup_arg_regs) 2297 const Register from = rdi; // source array address 2298 const Register to = rsi; // destination array address 2299 const Register length = rdx; // elements count 2300 const Register ckoff = rcx; // super_check_offset 2301 const Register ckval = r8; // super_klass 2302 2303 // Registers used as temps (r13, r14 are save-on-entry) 2304 const Register end_from = from; // source array end address 2305 const Register end_to = r13; // destination array end address 2306 const Register count = rdx; // -(count_remaining) 2307 const Register r14_length = r14; // saved copy of length 2308 // End pointers are inclusive, and if length is not zero they point 2309 // to the last unit copied: end_to[0] := end_from[0] 2310 2311 const Register rax_oop = rax; // actual oop copied 2312 const Register r11_klass = r11; // oop._klass 2313 2314 //--------------------------------------------------------------- 2315 // Assembler stub will be used for this call to arraycopy 2316 // if the two arrays are subtypes of Object[] but the 2317 // destination array type is not equal to or a supertype 2318 // of the source type. Each element must be separately 2319 // checked. 2320 2321 __ align(CodeEntryAlignment); 2322 StubCodeMark mark(this, "StubRoutines", name); 2323 address start = __ pc(); 2324 2325 __ enter(); // required for proper stackwalking of RuntimeStub frame 2326 2327 #ifdef ASSERT 2328 // caller guarantees that the arrays really are different 2329 // otherwise, we would have to make conjoint checks 2330 { Label L; 2331 array_overlap_test(L, TIMES_OOP); 2332 __ stop("checkcast_copy within a single array"); 2333 __ bind(L); 2334 } 2335 #endif //ASSERT 2336 2337 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2338 // ckoff => rcx, ckval => r8 2339 // r9 and r10 may be used to save non-volatile registers 2340 #ifdef _WIN64 2341 // last argument (#4) is on stack on Win64 2342 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2343 #endif 2344 2345 // Caller of this entry point must set up the argument registers. 
2346 if (entry != NULL) { 2347 *entry = __ pc(); 2348 BLOCK_COMMENT("Entry:"); 2349 } 2350 2351 // allocate spill slots for r13, r14 2352 enum { 2353 saved_r13_offset, 2354 saved_r14_offset, 2355 saved_r10_offset, 2356 saved_rbp_offset 2357 }; 2358 __ subptr(rsp, saved_rbp_offset * wordSize); 2359 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2360 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2361 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10); 2362 2363 #ifdef ASSERT 2364 Label L2; 2365 __ get_thread(r14); 2366 __ cmpptr(r15_thread, r14); 2367 __ jcc(Assembler::equal, L2); 2368 __ stop("StubRoutines::call_stub: r15_thread is modified by call"); 2369 __ bind(L2); 2370 #endif // ASSERT 2371 2372 // check that int operands are properly extended to size_t 2373 assert_clean_int(length, rax); 2374 assert_clean_int(ckoff, rax); 2375 2376 #ifdef ASSERT 2377 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2378 // The ckoff and ckval must be mutually consistent, 2379 // even though caller generates both. 2380 { Label L; 2381 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2382 __ cmpl(ckoff, Address(ckval, sco_offset)); 2383 __ jcc(Assembler::equal, L); 2384 __ stop("super_check_offset inconsistent"); 2385 __ bind(L); 2386 } 2387 #endif //ASSERT 2388 2389 // Loop-invariant addresses. They are exclusive end pointers. 2390 Address end_from_addr(from, length, TIMES_OOP, 0); 2391 Address end_to_addr(to, length, TIMES_OOP, 0); 2392 // Loop-variant addresses. They assume post-incremented count < 0. 2393 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2394 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2395 2396 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT; 2397 if (dest_uninitialized) { 2398 decorators |= IS_DEST_UNINITIALIZED; 2399 } 2400 2401 BasicType type = T_OBJECT; 2402 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2403 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2404 2405 // Copy from low to high addresses, indexed from the end of each array. 2406 __ lea(end_from, end_from_addr); 2407 __ lea(end_to, end_to_addr); 2408 __ movptr(r14_length, length); // save a copy of the length 2409 assert(length == count, ""); // else fix next line: 2410 __ negptr(count); // negate and test the length 2411 __ jcc(Assembler::notZero, L_load_element); 2412 2413 // Empty array: Nothing to do. 2414 __ xorptr(rax, rax); // return 0 on (trivial) success 2415 __ jmp(L_done); 2416 2417 // ======== begin loop ======== 2418 // (Loop is rotated; its entry is L_load_element.) 2419 // Loop control: 2420 // for (count = -count; count != 0; count++) 2421 // Base pointers src, dst are biased by 8*(count-1), to last element.
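// Illustrative C-level view of the rotated loop that follows:
//   for (count = -count; count != 0; count++) {
//     oop o = end_from[count];                     // L_load_element
//     if (o != NULL && type_check_fails(o->klass(), ckoff, ckval))
//       break;                                     // report ~K to the caller
//     end_to[count] = o;                           // L_store_element
//   }
// 'type_check_fails' is shorthand here for the generate_type_check() miss.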
2422 __ align(OptoLoopAlignment); 2423 2424 __ BIND(L_store_element); 2425 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, noreg, AS_RAW); // store the oop 2426 __ increment(count); // increment the count toward zero 2427 __ jcc(Assembler::zero, L_do_card_marks); 2428 2429 // ======== loop entry is here ======== 2430 __ BIND(L_load_element); 2431 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2432 __ testptr(rax_oop, rax_oop); 2433 __ jcc(Assembler::zero, L_store_element); 2434 2435 __ load_klass(r11_klass, rax_oop);// query the object klass 2436 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2437 // ======== end loop ======== 2438 2439 // It was a real error; we must depend on the caller to finish the job. 2440 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2441 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2442 // and report their number to the caller. 2443 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2444 Label L_post_barrier; 2445 __ addptr(r14_length, count); // K = (original - remaining) oops 2446 __ movptr(rax, r14_length); // save the value 2447 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2448 __ jccb(Assembler::notZero, L_post_barrier); 2449 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2450 2451 // Come here on success only. 2452 __ BIND(L_do_card_marks); 2453 __ xorptr(rax, rax); // return 0 on success 2454 2455 __ BIND(L_post_barrier); 2456 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2457 2458 // Common exit point (success or failure). 2459 __ BIND(L_done); 2460 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2461 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2462 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2463 restore_arg_regs(); 2464 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2465 __ leave(); // required for proper stackwalking of RuntimeStub frame 2466 __ ret(0); 2467 2468 return start; 2469 } 2470 2471 // 2472 // Generate 'unsafe' array copy stub 2473 // Though just as safe as the other stubs, it takes an unscaled 2474 // size_t argument instead of an element count. 2475 // 2476 // Input: 2477 // c_rarg0 - source array address 2478 // c_rarg1 - destination array address 2479 // c_rarg2 - byte count, treated as ssize_t, can be zero 2480 // 2481 // Examines the alignment of the operands and dispatches 2482 // to a long, int, short, or byte copy loop. 
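// Illustrative view of the dispatch below, with bits = from | to | size:
//   if ((bits & 7) == 0)      jump to the long copy  (count = size >> 3);
//   else if ((bits & 3) == 0) jump to the int copy   (count = size >> 2);
//   else if ((bits & 1) == 0) jump to the short copy (count = size >> 1);
//   else                      jump to the byte copy  (count = size).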
2483 // 2484 address generate_unsafe_copy(const char *name, 2485 address byte_copy_entry, address short_copy_entry, 2486 address int_copy_entry, address long_copy_entry) { 2487 2488 Label L_long_aligned, L_int_aligned, L_short_aligned; 2489 2490 // Input registers (before setup_arg_regs) 2491 const Register from = c_rarg0; // source array address 2492 const Register to = c_rarg1; // destination array address 2493 const Register size = c_rarg2; // byte count (size_t) 2494 2495 // Register used as a temp 2496 const Register bits = rax; // test copy of low bits 2497 2498 __ align(CodeEntryAlignment); 2499 StubCodeMark mark(this, "StubRoutines", name); 2500 address start = __ pc(); 2501 2502 __ enter(); // required for proper stackwalking of RuntimeStub frame 2503 2504 // bump this on entry, not on exit: 2505 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2506 2507 __ mov(bits, from); 2508 __ orptr(bits, to); 2509 __ orptr(bits, size); 2510 2511 __ testb(bits, BytesPerLong-1); 2512 __ jccb(Assembler::zero, L_long_aligned); 2513 2514 __ testb(bits, BytesPerInt-1); 2515 __ jccb(Assembler::zero, L_int_aligned); 2516 2517 __ testb(bits, BytesPerShort-1); 2518 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2519 2520 __ BIND(L_short_aligned); 2521 __ shrptr(size, LogBytesPerShort); // size => short_count 2522 __ jump(RuntimeAddress(short_copy_entry)); 2523 2524 __ BIND(L_int_aligned); 2525 __ shrptr(size, LogBytesPerInt); // size => int_count 2526 __ jump(RuntimeAddress(int_copy_entry)); 2527 2528 __ BIND(L_long_aligned); 2529 __ shrptr(size, LogBytesPerLong); // size => qword_count 2530 __ jump(RuntimeAddress(long_copy_entry)); 2531 2532 return start; 2533 } 2534 2535 // Perform range checks on the proposed arraycopy. 2536 // Kills temp, but nothing else. 2537 // Also, clean the sign bits of src_pos and dst_pos. 2538 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2539 Register src_pos, // source position (c_rarg1) 2540 Register dst, // destination array oop (c_rarg2) 2541 Register dst_pos, // destination position (c_rarg3) 2542 Register length, 2543 Register temp, 2544 Label& L_failed) { 2545 BLOCK_COMMENT("arraycopy_range_checks:"); 2546 2547 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2548 __ movl(temp, length); 2549 __ addl(temp, src_pos); // src_pos + length 2550 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2551 __ jcc(Assembler::above, L_failed); 2552 2553 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2554 __ movl(temp, length); 2555 __ addl(temp, dst_pos); // dst_pos + length 2556 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2557 __ jcc(Assembler::above, L_failed); 2558 2559 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2560 // Move with sign extension can be used since they are positive.
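// (Illustrative view of the two checks just emitted: the unsigned 'above'
// compares behave like
//   if ((juint)(src_pos + length) > (juint)src->length()) goto L_failed;
//   if ((juint)(dst_pos + length) > (juint)dst->length()) goto L_failed;
// so a sum that overflows 32 bits shows up as a huge unsigned value and
// fails as well.)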
2561 __ movslq(src_pos, src_pos); 2562 __ movslq(dst_pos, dst_pos); 2563 2564 BLOCK_COMMENT("arraycopy_range_checks done"); 2565 } 2566 2567 // 2568 // Generate generic array copy stubs 2569 // 2570 // Input: 2571 // c_rarg0 - src oop 2572 // c_rarg1 - src_pos (32-bits) 2573 // c_rarg2 - dst oop 2574 // c_rarg3 - dst_pos (32-bits) 2575 // not Win64 2576 // c_rarg4 - element count (32-bits) 2577 // Win64 2578 // rsp+40 - element count (32-bits) 2579 // 2580 // Output: 2581 // rax == 0 - success 2582 // rax == -1^K - failure, where K is partial transfer count 2583 // 2584 address generate_generic_copy(const char *name, 2585 address byte_copy_entry, address short_copy_entry, 2586 address int_copy_entry, address oop_copy_entry, 2587 address long_copy_entry, address checkcast_copy_entry) { 2588 2589 Label L_failed, L_failed_0, L_objArray; 2590 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2591 2592 // Input registers 2593 const Register src = c_rarg0; // source array oop 2594 const Register src_pos = c_rarg1; // source position 2595 const Register dst = c_rarg2; // destination array oop 2596 const Register dst_pos = c_rarg3; // destination position 2597 #ifndef _WIN64 2598 const Register length = c_rarg4; 2599 #else 2600 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2601 #endif 2602 2603 { int modulus = CodeEntryAlignment; 2604 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2605 int advance = target - (__ offset() % modulus); 2606 if (advance < 0) advance += modulus; 2607 if (advance > 0) __ nop(advance); 2608 } 2609 StubCodeMark mark(this, "StubRoutines", name); 2610 2611 // Short-hop target to L_failed. Makes for denser prologue code. 2612 __ BIND(L_failed_0); 2613 __ jmp(L_failed); 2614 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2615 2616 __ align(CodeEntryAlignment); 2617 address start = __ pc(); 2618 2619 __ enter(); // required for proper stackwalking of RuntimeStub frame 2620 2621 // bump this on entry, not on exit: 2622 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2623 2624 //----------------------------------------------------------------------- 2625 // Assembler stub will be used for this call to arraycopy 2626 // if the following conditions are met: 2627 // 2628 // (1) src and dst must not be null. 2629 // (2) src_pos must not be negative. 2630 // (3) dst_pos must not be negative. 2631 // (4) length must not be negative. 2632 // (5) src klass and dst klass should be the same and not NULL. 2633 // (6) src and dst should be arrays. 2634 // (7) src_pos + length must not exceed length of src. 2635 // (8) dst_pos + length must not exceed length of dst. 2636 // 2637 2638 // if (src == NULL) return -1; 2639 __ testptr(src, src); // src oop 2640 size_t j1off = __ offset(); 2641 __ jccb(Assembler::zero, L_failed_0); 2642 2643 // if (src_pos < 0) return -1; 2644 __ testl(src_pos, src_pos); // src_pos (32-bits) 2645 __ jccb(Assembler::negative, L_failed_0); 2646 2647 // if (dst == NULL) return -1; 2648 __ testptr(dst, dst); // dst oop 2649 __ jccb(Assembler::zero, L_failed_0); 2650 2651 // if (dst_pos < 0) return -1; 2652 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2653 size_t j4off = __ offset(); 2654 __ jccb(Assembler::negative, L_failed_0); 2655 2656 // The first four tests are very dense code, 2657 // but not quite dense enough to put four 2658 // jumps in a 16-byte instruction fetch buffer. 
2659 // That's good, because some branch predictors 2660 // do not like jumps so close together. 2661 // Make sure of this. 2662 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2663 2664 // registers used as temp 2665 const Register r11_length = r11; // elements count to copy 2666 const Register r10_src_klass = r10; // array klass 2667 2668 // if (length < 0) return -1; 2669 __ movl(r11_length, length); // length (elements count, 32-bits value) 2670 __ testl(r11_length, r11_length); 2671 __ jccb(Assembler::negative, L_failed_0); 2672 2673 __ load_klass(r10_src_klass, src); 2674 #ifdef ASSERT 2675 // assert(src->klass() != NULL); 2676 { 2677 BLOCK_COMMENT("assert klasses not null {"); 2678 Label L1, L2; 2679 __ testptr(r10_src_klass, r10_src_klass); 2680 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2681 __ bind(L1); 2682 __ stop("broken null klass"); 2683 __ bind(L2); 2684 __ load_klass(rax, dst); 2685 __ cmpq(rax, 0); 2686 __ jcc(Assembler::equal, L1); // this would be broken also 2687 BLOCK_COMMENT("} assert klasses not null done"); 2688 } 2689 #endif 2690 2691 // Load layout helper (32-bits) 2692 // 2693 // |array_tag| | header_size | element_type | |log2_element_size| 2694 // 32 30 24 16 8 2 0 2695 // 2696 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2697 // 2698 2699 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2700 2701 // Handle objArrays completely differently... 2702 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2703 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2704 __ jcc(Assembler::equal, L_objArray); 2705 2706 // if (src->klass() != dst->klass()) return -1; 2707 __ load_klass(rax, dst); 2708 __ cmpq(r10_src_klass, rax); 2709 __ jcc(Assembler::notEqual, L_failed); 2710 2711 const Register rax_lh = rax; // layout helper 2712 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2713 2714 // if (!src->is_Array()) return -1; 2715 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2716 __ jcc(Assembler::greaterEqual, L_failed); 2717 2718 // At this point, it is known to be a typeArray (array_tag 0x3).
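// Further below, the layout helper is decoded roughly as (illustrative):
//   hdr_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
//   log2_esz = lh & _lh_log2_element_size_mask;
//   src_addr = src + hdr_size + ((ptrdiff_t)src_pos << log2_esz);
//   dst_addr = dst + hdr_size + ((ptrdiff_t)dst_pos << log2_esz);
// and control then tail-calls the primitive copy stub matching log2_esz.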
2719 #ifdef ASSERT 2720 { 2721 BLOCK_COMMENT("assert primitive array {"); 2722 Label L; 2723 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2724 __ jcc(Assembler::greaterEqual, L); 2725 __ stop("must be a primitive array"); 2726 __ bind(L); 2727 BLOCK_COMMENT("} assert primitive array done"); 2728 } 2729 #endif 2730 2731 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2732 r10, L_failed); 2733 2734 // TypeArrayKlass 2735 // 2736 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2737 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2738 // 2739 2740 const Register r10_offset = r10; // array offset 2741 const Register rax_elsize = rax_lh; // element size 2742 2743 __ movl(r10_offset, rax_lh); 2744 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2745 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2746 __ addptr(src, r10_offset); // src array offset 2747 __ addptr(dst, r10_offset); // dst array offset 2748 BLOCK_COMMENT("choose copy loop based on element size"); 2749 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2750 2751 // next registers should be set before the jump to corresponding stub 2752 const Register from = c_rarg0; // source array address 2753 const Register to = c_rarg1; // destination array address 2754 const Register count = c_rarg2; // elements count 2755 2756 // 'from', 'to', 'count' registers should be set in such order 2757 // since they are the same as 'src', 'src_pos', 'dst'. 2758 2759 __ BIND(L_copy_bytes); 2760 __ cmpl(rax_elsize, 0); 2761 __ jccb(Assembler::notEqual, L_copy_shorts); 2762 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2763 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2764 __ movl2ptr(count, r11_length); // length 2765 __ jump(RuntimeAddress(byte_copy_entry)); 2766 2767 __ BIND(L_copy_shorts); 2768 __ cmpl(rax_elsize, LogBytesPerShort); 2769 __ jccb(Assembler::notEqual, L_copy_ints); 2770 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2771 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2772 __ movl2ptr(count, r11_length); // length 2773 __ jump(RuntimeAddress(short_copy_entry)); 2774 2775 __ BIND(L_copy_ints); 2776 __ cmpl(rax_elsize, LogBytesPerInt); 2777 __ jccb(Assembler::notEqual, L_copy_longs); 2778 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2779 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2780 __ movl2ptr(count, r11_length); // length 2781 __ jump(RuntimeAddress(int_copy_entry)); 2782 2783 __ BIND(L_copy_longs); 2784 #ifdef ASSERT 2785 { 2786 BLOCK_COMMENT("assert long copy {"); 2787 Label L; 2788 __ cmpl(rax_elsize, LogBytesPerLong); 2789 __ jcc(Assembler::equal, L); 2790 __ stop("must be long copy, but elsize is wrong"); 2791 __ bind(L); 2792 BLOCK_COMMENT("} assert long copy done"); 2793 } 2794 #endif 2795 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2796 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2797 __ movl2ptr(count, r11_length); // length 2798 __ jump(RuntimeAddress(long_copy_entry)); 2799 2800 // ObjArrayKlass 2801 __ BIND(L_objArray); 2802 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2803 2804 Label L_plain_copy, L_checkcast_copy; 2805 // test array classes for subtyping 2806 __ load_klass(rax, dst); 2807 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2808 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2809 2810 // Identically typed arrays can be copied without element-wise checks. 2811 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2812 r10, L_failed); 2813 2814 __ lea(from, Address(src, src_pos, TIMES_OOP, 2815 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2816 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2817 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2818 __ movl2ptr(count, r11_length); // length 2819 __ BIND(L_plain_copy); 2820 __ jump(RuntimeAddress(oop_copy_entry)); 2821 2822 __ BIND(L_checkcast_copy); 2823 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2824 { 2825 // Before looking at dst.length, make sure dst is also an objArray. 2826 __ cmpl(Address(rax, lh_offset), objArray_lh); 2827 __ jcc(Assembler::notEqual, L_failed); 2828 2829 // It is safe to examine both src.length and dst.length. 2830 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2831 rax, L_failed); 2832 2833 const Register r11_dst_klass = r11; 2834 __ load_klass(r11_dst_klass, dst); // reload 2835 2836 // Marshal the base address arguments now, freeing registers. 2837 __ lea(from, Address(src, src_pos, TIMES_OOP, 2838 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2839 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2840 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2841 __ movl(count, length); // length (reloaded) 2842 Register sco_temp = c_rarg3; // this register is free now 2843 assert_different_registers(from, to, count, sco_temp, 2844 r11_dst_klass, r10_src_klass); 2845 assert_clean_int(count, sco_temp); 2846 2847 // Generate the type check. 2848 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2849 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2850 assert_clean_int(sco_temp, rax); 2851 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2852 2853 // Fetch destination element klass from the ObjArrayKlass header. 2854 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2855 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2856 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2857 assert_clean_int(sco_temp, rax); 2858 2859 // the checkcast_copy loop needs two extra arguments: 2860 assert(c_rarg3 == sco_temp, "#3 already in place"); 2861 // Set up arguments for checkcast_copy_entry. 
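// (After setup_arg_regs(4), from/to/length/ckoff live in rdi/rsi/rdx/rcx;
// the destination element klass goes in r8, which is exactly the register
// state generate_checkcast_copy()'s internal entry point expects.)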
2862 setup_arg_regs(4); 2863 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2864 __ jump(RuntimeAddress(checkcast_copy_entry)); 2865 } 2866 2867 __ BIND(L_failed); 2868 __ xorptr(rax, rax); 2869 __ notptr(rax); // return -1 2870 __ leave(); // required for proper stackwalking of RuntimeStub frame 2871 __ ret(0); 2872 2873 return start; 2874 } 2875 2876 void generate_arraycopy_stubs() { 2877 address entry; 2878 address entry_jbyte_arraycopy; 2879 address entry_jshort_arraycopy; 2880 address entry_jint_arraycopy; 2881 address entry_oop_arraycopy; 2882 address entry_jlong_arraycopy; 2883 address entry_checkcast_arraycopy; 2884 2885 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2886 "jbyte_disjoint_arraycopy"); 2887 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2888 "jbyte_arraycopy"); 2889 2890 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2891 "jshort_disjoint_arraycopy"); 2892 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2893 "jshort_arraycopy"); 2894 2895 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2896 "jint_disjoint_arraycopy"); 2897 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2898 &entry_jint_arraycopy, "jint_arraycopy"); 2899 2900 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2901 "jlong_disjoint_arraycopy"); 2902 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2903 &entry_jlong_arraycopy, "jlong_arraycopy"); 2904 2905 2906 if (UseCompressedOops) { 2907 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2908 "oop_disjoint_arraycopy"); 2909 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2910 &entry_oop_arraycopy, "oop_arraycopy"); 2911 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2912 "oop_disjoint_arraycopy_uninit", 2913 /*dest_uninitialized*/true); 2914 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2915 NULL, "oop_arraycopy_uninit", 2916 /*dest_uninitialized*/true); 2917 } else { 2918 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2919 "oop_disjoint_arraycopy"); 2920 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2921 &entry_oop_arraycopy, "oop_arraycopy"); 2922 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2923 "oop_disjoint_arraycopy_uninit", 2924 /*dest_uninitialized*/true); 2925 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2926 NULL, "oop_arraycopy_uninit", 2927 /*dest_uninitialized*/true); 2928 } 2929 2930 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2931 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2932 /*dest_uninitialized*/true); 2933 2934 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2935 entry_jbyte_arraycopy, 2936 entry_jshort_arraycopy, 2937 entry_jint_arraycopy, 2938 entry_jlong_arraycopy); 2939 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy", 2940 entry_jbyte_arraycopy, 2941 entry_jshort_arraycopy, 2942 entry_jint_arraycopy, 2943 entry_oop_arraycopy, 2944 entry_jlong_arraycopy, 2945 entry_checkcast_arraycopy); 2946 2947 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2948 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2949 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2950 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2951 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2952 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2953 2954 // We don't generate specialized code for HeapWord-aligned source 2955 // arrays, so just use the code we've already generated 2956 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2957 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2958 2959 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2960 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2961 2962 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2963 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2964 2965 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2966 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2967 2968 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2969 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2970 2971 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2972 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2973 } 2974 2975 // AES intrinsic stubs 2976 enum {AESBlockSize = 16}; 2977 2978 address generate_key_shuffle_mask() { 2979 __ align(16); 2980 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2981 address start = __ pc(); 2982 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2983 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2984 return start; 2985 } 2986 2987 address generate_counter_shuffle_mask() { 2988 __ align(16); 2989 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2990 address start = __ pc(); 2991 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 2992 __ emit_data64(0x0001020304050607, relocInfo::none); 2993 return start; 2994 } 2995 2996 // Utility routine for loading a 128-bit key word in little endian format; 2997 // can optionally specify that the shuffle mask is already in an xmm register 2998 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2999 __ movdqu(xmmdst, Address(key, offset)); 3000 if (xmm_shuf_mask != NULL) { 3001 __ pshufb(xmmdst, xmm_shuf_mask); 3002 } else { 3003 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3004 } 3005 } 3006 3007 // Utility routine for incrementing the 128-bit counter (IV in CTR mode) 3008 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3009 __ pextrq(reg, xmmdst, 0x0); 3010 __ addq(reg, inc_delta); 3011 __ pinsrq(xmmdst, reg, 0x0); 3012 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3013 __ pextrq(reg, xmmdst, 0x01); // Carry 3014 __ addq(reg, 0x01); 3015 __
pinsrq(xmmdst, reg, 0x01); //Carry end 3016 __ BIND(next_block); // next instruction 3017 } 3018 3019 // Arguments: 3020 // 3021 // Inputs: 3022 // c_rarg0 - source byte array address 3023 // c_rarg1 - destination byte array address 3024 // c_rarg2 - K (key) in little endian int array 3025 // 3026 address generate_aescrypt_encryptBlock() { 3027 assert(UseAES, "need AES instructions and misaligned SSE support"); 3028 __ align(CodeEntryAlignment); 3029 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3030 Label L_doLast; 3031 address start = __ pc(); 3032 3033 const Register from = c_rarg0; // source array address 3034 const Register to = c_rarg1; // destination array address 3035 const Register key = c_rarg2; // key array address 3036 const Register keylen = rax; 3037 3038 const XMMRegister xmm_result = xmm0; 3039 const XMMRegister xmm_key_shuf_mask = xmm1; 3040 // On win64 xmm6-xmm15 must be preserved so don't use them. 3041 const XMMRegister xmm_temp1 = xmm2; 3042 const XMMRegister xmm_temp2 = xmm3; 3043 const XMMRegister xmm_temp3 = xmm4; 3044 const XMMRegister xmm_temp4 = xmm5; 3045 3046 __ enter(); // required for proper stackwalking of RuntimeStub frame 3047 3048 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3049 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3050 3051 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3052 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3053 3054 // For encryption, the java expanded key ordering is just what we need 3055 // we don't know if the key is aligned, hence not using load-execute form 3056 3057 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3058 __ pxor(xmm_result, xmm_temp1); 3059 3060 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3061 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3062 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3063 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3064 3065 __ aesenc(xmm_result, xmm_temp1); 3066 __ aesenc(xmm_result, xmm_temp2); 3067 __ aesenc(xmm_result, xmm_temp3); 3068 __ aesenc(xmm_result, xmm_temp4); 3069 3070 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3071 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3072 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3073 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3074 3075 __ aesenc(xmm_result, xmm_temp1); 3076 __ aesenc(xmm_result, xmm_temp2); 3077 __ aesenc(xmm_result, xmm_temp3); 3078 __ aesenc(xmm_result, xmm_temp4); 3079 3080 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3081 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3082 3083 __ cmpl(keylen, 44); 3084 __ jccb(Assembler::equal, L_doLast); 3085 3086 __ aesenc(xmm_result, xmm_temp1); 3087 __ aesenc(xmm_result, xmm_temp2); 3088 3089 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3090 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3091 3092 __ cmpl(keylen, 52); 3093 __ jccb(Assembler::equal, L_doLast); 3094 3095 __ aesenc(xmm_result, xmm_temp1); 3096 __ aesenc(xmm_result, xmm_temp2); 3097 3098 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3099 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3100 3101 __ BIND(L_doLast); 3102 __ aesenc(xmm_result, xmm_temp1); 3103 __ aesenclast(xmm_result, xmm_temp2); 3104 __ movdqu(Address(to, 0), xmm_result); // store the result 3105 __ xorptr(rax, rax); // return 0 3106 __ leave(); // required for proper stackwalking of RuntimeStub frame 3107 __ 
ret(0); 3108 3109 return start; 3110 } 3111 3112 3113 // Arguments: 3114 // 3115 // Inputs: 3116 // c_rarg0 - source byte array address 3117 // c_rarg1 - destination byte array address 3118 // c_rarg2 - K (key) in little endian int array 3119 // 3120 address generate_aescrypt_decryptBlock() { 3121 assert(UseAES, "need AES instructions and misaligned SSE support"); 3122 __ align(CodeEntryAlignment); 3123 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3124 Label L_doLast; 3125 address start = __ pc(); 3126 3127 const Register from = c_rarg0; // source array address 3128 const Register to = c_rarg1; // destination array address 3129 const Register key = c_rarg2; // key array address 3130 const Register keylen = rax; 3131 3132 const XMMRegister xmm_result = xmm0; 3133 const XMMRegister xmm_key_shuf_mask = xmm1; 3134 // On win64 xmm6-xmm15 must be preserved so don't use them. 3135 const XMMRegister xmm_temp1 = xmm2; 3136 const XMMRegister xmm_temp2 = xmm3; 3137 const XMMRegister xmm_temp3 = xmm4; 3138 const XMMRegister xmm_temp4 = xmm5; 3139 3140 __ enter(); // required for proper stackwalking of RuntimeStub frame 3141 3142 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3143 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3144 3145 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3146 __ movdqu(xmm_result, Address(from, 0)); 3147 3148 // for decryption java expanded key ordering is rotated one position from what we want 3149 // so we start from 0x10 here and hit 0x00 last 3150 // we don't know if the key is aligned, hence not using load-execute form 3151 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3152 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3153 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3154 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3155 3156 __ pxor (xmm_result, xmm_temp1); 3157 __ aesdec(xmm_result, xmm_temp2); 3158 __ aesdec(xmm_result, xmm_temp3); 3159 __ aesdec(xmm_result, xmm_temp4); 3160 3161 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3162 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3163 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3164 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3165 3166 __ aesdec(xmm_result, xmm_temp1); 3167 __ aesdec(xmm_result, xmm_temp2); 3168 __ aesdec(xmm_result, xmm_temp3); 3169 __ aesdec(xmm_result, xmm_temp4); 3170 3171 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3172 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3173 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3174 3175 __ cmpl(keylen, 44); 3176 __ jccb(Assembler::equal, L_doLast); 3177 3178 __ aesdec(xmm_result, xmm_temp1); 3179 __ aesdec(xmm_result, xmm_temp2); 3180 3181 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3182 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3183 3184 __ cmpl(keylen, 52); 3185 __ jccb(Assembler::equal, L_doLast); 3186 3187 __ aesdec(xmm_result, xmm_temp1); 3188 __ aesdec(xmm_result, xmm_temp2); 3189 3190 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3191 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3192 3193 __ BIND(L_doLast); 3194 __ aesdec(xmm_result, xmm_temp1); 3195 __ aesdec(xmm_result, xmm_temp2); 3196 3197 // for decryption the aesdeclast operation is always on key+0x00 3198 __ aesdeclast(xmm_result, xmm_temp3); 3199 __ movdqu(Address(to, 0), xmm_result); // store the result 3200 __ xorptr(rax, rax); // return 0 3201 __ leave(); // 
required for proper stackwalking of RuntimeStub frame 3202 __ ret(0); 3203 3204 return start; 3205 } 3206 3207 3208 // Arguments: 3209 // 3210 // Inputs: 3211 // c_rarg0 - source byte array address 3212 // c_rarg1 - destination byte array address 3213 // c_rarg2 - K (key) in little endian int array 3214 // c_rarg3 - r vector byte array address 3215 // c_rarg4 - input length 3216 // 3217 // Output: 3218 // rax - input length 3219 // 3220 address generate_cipherBlockChaining_encryptAESCrypt() { 3221 assert(UseAES, "need AES instructions and misaligned SSE support"); 3222 __ align(CodeEntryAlignment); 3223 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3224 address start = __ pc(); 3225 3226 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3227 const Register from = c_rarg0; // source array address 3228 const Register to = c_rarg1; // destination array address 3229 const Register key = c_rarg2; // key array address 3230 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3231 // and left with the results of the last encryption block 3232 #ifndef _WIN64 3233 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3234 #else 3235 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3236 const Register len_reg = r11; // pick the volatile windows register 3237 #endif 3238 const Register pos = rax; 3239 3240 // xmm register assignments for the loops below 3241 const XMMRegister xmm_result = xmm0; 3242 const XMMRegister xmm_temp = xmm1; 3243 // keys 0-10 preloaded into xmm2-xmm12 3244 const int XMM_REG_NUM_KEY_FIRST = 2; 3245 const int XMM_REG_NUM_KEY_LAST = 15; 3246 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3247 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3248 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3249 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3250 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3251 3252 __ enter(); // required for proper stackwalking of RuntimeStub frame 3253 3254 #ifdef _WIN64 3255 // on win64, fill len_reg from stack position 3256 __ movl(len_reg, len_mem); 3257 #else 3258 __ push(len_reg); // Save 3259 #endif 3260 3261 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3262 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3263 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3264 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3265 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3266 offset += 0x10; 3267 } 3268 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3269 3270 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3271 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3272 __ cmpl(rax, 44); 3273 __ jcc(Assembler::notEqual, L_key_192_256); 3274 3275 // 128 bit code follows here 3276 __ movptr(pos, 0); 3277 __ align(OptoLoopAlignment); 3278 3279 __ BIND(L_loopTop_128); 3280 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3281 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3282 __ pxor (xmm_result, xmm_key0); // do the 
aes rounds 3283 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3284 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3285 } 3286 __ aesenclast(xmm_result, xmm_key10); 3287 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3288 // no need to store r to memory until we exit 3289 __ addptr(pos, AESBlockSize); 3290 __ subptr(len_reg, AESBlockSize); 3291 __ jcc(Assembler::notEqual, L_loopTop_128); 3292 3293 __ BIND(L_exit); 3294 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3295 3296 #ifdef _WIN64 3297 __ movl(rax, len_mem); 3298 #else 3299 __ pop(rax); // return length 3300 #endif 3301 __ leave(); // required for proper stackwalking of RuntimeStub frame 3302 __ ret(0); 3303 3304 __ BIND(L_key_192_256); 3305 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3306 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3307 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3308 __ cmpl(rax, 52); 3309 __ jcc(Assembler::notEqual, L_key_256); 3310 3311 // 192-bit code follows here (could be changed to use more xmm registers) 3312 __ movptr(pos, 0); 3313 __ align(OptoLoopAlignment); 3314 3315 __ BIND(L_loopTop_192); 3316 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3317 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3318 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3319 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3320 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3321 } 3322 __ aesenclast(xmm_result, xmm_key12); 3323 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3324 // no need to store r to memory until we exit 3325 __ addptr(pos, AESBlockSize); 3326 __ subptr(len_reg, AESBlockSize); 3327 __ jcc(Assembler::notEqual, L_loopTop_192); 3328 __ jmp(L_exit); 3329 3330 __ BIND(L_key_256); 3331 // 256-bit code follows here (could be changed to use more xmm registers) 3332 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3333 __ movptr(pos, 0); 3334 __ align(OptoLoopAlignment); 3335 3336 __ BIND(L_loopTop_256); 3337 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3338 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3339 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3340 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3341 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3342 } 3343 load_key(xmm_temp, key, 0xe0); 3344 __ aesenclast(xmm_result, xmm_temp); 3345 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3346 // no need to store r to memory until we exit 3347 __ addptr(pos, AESBlockSize); 3348 __ subptr(len_reg, AESBlockSize); 3349 __ jcc(Assembler::notEqual, L_loopTop_256); 3350 __ jmp(L_exit); 3351 3352 return start; 3353 } 3354 3355 // Safefetch stubs. 
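// A SafeFetch load is allowed to fault: *fault_pc marks the potentially
// faulting load, and if it does fault the VM's signal handler resumes
// execution at *continuation_pc, where errValue (still live in c_rarg1)
// becomes the result. Behaviorally the stub matches this sketch
// (illustrative only; is_readable() is a hypothetical predicate, the real
// stub relies on the fault_pc/continuation_pc pair instead of a check):
//
//   static int SafeFetch32_reference(int* adr, int errValue) {
//     return is_readable(adr) ? *adr : errValue;
//   }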
3356 void generate_safefetch(const char* name, int size, address* entry,
3357 address* fault_pc, address* continuation_pc) {
3358 // safefetch signatures:
3359 // int SafeFetch32(int* adr, int errValue);
3360 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3361 //
3362 // arguments:
3363 // c_rarg0 = adr
3364 // c_rarg1 = errValue
3365 //
3366 // result:
3367 // rax = *adr or errValue
3368
3369 StubCodeMark mark(this, "StubRoutines", name);
3370
3371 // Entry point, pc or function descriptor.
3372 *entry = __ pc();
3373
3374 // Load *adr into c_rarg1, may fault.
3375 *fault_pc = __ pc();
3376 switch (size) {
3377 case 4:
3378 // int32_t
3379 __ movl(c_rarg1, Address(c_rarg0, 0));
3380 break;
3381 case 8:
3382 // int64_t
3383 __ movq(c_rarg1, Address(c_rarg0, 0));
3384 break;
3385 default:
3386 ShouldNotReachHere();
3387 }
3388
3389 // return errValue or *adr
3390 *continuation_pc = __ pc();
3391 __ movq(rax, c_rarg1);
3392 __ ret(0);
3393 }
3394
3395 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
3396 // to hide instruction latency
3397 //
3398 // Arguments:
3399 //
3400 // Inputs:
3401 // c_rarg0 - source byte array address
3402 // c_rarg1 - destination byte array address
3403 // c_rarg2 - K (key) in little endian int array
3404 // c_rarg3 - r vector byte array address
3405 // c_rarg4 - input length
3406 //
3407 // Output:
3408 // rax - input length
3409 //
3410 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3411 assert(UseAES, "need AES instructions and misaligned SSE support");
3412 __ align(CodeEntryAlignment);
3413 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3414 address start = __ pc();
3415
3416 const Register from = c_rarg0; // source array address
3417 const Register to = c_rarg1; // destination array address
3418 const Register key = c_rarg2; // key array address
3419 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3420 // and left with the results of the last encryption block
3421 #ifndef _WIN64
3422 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3423 #else
3424 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3425 const Register len_reg = r11; // pick the volatile windows register
3426 #endif
3427 const Register pos = rax;
3428
3429 const int PARALLEL_FACTOR = 4;
3430 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256
3431
3432 Label L_exit;
3433 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256
3434 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256
3435 Label L_singleBlock_loopTop[3]; // 128, 192, 256
3436 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256
3437 Label L_multiBlock_loopTop[3]; // 128, 192, 256
3438
3439 // keys 0-10 preloaded into xmm5-xmm15
3440 const int XMM_REG_NUM_KEY_FIRST = 5;
3441 const int XMM_REG_NUM_KEY_LAST = 15;
3442 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3443 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
3444
3445 __ enter(); // required for proper stackwalking of RuntimeStub frame
3446
3447 #ifdef _WIN64
3448 // on win64, fill len_reg from stack position
3449 __ movl(len_reg, len_mem);
3450 #else
3451 __ push(len_reg); // Save
3452 #endif
3453 __ push(rbx);
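// CBC decryption, unlike encryption, carries no dependency between output
// blocks: P[i] = D(K, C[i]) ^ C[i-1], with C[-1] = IV, reads only ciphertext.
// The generated loop exploits this by decrypting PARALLEL_FACTOR (4) blocks
// per iteration to hide aesdec latency. Reference shape of the computation
// (a sketch; aes_decrypt() stands for the aesdec/aesdeclast sequence below):
//
//   // for (int i = 0; i < nblocks; i++)
//   //   out[i] = aes_decrypt(key, in[i]) ^ (i == 0 ? iv : in[i - 1]);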
3454 // the java expanded key ordering is rotated one position from what we want
3455 // so we start from 0x10 here and hit 0x00 last
3456 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3457 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3458 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
3459 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
3460 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3461 offset += 0x10;
3462 }
3463 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
3464
3465 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block
3466
3467 // registers holding the four results in the parallelized loop
3468 const XMMRegister xmm_result0 = xmm0;
3469 const XMMRegister xmm_result1 = xmm2;
3470 const XMMRegister xmm_result2 = xmm3;
3471 const XMMRegister xmm_result3 = xmm4;
3472
3473 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec
3474
3475 __ xorptr(pos, pos);
3476
3477 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
3478 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3479 __ cmpl(rbx, 52);
3480 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
3481 __ cmpl(rbx, 60);
3482 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);
3483
3484 #define DoFour(opc, src_reg) \
3485 __ opc(xmm_result0, src_reg); \
3486 __ opc(xmm_result1, src_reg); \
3487 __ opc(xmm_result2, src_reg); \
3488 __ opc(xmm_result3, src_reg); \
3489
3490 for (int k = 0; k < 3; ++k) {
3491 __ BIND(L_multiBlock_loopTopHead[k]);
3492 if (k != 0) {
3493 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
3494 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
3495 }
3496 if (k == 1) {
3497 __ subptr(rsp, 6 * wordSize);
3498 __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
3499 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3500 __ movdqu(Address(rsp, 2 * wordSize), xmm15);
3501 load_key(xmm1, key, 0xc0); // 0xc0;
3502 __ movdqu(Address(rsp, 4 * wordSize), xmm1);
3503 } else if (k == 2) {
3504 __ subptr(rsp, 10 * wordSize);
3505 __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
3506 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
3507 __ movdqu(Address(rsp, 6 * wordSize), xmm15);
3508 load_key(xmm1, key, 0xe0); // 0xe0;
3509 __ movdqu(Address(rsp, 8 * wordSize), xmm1);
3510 load_key(xmm15, key, 0xb0); // 0xb0;
3511 __ movdqu(Address(rsp, 2 * wordSize), xmm15);
3512 load_key(xmm1, key, 0xc0); // 0xc0;
3513 __ movdqu(Address(rsp, 4 * wordSize), xmm1);
3514 }
3515 __ align(OptoLoopAlignment);
3516 __ BIND(L_multiBlock_loopTop[k]);
3517 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
3518 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);
3519
3520 if (k != 0) {
3521 __ movdqu(xmm15, Address(rsp, 2 * wordSize));
3522 __ movdqu(xmm1, Address(rsp, 4 * wordSize));
3523 }
3524
3525 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers
3526 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
3527 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
3528 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
3529
3530 DoFour(pxor, xmm_key_first);
3531 if (k == 0) {
3532 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
3533 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3534 }
3535 DoFour(aesdeclast, xmm_key_last); 3536 } else if (k == 1) { 3537 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3538 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3539 } 3540 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3541 DoFour(aesdec, xmm1); // key : 0xc0 3542 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3543 DoFour(aesdeclast, xmm_key_last); 3544 } else if (k == 2) { 3545 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3546 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3547 } 3548 DoFour(aesdec, xmm1); // key : 0xc0 3549 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3550 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3551 DoFour(aesdec, xmm15); // key : 0xd0 3552 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3553 DoFour(aesdec, xmm1); // key : 0xe0 3554 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3555 DoFour(aesdeclast, xmm_key_last); 3556 } 3557 3558 // for each result, xor with the r vector of previous cipher block 3559 __ pxor(xmm_result0, xmm_prev_block_cipher); 3560 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3561 __ pxor(xmm_result1, xmm_prev_block_cipher); 3562 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3563 __ pxor(xmm_result2, xmm_prev_block_cipher); 3564 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3565 __ pxor(xmm_result3, xmm_prev_block_cipher); 3566 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3567 if (k != 0) { 3568 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3569 } 3570 3571 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3572 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3573 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3574 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3575 3576 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3577 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3578 __ jmp(L_multiBlock_loopTop[k]); 3579 3580 // registers used in the non-parallelized loops 3581 // xmm register assignments for the loops below 3582 const XMMRegister xmm_result = xmm0; 3583 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3584 const XMMRegister xmm_key11 = xmm3; 3585 const XMMRegister xmm_key12 = xmm4; 3586 const XMMRegister key_tmp = xmm4; 3587 3588 __ BIND(L_singleBlock_loopTopHead[k]); 3589 if (k == 1) { 3590 __ addptr(rsp, 6 * wordSize); 3591 } else if (k == 2) { 3592 __ addptr(rsp, 10 * wordSize); 3593 } 3594 __ cmpptr(len_reg, 0); // any blocks left?? 
3595 __ jcc(Assembler::equal, L_exit);
3596 __ BIND(L_singleBlock_loopTopHead2[k]);
3597 if (k == 1) {
3598 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3599 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
3600 }
3601 if (k == 2) {
3602 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
3603 }
3604 __ align(OptoLoopAlignment);
3605 __ BIND(L_singleBlock_loopTop[k]);
3606 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3607 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3608 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
3609 for (int rnum = 1; rnum <= 9; rnum++) {
3610 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3611 }
3612 if (k == 1) {
3613 __ aesdec(xmm_result, xmm_key11);
3614 __ aesdec(xmm_result, xmm_key12);
3615 }
3616 if (k == 2) {
3617 __ aesdec(xmm_result, xmm_key11);
3618 load_key(key_tmp, key, 0xc0);
3619 __ aesdec(xmm_result, key_tmp);
3620 load_key(key_tmp, key, 0xd0);
3621 __ aesdec(xmm_result, key_tmp);
3622 load_key(key_tmp, key, 0xe0);
3623 __ aesdec(xmm_result, key_tmp);
3624 }
3625
3626 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3627 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3628 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3629 // no need to store r to memory until we exit
3630 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3631 __ addptr(pos, AESBlockSize);
3632 __ subptr(len_reg, AESBlockSize);
3633 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
3634 if (k != 2) {
3635 __ jmp(L_exit);
3636 }
3637 } //for 128/192/256
3638
3639 __ BIND(L_exit);
3640 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3641 __ pop(rbx);
3642 #ifdef _WIN64
3643 __ movl(rax, len_mem);
3644 #else
3645 __ pop(rax); // return length
3646 #endif
3647 __ leave(); // required for proper stackwalking of RuntimeStub frame
3648 __ ret(0);
3649 return start;
3650 }
3651
3652 address generate_upper_word_mask() {
3653 __ align(64);
3654 StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
3655 address start = __ pc();
3656 __ emit_data64(0x0000000000000000, relocInfo::none);
3657 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
3658 return start;
3659 }
3660
3661 address generate_shuffle_byte_flip_mask() {
3662 __ align(64);
3663 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
3664 address start = __ pc();
3665 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3666 __ emit_data64(0x0001020304050607, relocInfo::none);
3667 return start;
3668 }
3669
3670 // ofs and limit are used for multi-block byte array.
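// For the multi-block entry point the stub consumes 64-byte input blocks
// (the SHA-1/SHA-256 block size) from b[ofs .. limit] and returns the
// updated offset in rax. Sketch of the Java-level contract it implements:
//
//   // while (ofs <= limit) { implCompress(b, ofs); ofs += 64; }
//   // return ofs;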
3671 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3672 address generate_sha1_implCompress(bool multi_block, const char *name) {
3673 __ align(CodeEntryAlignment);
3674 StubCodeMark mark(this, "StubRoutines", name);
3675 address start = __ pc();
3676
3677 Register buf = c_rarg0;
3678 Register state = c_rarg1;
3679 Register ofs = c_rarg2;
3680 Register limit = c_rarg3;
3681
3682 const XMMRegister abcd = xmm0;
3683 const XMMRegister e0 = xmm1;
3684 const XMMRegister e1 = xmm2;
3685 const XMMRegister msg0 = xmm3;
3686
3687 const XMMRegister msg1 = xmm4;
3688 const XMMRegister msg2 = xmm5;
3689 const XMMRegister msg3 = xmm6;
3690 const XMMRegister shuf_mask = xmm7;
3691
3692 __ enter();
3693
3694 __ subptr(rsp, 4 * wordSize);
3695
3696 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
3697 buf, state, ofs, limit, rsp, multi_block);
3698
3699 __ addptr(rsp, 4 * wordSize);
3700
3701 __ leave();
3702 __ ret(0);
3703 return start;
3704 }
3705
3706 address generate_pshuffle_byte_flip_mask() {
3707 __ align(64);
3708 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
3709 address start = __ pc();
3710 __ emit_data64(0x0405060700010203, relocInfo::none);
3711 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3712
3713 if (VM_Version::supports_avx2()) {
3714 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
3715 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3716 // _SHUF_00BA
3717 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3718 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3719 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3720 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3721 // _SHUF_DC00
3722 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3723 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3724 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3725 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3726 }
3727
3728 return start;
3729 }
3730
3731 // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
3732 address generate_pshuffle_byte_flip_mask_sha512() {
3733 __ align(32);
3734 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
3735 address start = __ pc();
3736 if (VM_Version::supports_avx2()) {
3737 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
3738 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3739 __ emit_data64(0x1011121314151617, relocInfo::none);
3740 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
3741 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
3742 __ emit_data64(0x0000000000000000, relocInfo::none);
3743 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3744 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3745 }
3746
3747 return start;
3748 }
3749
3750 // ofs and limit are used for multi-block byte array.
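// The SHA-256 stub below follows the same multi-block contract as SHA-1.
// Its code path is chosen once, at stub-generation time, mirroring the
// dispatch visible in the body below:
//
//   // if (VM_Version::supports_sha())        -> fast_sha256 (SHA extensions)
//   // else if (VM_Version::supports_avx2())  -> sha256_AVX2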
3751 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3752 address generate_sha256_implCompress(bool multi_block, const char *name) { 3753 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3754 __ align(CodeEntryAlignment); 3755 StubCodeMark mark(this, "StubRoutines", name); 3756 address start = __ pc(); 3757 3758 Register buf = c_rarg0; 3759 Register state = c_rarg1; 3760 Register ofs = c_rarg2; 3761 Register limit = c_rarg3; 3762 3763 const XMMRegister msg = xmm0; 3764 const XMMRegister state0 = xmm1; 3765 const XMMRegister state1 = xmm2; 3766 const XMMRegister msgtmp0 = xmm3; 3767 3768 const XMMRegister msgtmp1 = xmm4; 3769 const XMMRegister msgtmp2 = xmm5; 3770 const XMMRegister msgtmp3 = xmm6; 3771 const XMMRegister msgtmp4 = xmm7; 3772 3773 const XMMRegister shuf_mask = xmm8; 3774 3775 __ enter(); 3776 3777 __ subptr(rsp, 4 * wordSize); 3778 3779 if (VM_Version::supports_sha()) { 3780 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3781 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3782 } else if (VM_Version::supports_avx2()) { 3783 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3784 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3785 } 3786 __ addptr(rsp, 4 * wordSize); 3787 __ vzeroupper(); 3788 __ leave(); 3789 __ ret(0); 3790 return start; 3791 } 3792 3793 address generate_sha512_implCompress(bool multi_block, const char *name) { 3794 assert(VM_Version::supports_avx2(), ""); 3795 assert(VM_Version::supports_bmi2(), ""); 3796 __ align(CodeEntryAlignment); 3797 StubCodeMark mark(this, "StubRoutines", name); 3798 address start = __ pc(); 3799 3800 Register buf = c_rarg0; 3801 Register state = c_rarg1; 3802 Register ofs = c_rarg2; 3803 Register limit = c_rarg3; 3804 3805 const XMMRegister msg = xmm0; 3806 const XMMRegister state0 = xmm1; 3807 const XMMRegister state1 = xmm2; 3808 const XMMRegister msgtmp0 = xmm3; 3809 const XMMRegister msgtmp1 = xmm4; 3810 const XMMRegister msgtmp2 = xmm5; 3811 const XMMRegister msgtmp3 = xmm6; 3812 const XMMRegister msgtmp4 = xmm7; 3813 3814 const XMMRegister shuf_mask = xmm8; 3815 3816 __ enter(); 3817 3818 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3819 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3820 3821 __ vzeroupper(); 3822 __ leave(); 3823 __ ret(0); 3824 return start; 3825 } 3826 3827 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3828 // to hide instruction latency 3829 // 3830 // Arguments: 3831 // 3832 // Inputs: 3833 // c_rarg0 - source byte array address 3834 // c_rarg1 - destination byte array address 3835 // c_rarg2 - K (key) in little endian int array 3836 // c_rarg3 - counter vector byte array address 3837 // Linux 3838 // c_rarg4 - input length 3839 // c_rarg5 - saved encryptedCounter start 3840 // rbp + 6 * wordSize - saved used length 3841 // Windows 3842 // rbp + 6 * wordSize - input length 3843 // rbp + 7 * wordSize - saved encryptedCounter start 3844 // rbp + 8 * wordSize - saved used length 3845 // 3846 // Output: 3847 // rax - input length 3848 // 3849 address generate_counterMode_AESCrypt_Parallel() { 3850 assert(UseAES, "need AES instructions and misaligned SSE support"); 3851 __ align(CodeEntryAlignment); 3852 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3853 address start = __ pc(); 3854 const Register from = c_rarg0; // source array address 3855 const Register to = c_rarg1; 
// destination array address
3856 const Register key = c_rarg2; // key array address
3857 const Register counter = c_rarg3; // counter byte array initialized from counter array address
3858 // and updated with the incremented counter in the end
3859 #ifndef _WIN64
3860 const Register len_reg = c_rarg4;
3861 const Register saved_encCounter_start = c_rarg5;
3862 const Register used_addr = r10;
3863 const Address used_mem(rbp, 2 * wordSize);
3864 const Register used = r11;
3865 #else
3866 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3867 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encryptedCounter is on stack on Win64
3868 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
3869 const Register len_reg = r10; // pick the first volatile windows register
3870 const Register saved_encCounter_start = r11;
3871 const Register used_addr = r13;
3872 const Register used = r14;
3873 #endif
3874 const Register pos = rax;
3875
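// CTR mode turns AES into a stream cipher: the i-th keystream block is
// E(K, counter + i) and the output is keystream XOR input, so blocks are
// fully independent. The loop below therefore encrypts PARALLEL_FACTOR (6)
// counters at once to hide aesenc latency. Reference shape (a sketch;
// enc_counter() stands for the aesenc/aesenclast sequence generated below):
//
//   // for (int i = 0; i < nblocks; i++)
//   //   out[i] = in[i] ^ enc_counter(key, ctr + i);  // big-endian 128-bit add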
3876 const int PARALLEL_FACTOR = 6;
3877 const XMMRegister xmm_counter_shuf_mask = xmm0;
3878 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3879 const XMMRegister xmm_curr_counter = xmm2;
3880
3881 const XMMRegister xmm_key_tmp0 = xmm3;
3882 const XMMRegister xmm_key_tmp1 = xmm4;
3883
3884 // registers holding the six results in the parallelized loop
3885 const XMMRegister xmm_result0 = xmm5;
3886 const XMMRegister xmm_result1 = xmm6;
3887 const XMMRegister xmm_result2 = xmm7;
3888 const XMMRegister xmm_result3 = xmm8;
3889 const XMMRegister xmm_result4 = xmm9;
3890 const XMMRegister xmm_result5 = xmm10;
3891
3892 const XMMRegister xmm_from0 = xmm11;
3893 const XMMRegister xmm_from1 = xmm12;
3894 const XMMRegister xmm_from2 = xmm13;
3895 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64.
3896 const XMMRegister xmm_from4 = xmm3; // reuse xmm3~4; xmm_key_tmp0~1 are not needed while the input text is loaded
3897 const XMMRegister xmm_from5 = xmm4;
3898
3899 // for key_128, key_192, key_256
3900 const int rounds[3] = {10, 12, 14};
3901 Label L_exit_preLoop, L_preLoop_start;
3902 Label L_multiBlock_loopTop[3];
3903 Label L_singleBlockLoopTop[3];
3904 Label L__incCounter[3][6]; // for 6 blocks
3905 Label L__incCounter_single[3]; // for single block, key128, key192, key256
3906 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
3907 Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];
3908
3909 Label L_exit;
3910
3911 __ enter(); // required for proper stackwalking of RuntimeStub frame
3912
3913 #ifdef _WIN64
3914 // allocate spill slots for r13, r14
3915 enum {
3916 saved_r13_offset,
3917 saved_r14_offset
3918 };
3919 __ subptr(rsp, 2 * wordSize);
3920 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
3921 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
3922
3923 // on win64, fill len_reg from stack position
3924 __ movl(len_reg, len_mem);
3925 __ movptr(saved_encCounter_start, saved_encCounter_mem);
3926 __ movptr(used_addr, used_mem);
3927 __ movl(used, Address(used_addr, 0));
3928 #else
3929 __ push(len_reg); // Save
3930 __ movptr(used_addr, used_mem);
3931 __ movl(used, Address(used_addr, 0));
3932 #endif
3933
3934 __ push(rbx); // Save RBX
3935 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
3936 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
3937 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
3938 __ movptr(pos, 0);
3939
3940 // Use the partially used encrypted counter from the last invocation
3941 __ BIND(L_preLoop_start);
3942 __ cmpptr(used, 16);
3943 __ jcc(Assembler::aboveEqual, L_exit_preLoop);
3944 __ cmpptr(len_reg, 0);
3945 __ jcc(Assembler::lessEqual, L_exit_preLoop);
3946 __ movb(rbx, Address(saved_encCounter_start, used));
3947 __ xorb(rbx, Address(from, pos));
3948 __ movb(Address(to, pos), rbx);
3949 __ addptr(pos, 1);
3950 __ addptr(used, 1);
3951 __ subptr(len_reg, 1);
3952
3953 __ jmp(L_preLoop_start);
3954
3955 __ BIND(L_exit_preLoop);
3956 __ movl(Address(used_addr, 0), used);
3957
3958 // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
3959 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
3960 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3961 __ cmpl(rbx, 52);
3962 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
3963 __ cmpl(rbx, 60);
3964 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);
3965
3966 #define CTR_DoSix(opc, src_reg) \
3967 __ opc(xmm_result0, src_reg); \
3968 __ opc(xmm_result1, src_reg); \
3969 __ opc(xmm_result2, src_reg); \
3970 __ opc(xmm_result3, src_reg); \
3971 __ opc(xmm_result4, src_reg); \
3972 __ opc(xmm_result5, src_reg);
3973
3974 // k == 0 : generate code for key_128
3975 // k == 1 : generate code for key_192
3976 // k == 2 : generate code for key_256
3977 for (int k = 0; k < 3; ++k) {
3978 // multi-block processing starts here
3979 __ align(OptoLoopAlignment);
3980 __ BIND(L_multiBlock_loopTop[k]);
3981 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
3982 __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
3983 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
3984
3985 // load, then increment counters
3986 CTR_DoSix(movdqa, xmm_curr_counter);
3987 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
3988 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
3989 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
3990 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
3991 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
3992 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
3993 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increment, shuffle counters back for PXOR
3994 CTR_DoSix(pxor, xmm_key_tmp0); // PXOR with Round 0 key
3995
3996 // load two ROUND_KEYs at a time
3997 for (int i = 1; i < rounds[k]; ) {
3998 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
3999 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
4000 CTR_DoSix(aesenc, xmm_key_tmp1);
4001 i++;
4002 if (i != rounds[k]) {
4003 CTR_DoSix(aesenc, xmm_key_tmp0);
4004 } else {
4005 CTR_DoSix(aesenclast, xmm_key_tmp0);
4006 }
4007 i++;
4008 }
4009
4010 // get next PARALLEL_FACTOR blocks of input into the xmm_from registers
4011 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
4012 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
4013 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
4014 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
4015 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
4016 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));
4017
4018 __ pxor(xmm_result0, xmm_from0);
4019 __ pxor(xmm_result1, xmm_from1);
4020 __ pxor(xmm_result2, xmm_from2);
4021 __ pxor(xmm_result3, xmm_from3);
4022 __ pxor(xmm_result4, xmm_from4);
4023 __ pxor(xmm_result5, xmm_from5);
4024
4025 // store 6 results into the next 96 bytes of output
4026 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
4027 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
4028 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
4029 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
4030 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
4031 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);
4032
4033 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // advance the output position
4034 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
4035 __ jmp(L_multiBlock_loopTop[k]);
4036
4037 // singleBlock starts here
4038 __ align(OptoLoopAlignment);
4039 __ BIND(L_singleBlockLoopTop[k]);
4040 __ cmpptr(len_reg, 0);
4041 __ jcc(Assembler::lessEqual, L_exit);
4042 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
4043 __ movdqa(xmm_result0, xmm_curr_counter);
4044 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
4045 __ pshufb(xmm_result0, xmm_counter_shuf_mask);
4046 __ pxor(xmm_result0, xmm_key_tmp0);
4047 for (int i = 1; i < rounds[k]; i++) {
4048 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
4049 __ aesenc(xmm_result0, xmm_key_tmp0);
4050 }
4051 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
4052 __ aesenclast(xmm_result0, xmm_key_tmp0);
4053 __ cmpptr(len_reg, AESBlockSize);
4054 __ jcc(Assembler::less, L_processTail_insr[k]);
4055 __ movdqu(xmm_from0, Address(from, pos,
Address::times_1, 0 * AESBlockSize)); 4056 __ pxor(xmm_result0, xmm_from0); 4057 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4058 __ addptr(pos, AESBlockSize); 4059 __ subptr(len_reg, AESBlockSize); 4060 __ jmp(L_singleBlockLoopTop[k]); 4061 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4062 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4063 __ testptr(len_reg, 8); 4064 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4065 __ subptr(pos,8); 4066 __ pinsrq(xmm_from0, Address(from, pos), 0); 4067 __ BIND(L_processTail_4_insr[k]); 4068 __ testptr(len_reg, 4); 4069 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4070 __ subptr(pos,4); 4071 __ pslldq(xmm_from0, 4); 4072 __ pinsrd(xmm_from0, Address(from, pos), 0); 4073 __ BIND(L_processTail_2_insr[k]); 4074 __ testptr(len_reg, 2); 4075 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4076 __ subptr(pos, 2); 4077 __ pslldq(xmm_from0, 2); 4078 __ pinsrw(xmm_from0, Address(from, pos), 0); 4079 __ BIND(L_processTail_1_insr[k]); 4080 __ testptr(len_reg, 1); 4081 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4082 __ subptr(pos, 1); 4083 __ pslldq(xmm_from0, 1); 4084 __ pinsrb(xmm_from0, Address(from, pos), 0); 4085 __ BIND(L_processTail_exit_insr[k]); 4086 4087 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4088 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4089 4090 __ testptr(len_reg, 8); 4091 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4092 __ pextrq(Address(to, pos), xmm_result0, 0); 4093 __ psrldq(xmm_result0, 8); 4094 __ addptr(pos, 8); 4095 __ BIND(L_processTail_4_extr[k]); 4096 __ testptr(len_reg, 4); 4097 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4098 __ pextrd(Address(to, pos), xmm_result0, 0); 4099 __ psrldq(xmm_result0, 4); 4100 __ addptr(pos, 4); 4101 __ BIND(L_processTail_2_extr[k]); 4102 __ testptr(len_reg, 2); 4103 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4104 __ pextrw(Address(to, pos), xmm_result0, 0); 4105 __ psrldq(xmm_result0, 2); 4106 __ addptr(pos, 2); 4107 __ BIND(L_processTail_1_extr[k]); 4108 __ testptr(len_reg, 1); 4109 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4110 __ pextrb(Address(to, pos), xmm_result0, 0); 4111 4112 __ BIND(L_processTail_exit_extr[k]); 4113 __ movl(Address(used_addr, 0), len_reg); 4114 __ jmp(L_exit); 4115 4116 } 4117 4118 __ BIND(L_exit); 4119 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4120 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4121 __ pop(rbx); // pop the saved RBX. 
4122 #ifdef _WIN64 4123 __ movl(rax, len_mem); 4124 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4125 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4126 __ addptr(rsp, 2 * wordSize); 4127 #else 4128 __ pop(rax); // return 'len' 4129 #endif 4130 __ leave(); // required for proper stackwalking of RuntimeStub frame 4131 __ ret(0); 4132 return start; 4133 } 4134 4135 void roundDec(XMMRegister xmm_reg) { 4136 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4137 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4138 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4139 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4140 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4141 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4142 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4143 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4144 } 4145 4146 void roundDeclast(XMMRegister xmm_reg) { 4147 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4148 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4149 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4150 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4151 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4152 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4153 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4154 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4155 } 4156 4157 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4158 __ movdqu(xmmdst, Address(key, offset)); 4159 if (xmm_shuf_mask != NULL) { 4160 __ pshufb(xmmdst, xmm_shuf_mask); 4161 } else { 4162 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4163 } 4164 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4165 4166 } 4167 4168 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4169 assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support"); 4170 __ align(CodeEntryAlignment); 4171 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4172 address start = __ pc(); 4173 4174 const Register from = c_rarg0; // source array address 4175 const Register to = c_rarg1; // destination array address 4176 const Register key = c_rarg2; // key array address 4177 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 4178 // and left with the results of the last encryption block 4179 #ifndef _WIN64 4180 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4181 #else 4182 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4183 const Register len_reg = r11; // pick the volatile windows register 4184 #endif 4185 4186 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4187 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4188 4189 __ enter(); 4190 4191 #ifdef _WIN64 4192 // on win64, fill len_reg from stack position 4193 __ movl(len_reg, len_mem); 4194 #else 4195 __ push(len_reg); // Save 4196 #endif 4197 __ push(rbx); 4198 __ vzeroupper(); 4199 4200 // Temporary variable declaration for swapping key bytes 4201 const XMMRegister xmm_key_shuf_mask = xmm1; 4202 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4203 4204 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 
14-rounds 4205 const Register rounds = rbx; 4206 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4207 4208 const XMMRegister IV = xmm0; 4209 // Load IV and broadcast value to 512-bits 4210 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4211 4212 // Temporary variables for storing round keys 4213 const XMMRegister RK0 = xmm30; 4214 const XMMRegister RK1 = xmm9; 4215 const XMMRegister RK2 = xmm18; 4216 const XMMRegister RK3 = xmm19; 4217 const XMMRegister RK4 = xmm20; 4218 const XMMRegister RK5 = xmm21; 4219 const XMMRegister RK6 = xmm22; 4220 const XMMRegister RK7 = xmm23; 4221 const XMMRegister RK8 = xmm24; 4222 const XMMRegister RK9 = xmm25; 4223 const XMMRegister RK10 = xmm26; 4224 4225 // Load and shuffle key 4226 // the java expanded key ordering is rotated one position from what we want 4227 // so we start from 1*16 here and hit 0*16 last 4228 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4229 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4230 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4231 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4232 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4233 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4234 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4235 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4236 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4237 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4238 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4239 4240 // Variables for storing source cipher text 4241 const XMMRegister S0 = xmm10; 4242 const XMMRegister S1 = xmm11; 4243 const XMMRegister S2 = xmm12; 4244 const XMMRegister S3 = xmm13; 4245 const XMMRegister S4 = xmm14; 4246 const XMMRegister S5 = xmm15; 4247 const XMMRegister S6 = xmm16; 4248 const XMMRegister S7 = xmm17; 4249 4250 // Variables for storing decrypted text 4251 const XMMRegister B0 = xmm1; 4252 const XMMRegister B1 = xmm2; 4253 const XMMRegister B2 = xmm3; 4254 const XMMRegister B3 = xmm4; 4255 const XMMRegister B4 = xmm5; 4256 const XMMRegister B5 = xmm6; 4257 const XMMRegister B6 = xmm7; 4258 const XMMRegister B7 = xmm8; 4259 4260 __ cmpl(rounds, 44); 4261 __ jcc(Assembler::greater, KEY_192); 4262 __ jmp(Loop); 4263 4264 __ BIND(KEY_192); 4265 const XMMRegister RK11 = xmm27; 4266 const XMMRegister RK12 = xmm28; 4267 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 4268 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4269 4270 __ cmpl(rounds, 52); 4271 __ jcc(Assembler::greater, KEY_256); 4272 __ jmp(Loop); 4273 4274 __ BIND(KEY_256); 4275 const XMMRegister RK13 = xmm29; 4276 const XMMRegister RK14 = xmm31; 4277 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4278 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4279 4280 __ BIND(Loop); 4281 __ cmpl(len_reg, 512); 4282 __ jcc(Assembler::below, Lcbc_dec_rem); 4283 __ BIND(Loop1); 4284 __ subl(len_reg, 512); 4285 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4286 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4287 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4288 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4289 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4290 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4291 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4292 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4293 __ leaq(from, Address(from, 8 * 64)); 
4294 4295 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4296 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4297 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4298 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4299 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4300 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4301 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4302 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4303 4304 __ evalignq(IV, S0, IV, 0x06); 4305 __ evalignq(S0, S1, S0, 0x06); 4306 __ evalignq(S1, S2, S1, 0x06); 4307 __ evalignq(S2, S3, S2, 0x06); 4308 __ evalignq(S3, S4, S3, 0x06); 4309 __ evalignq(S4, S5, S4, 0x06); 4310 __ evalignq(S5, S6, S5, 0x06); 4311 __ evalignq(S6, S7, S6, 0x06); 4312 4313 roundDec(RK2); 4314 roundDec(RK3); 4315 roundDec(RK4); 4316 roundDec(RK5); 4317 roundDec(RK6); 4318 roundDec(RK7); 4319 roundDec(RK8); 4320 roundDec(RK9); 4321 roundDec(RK10); 4322 4323 __ cmpl(rounds, 44); 4324 __ jcc(Assembler::belowEqual, L_128); 4325 roundDec(RK11); 4326 roundDec(RK12); 4327 4328 __ cmpl(rounds, 52); 4329 __ jcc(Assembler::belowEqual, L_192); 4330 roundDec(RK13); 4331 roundDec(RK14); 4332 4333 __ BIND(L_256); 4334 roundDeclast(RK0); 4335 __ jmp(Loop2); 4336 4337 __ BIND(L_128); 4338 roundDeclast(RK0); 4339 __ jmp(Loop2); 4340 4341 __ BIND(L_192); 4342 roundDeclast(RK0); 4343 4344 __ BIND(Loop2); 4345 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4346 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4347 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4348 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4349 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4350 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4351 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4352 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4353 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4354 4355 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4356 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4357 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4358 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4359 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4360 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4361 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4362 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4363 __ leaq(to, Address(to, 8 * 64)); 4364 __ jmp(Loop); 4365 4366 __ BIND(Lcbc_dec_rem); 4367 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4368 4369 __ BIND(Lcbc_dec_rem_loop); 4370 __ subl(len_reg, 16); 4371 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4372 4373 __ movdqu(S0, Address(from, 0)); 4374 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4375 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4376 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4377 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4378 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4379 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4380 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4381 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4382 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4383 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4384 __ cmpl(rounds, 44); 4385 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4386 4387 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4388 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4389 __ cmpl(rounds, 52); 4390 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4391 4392 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4393 __ 
vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4394 4395 __ BIND(Lcbc_dec_rem_last); 4396 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4397 4398 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4399 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4400 __ movdqu(Address(to, 0), B0); 4401 __ leaq(from, Address(from, 16)); 4402 __ leaq(to, Address(to, 16)); 4403 __ jmp(Lcbc_dec_rem_loop); 4404 4405 __ BIND(Lcbc_dec_ret); 4406 __ movdqu(Address(rvec, 0), IV); 4407 4408 // Zero out the round keys 4409 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4410 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4411 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4412 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4413 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4414 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4415 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4416 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4417 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4418 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4419 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4420 __ cmpl(rounds, 44); 4421 __ jcc(Assembler::belowEqual, Lcbc_exit); 4422 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4423 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4424 __ cmpl(rounds, 52); 4425 __ jcc(Assembler::belowEqual, Lcbc_exit); 4426 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4427 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4428 4429 __ BIND(Lcbc_exit); 4430 __ pop(rbx); 4431 #ifdef _WIN64 4432 __ movl(rax, len_mem); 4433 #else 4434 __ pop(rax); // return length 4435 #endif 4436 __ leave(); // required for proper stackwalking of RuntimeStub frame 4437 __ ret(0); 4438 return start; 4439 } 4440 4441 // Polynomial x^128+x^127+x^126+x^121+1 4442 address ghash_polynomial_addr() { 4443 __ align(CodeEntryAlignment); 4444 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4445 address start = __ pc(); 4446 __ emit_data64(0x0000000000000001, relocInfo::none); 4447 __ emit_data64(0xc200000000000000, relocInfo::none); 4448 return start; 4449 } 4450 4451 address ghash_shufflemask_addr() { 4452 __ align(CodeEntryAlignment); 4453 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4454 address start = __ pc(); 4455 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4456 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4457 return start; 4458 } 4459 4460 // Ghash single and multi block operations using AVX instructions 4461 address generate_avx_ghash_processBlocks() { 4462 __ align(CodeEntryAlignment); 4463 4464 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4465 address start = __ pc(); 4466 4467 // arguments 4468 const Register state = c_rarg0; 4469 const Register htbl = c_rarg1; 4470 const Register data = c_rarg2; 4471 const Register blocks = c_rarg3; 4472 __ enter(); 4473 // Save state before entering routine 4474 __ avx_ghash(state, htbl, data, blocks); 4475 __ leave(); // required for proper stackwalking of RuntimeStub frame 4476 __ ret(0); 4477 return start; 4478 } 4479 4480 // byte swap x86 long 4481 address generate_ghash_long_swap_mask() { 4482 __ align(CodeEntryAlignment); 4483 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4484 address start = __ pc(); 4485 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4486 __ emit_data64(0x0706050403020100, relocInfo::none ); 4487 return start; 4488 } 4489 4490 // byte swap x86 byte array 4491 address generate_ghash_byte_swap_mask() { 4492 __ 
align(CodeEntryAlignment); 4493 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4494 address start = __ pc(); 4495 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4496 __ emit_data64(0x0001020304050607, relocInfo::none ); 4497 return start; 4498 } 4499 4500 /* Single and multi-block ghash operations */ 4501 address generate_ghash_processBlocks() { 4502 __ align(CodeEntryAlignment); 4503 Label L_ghash_loop, L_exit; 4504 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4505 address start = __ pc(); 4506 4507 const Register state = c_rarg0; 4508 const Register subkeyH = c_rarg1; 4509 const Register data = c_rarg2; 4510 const Register blocks = c_rarg3; 4511 4512 const XMMRegister xmm_temp0 = xmm0; 4513 const XMMRegister xmm_temp1 = xmm1; 4514 const XMMRegister xmm_temp2 = xmm2; 4515 const XMMRegister xmm_temp3 = xmm3; 4516 const XMMRegister xmm_temp4 = xmm4; 4517 const XMMRegister xmm_temp5 = xmm5; 4518 const XMMRegister xmm_temp6 = xmm6; 4519 const XMMRegister xmm_temp7 = xmm7; 4520 const XMMRegister xmm_temp8 = xmm8; 4521 const XMMRegister xmm_temp9 = xmm9; 4522 const XMMRegister xmm_temp10 = xmm10; 4523 4524 __ enter(); 4525 4526 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4527 4528 __ movdqu(xmm_temp0, Address(state, 0)); 4529 __ pshufb(xmm_temp0, xmm_temp10); 4530 4531 4532 __ BIND(L_ghash_loop); 4533 __ movdqu(xmm_temp2, Address(data, 0)); 4534 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4535 4536 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4537 __ pshufb(xmm_temp1, xmm_temp10); 4538 4539 __ pxor(xmm_temp0, xmm_temp2); 4540 4541 // 4542 // Multiply with the hash key 4543 // 4544 __ movdqu(xmm_temp3, xmm_temp0); 4545 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4546 __ movdqu(xmm_temp4, xmm_temp0); 4547 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4548 4549 __ movdqu(xmm_temp5, xmm_temp0); 4550 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4551 __ movdqu(xmm_temp6, xmm_temp0); 4552 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4553 4554 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4555 4556 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4557 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 4558 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 4559 __ pxor(xmm_temp3, xmm_temp5); 4560 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4561 // of the carry-less multiplication of 4562 // xmm0 by xmm1. 4563 4564 // We shift the result of the multiplication by one bit position 4565 // to the left to cope for the fact that the bits are reversed. 4566 __ movdqu(xmm_temp7, xmm_temp3); 4567 __ movdqu(xmm_temp8, xmm_temp6); 4568 __ pslld(xmm_temp3, 1); 4569 __ pslld(xmm_temp6, 1); 4570 __ psrld(xmm_temp7, 31); 4571 __ psrld(xmm_temp8, 31); 4572 __ movdqu(xmm_temp9, xmm_temp7); 4573 __ pslldq(xmm_temp8, 4); 4574 __ pslldq(xmm_temp7, 4); 4575 __ psrldq(xmm_temp9, 12); 4576 __ por(xmm_temp3, xmm_temp7); 4577 __ por(xmm_temp6, xmm_temp8); 4578 __ por(xmm_temp6, xmm_temp9); 4579 4580 // 4581 // First phase of the reduction 4582 // 4583 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4584 // independently. 
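// Both reduction phases fold the 256-bit carry-less product <xmm6:xmm3>
// back to 128 bits modulo the GHASH polynomial
//   g(x) = x^128 + x^7 + x^2 + x + 1
// in its bit-reflected representation, following the PCLMULQDQ-based
// algorithm from Intel's carry-less multiplication white paper. The shift
// counts below come from the low-order terms of g(x): left shifts by
// 31 = 32-1, 30 = 32-2 and 25 = 32-7 in the first phase, and right shifts
// by 1, 2 and 7 in the second.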
4585 __ movdqu(xmm_temp7, xmm_temp3);
4586 __ movdqu(xmm_temp8, xmm_temp3);
4587 __ movdqu(xmm_temp9, xmm_temp3);
4588 __ pslld(xmm_temp7, 31); // packed left shift by 31
4589 __ pslld(xmm_temp8, 30); // packed left shift by 30
4590 __ pslld(xmm_temp9, 25); // packed left shift by 25
4591 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
4592 __ pxor(xmm_temp7, xmm_temp9);
4593 __ movdqu(xmm_temp8, xmm_temp7);
4594 __ pslldq(xmm_temp7, 12);
4595 __ psrldq(xmm_temp8, 4);
4596 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete
4597
4598 //
4599 // Second phase of the reduction
4600 //
4601 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
4602 // shift operations.
4603 __ movdqu(xmm_temp2, xmm_temp3);
4604 __ movdqu(xmm_temp4, xmm_temp3);
4605 __ movdqu(xmm_temp5, xmm_temp3);
4606 __ psrld(xmm_temp2, 1); // packed right shift by 1
4607 __ psrld(xmm_temp4, 2); // packed right shift by 2
4608 __ psrld(xmm_temp5, 7); // packed right shift by 7
4609 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
4610 __ pxor(xmm_temp2, xmm_temp5);
4611 __ pxor(xmm_temp2, xmm_temp8);
4612 __ pxor(xmm_temp3, xmm_temp2);
4613 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6
4614
4615 __ decrement(blocks);
4616 __ jcc(Assembler::zero, L_exit);
4617 __ movdqu(xmm_temp0, xmm_temp6);
4618 __ addptr(data, 16);
4619 __ jmp(L_ghash_loop);
4620
4621 __ BIND(L_exit);
4622 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
4623 __ movdqu(Address(state, 0), xmm_temp6); // store the result
4624 __ leave();
4625 __ ret(0);
4626 return start;
4627 }
4628
4629 // Base64 character set
4630 address base64_charset_addr() {
4631 __ align(CodeEntryAlignment);
4632 StubCodeMark mark(this, "StubRoutines", "base64_charset");
4633 address start = __ pc();
4634 __ emit_data64(0x0000004200000041, relocInfo::none);
4635 __ emit_data64(0x0000004400000043, relocInfo::none);
4636 __ emit_data64(0x0000004600000045, relocInfo::none);
4637 __ emit_data64(0x0000004800000047, relocInfo::none);
4638 __ emit_data64(0x0000004a00000049, relocInfo::none);
4639 __ emit_data64(0x0000004c0000004b, relocInfo::none);
4640 __ emit_data64(0x0000004e0000004d, relocInfo::none);
4641 __ emit_data64(0x000000500000004f, relocInfo::none);
4642 __ emit_data64(0x0000005200000051, relocInfo::none);
4643 __ emit_data64(0x0000005400000053, relocInfo::none);
4644 __ emit_data64(0x0000005600000055, relocInfo::none);
4645 __ emit_data64(0x0000005800000057, relocInfo::none);
4646 __ emit_data64(0x0000005a00000059, relocInfo::none);
4647 __ emit_data64(0x0000006200000061, relocInfo::none);
4648 __ emit_data64(0x0000006400000063, relocInfo::none);
4649 __ emit_data64(0x0000006600000065, relocInfo::none);
4650 __ emit_data64(0x0000006800000067, relocInfo::none);
4651 __ emit_data64(0x0000006a00000069, relocInfo::none);
4652 __ emit_data64(0x0000006c0000006b, relocInfo::none);
4653 __ emit_data64(0x0000006e0000006d, relocInfo::none);
4654 __ emit_data64(0x000000700000006f, relocInfo::none);
4655 __ emit_data64(0x0000007200000071, relocInfo::none);
4656 __ emit_data64(0x0000007400000073, relocInfo::none);
4657 __ emit_data64(0x0000007600000075, relocInfo::none);
4658 __ emit_data64(0x0000007800000077, relocInfo::none);
4659 __ emit_data64(0x0000007a00000079, relocInfo::none);
4660 __ emit_data64(0x0000003100000030, relocInfo::none);
4661 __ emit_data64(0x0000003300000032, relocInfo::none);
4662 __ emit_data64(0x0000003500000034, relocInfo::none);
4663 __
emit_data64(0x0000003700000036, relocInfo::none); 4664 __ emit_data64(0x0000003900000038, relocInfo::none); 4665 __ emit_data64(0x0000002f0000002b, relocInfo::none); 4666 return start; 4667 } 4668 4669 //base64 url character set 4670 address base64url_charset_addr() { 4671 __ align(CodeEntryAlignment); 4672 StubCodeMark mark(this, "StubRoutines", "base64url_charset"); 4673 address start = __ pc(); 4674 __ emit_data64(0x0000004200000041, relocInfo::none); 4675 __ emit_data64(0x0000004400000043, relocInfo::none); 4676 __ emit_data64(0x0000004600000045, relocInfo::none); 4677 __ emit_data64(0x0000004800000047, relocInfo::none); 4678 __ emit_data64(0x0000004a00000049, relocInfo::none); 4679 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4680 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4681 __ emit_data64(0x000000500000004f, relocInfo::none); 4682 __ emit_data64(0x0000005200000051, relocInfo::none); 4683 __ emit_data64(0x0000005400000053, relocInfo::none); 4684 __ emit_data64(0x0000005600000055, relocInfo::none); 4685 __ emit_data64(0x0000005800000057, relocInfo::none); 4686 __ emit_data64(0x0000005a00000059, relocInfo::none); 4687 __ emit_data64(0x0000006200000061, relocInfo::none); 4688 __ emit_data64(0x0000006400000063, relocInfo::none); 4689 __ emit_data64(0x0000006600000065, relocInfo::none); 4690 __ emit_data64(0x0000006800000067, relocInfo::none); 4691 __ emit_data64(0x0000006a00000069, relocInfo::none); 4692 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4693 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4694 __ emit_data64(0x000000700000006f, relocInfo::none); 4695 __ emit_data64(0x0000007200000071, relocInfo::none); 4696 __ emit_data64(0x0000007400000073, relocInfo::none); 4697 __ emit_data64(0x0000007600000075, relocInfo::none); 4698 __ emit_data64(0x0000007800000077, relocInfo::none); 4699 __ emit_data64(0x0000007a00000079, relocInfo::none); 4700 __ emit_data64(0x0000003100000030, relocInfo::none); 4701 __ emit_data64(0x0000003300000032, relocInfo::none); 4702 __ emit_data64(0x0000003500000034, relocInfo::none); 4703 __ emit_data64(0x0000003700000036, relocInfo::none); 4704 __ emit_data64(0x0000003900000038, relocInfo::none); 4705 __ emit_data64(0x0000005f0000002d, relocInfo::none); 4706 4707 return start; 4708 } 4709 4710 address base64_bswap_mask_addr() { 4711 __ align(CodeEntryAlignment); 4712 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 4713 address start = __ pc(); 4714 __ emit_data64(0x0504038002010080, relocInfo::none); 4715 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4716 __ emit_data64(0x0908078006050480, relocInfo::none); 4717 __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none); 4718 __ emit_data64(0x0605048003020180, relocInfo::none); 4719 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 4720 __ emit_data64(0x0504038002010080, relocInfo::none); 4721 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4722 4723 return start; 4724 } 4725 4726 address base64_right_shift_mask_addr() { 4727 __ align(CodeEntryAlignment); 4728 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 4729 address start = __ pc(); 4730 __ emit_data64(0x0006000400020000, relocInfo::none); 4731 __ emit_data64(0x0006000400020000, relocInfo::none); 4732 __ emit_data64(0x0006000400020000, relocInfo::none); 4733 __ emit_data64(0x0006000400020000, relocInfo::none); 4734 __ emit_data64(0x0006000400020000, relocInfo::none); 4735 __ emit_data64(0x0006000400020000, relocInfo::none); 4736 __ emit_data64(0x0006000400020000, relocInfo::none); 4737 

  address base64_bswap_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64");
    address start = __ pc();
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);
    __ emit_data64(0x0908078006050480, relocInfo::none);
    __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none);
    __ emit_data64(0x0605048003020180, relocInfo::none);
    __ emit_data64(0x0c0b0a8009080780, relocInfo::none);
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);

    return start;
  }

  address base64_right_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "right_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);

    return start;
  }

  address base64_left_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "left_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);

    return start;
  }

  address base64_and_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "and_mask");
    address start = __ pc();
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    return start;
  }

  address base64_gather_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "gather_mask");
    address start = __ pc();
    __ emit_data64(0xffffffffffffffff, relocInfo::none);
    return start;
  }
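
  // The shift/and masks above implement the 3-byte to four-6-bit split that
  // the encode loop performs for every input group. The scalar equivalent in
  // C (illustrative only):
  //
  //   uint32_t bits = (b0 << 16) | (b1 << 8) | b2;   // three input bytes
  //   idx[0] = (bits >> 18) & 0x3f;
  //   idx[1] = (bits >> 12) & 0x3f;
  //   idx[2] = (bits >>  6) & 0x3f;
  //   idx[3] =  bits        & 0x3f;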

  // Code for generating Base64 encoding.
  // Intrinsic function prototype in Base64.java:
  // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) {
  address generate_base64_encodeBlock() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "implEncode");
    address start = __ pc();
    __ enter();

    // Save callee-saved registers before using them
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);

    // arguments
    const Register source       = c_rarg0; // Source Array
    const Register start_offset = c_rarg1; // start offset
    const Register end_offset   = c_rarg2; // end offset
    const Register dest         = c_rarg3; // destination array

#ifndef _WIN64
    const Register dp    = c_rarg4; // Position for writing to dest array
    const Register isURL = c_rarg5; // Base64 or URL character set
#else
    const Address dp_mem(rbp, 6 * wordSize);    // dp and isURL are on the stack on Win64
    const Address isURL_mem(rbp, 7 * wordSize);
    const Register isURL = r10;     // pick the volatile windows register
    const Register dp = r12;
    __ movl(dp, dp_mem);
    __ movl(isURL, isURL_mem);
#endif

    const Register length = r14;
    Label L_process80, L_process32, L_process3, L_exit, L_processdata;

    // calculate length from offsets
    __ movl(length, end_offset);
    __ subl(length, start_offset);
    __ cmpl(length, 0);
    __ jcc(Assembler::lessEqual, L_exit);

    __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
    // check if base64 charset (isURL = 0) or base64 url charset (isURL = 1) needs to be loaded
    __ cmpl(isURL, 0);
    __ jcc(Assembler::equal, L_processdata);
    __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));

    // load masks required for encoding data
    __ BIND(L_processdata);
    __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
    // Set 64 bits of K register.
    __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
    __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
    __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);

    // Vector Base64 implementation, producing 96 bytes of encoded data
    __ BIND(L_process80);
    __ cmpl(length, 80);
    __ jcc(Assembler::below, L_process32);
    __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
    __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
    __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);

    // permute the input data in such a manner that we have continuity of the source
    __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
    __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
    __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);

    // shuffle input and group 3 bytes of data and to it add 0 as the 4th byte.
    // we can deal with 12 bytes at a time in a 128 bit register
    __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit);

    // convert byte to word. Each 128 bit register will have 6 bytes for processing
    __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit);

    // Extract bits in the following pattern 6, 4+2, 2+4, 6 to convert 3, 8 bit numbers to 4, 6 bit numbers
    __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit);

    __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit);

    __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);

    __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit);

    // Get the final 4*6 bits base64 encoding
    __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit);
    __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit);
    __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit);

    // Shift
    __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    // look up 6 bits in the base64 character set to fetch the encoding
    // we are converting word to dword as gather instructions need dword indices for looking up encoding
    __ vextracti64x4(xmm6, xmm3, 0);
    __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit);
    __ vextracti64x4(xmm6, xmm3, 1);
    __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit);

    __ vextracti64x4(xmm6, xmm4, 0);
    __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit);
    __ vextracti64x4(xmm6, xmm4, 1);
    __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit);

    __ vextracti64x4(xmm4, xmm5, 0);
    __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit);

    __ vextracti64x4(xmm4, xmm5, 1);
    __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit);

    __ kmovql(k2, k3);
    __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit);

    // Down convert dword to byte. Final output is 16*6 = 96 bytes long
    __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit);

    __ addq(dest, 96);
    __ addq(source, 72);
    __ subq(length, 72);
    __ jmp(L_process80);

    // Vector Base64 implementation generating 32 bytes of encoded data
    __ BIND(L_process32);
    __ cmpl(length, 32);
    __ jcc(Assembler::below, L_process3);
    __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit);
    __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit);
    __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit);
    __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit);
    __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit);
    __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit);

    __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
    __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit);
    __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
    __ vextracti64x4(xmm9, xmm1, 0);
    __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
    __ vextracti64x4(xmm9, xmm1, 1);
    __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
    __ subq(length, 24);
    __ addq(dest, 32);
    __ addq(source, 24);
    __ jmp(L_process32);

    // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data
    /* This code corresponds to the scalar version of the following snippet in Base64.java
    ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
    ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 6)  & 0x3f];
    ** dst[dp0++] = (byte)base64[bits & 0x3f];*/
    __ BIND(L_process3);
    __ cmpl(length, 3);
    __ jcc(Assembler::below, L_exit);
    // Read 1 byte at a time
    __ movzbl(rax, Address(source, start_offset));
    __ shll(rax, 0x10);
    __ movl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
    __ shll(rax, 0x8);
    __ movzwl(rax, rax);
    __ orl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
    __ orl(rax, r15);
    // Save 3 bytes read in r15
    __ movl(r15, rax);
    __ shrl(rax, 0x12);
    __ andl(rax, 0x3f);
    // rax contains the index, r11 contains base64 lookup table
    __ movb(rax, Address(r11, rax, Address::times_4));
    // Write the encoded byte to destination
    __ movb(Address(dest, dp, Address::times_1, 0), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0xc);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 1), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0x6);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 2), rax);
    __ movl(rax, r15);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 3), rax);
    __ subl(length, 3);
    __ addq(dest, 4);
    __ addq(source, 3);
    __ jmp(L_process3);
    __ BIND(L_exit);
    __ pop(r15);
    __ pop(r14);
    __ pop(r13);
    __ pop(r12);
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *       rax   - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0; // crc
    const Register buf   = c_rarg1; // source java byte array address
    const Register len   = c_rarg2; // length
    const Register table = c_rarg3; // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
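
  // kernel_crc32 uses CLMUL-accelerated folding; for reference, a plain
  // bit-serial C implementation of the same reflected CRC-32 (polynomial
  // 0xEDB88320, as used by java.util.zip.CRC32) looks like this. Illustrative
  // only, not what the stub emits:
  //
  //   #include <stdint.h>
  //   #include <stddef.h>
  //   static uint32_t crc32_ref(uint32_t crc, const uint8_t* buf, size_t len) {
  //     crc = ~crc;
  //     while (len--) {
  //       crc ^= *buf++;
  //       for (int k = 0; k < 8; k++)
  //         crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1)));
  //     }
  //     return ~crc;
  //   }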

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *       rax   - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
    // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
    // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
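
  // CRC-32C differs from the CRC-32 stub above only in the generator
  // polynomial: the same bit-serial reference applies with the reflected
  // Castagnoli constant (illustrative only):
  //
  //   crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));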

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *   not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *   Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x     = rdi;
    const Register xlen  = rax;
    const Register y     = rsi;
    const Register ylen  = rcx;
    const Register z     = r8;
    const Register zlen  = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
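
  // multiply_to_len computes the same schoolbook product as BigInteger's
  // multiplyToLen. A portable C sketch with 32-bit limbs, most-significant
  // limb first to match the Java int[] layout (illustrative only):
  //
  //   static void multiply_to_len_ref(const uint32_t* x, int xlen,
  //                                   const uint32_t* y, int ylen, uint32_t* z) {
  //     for (int i = 0; i < xlen + ylen; i++) z[i] = 0;
  //     for (int i = xlen - 1; i >= 0; i--) {
  //       uint64_t carry = 0;
  //       for (int j = ylen - 1, k = i + j + 1; j >= 0; j--, k--) {
  //         uint64_t p = (uint64_t)x[i] * y[j] + z[k] + carry;
  //         z[k]  = (uint32_t)p;
  //         carry = p >> 32;
  //       }
  //       z[i] = (uint32_t)carry;
  //     }
  //   }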

  /**
   * Arguments:
   *
   *  Input:
   *    c_rarg0   - obja     address
   *    c_rarg1   - objb     address
   *    c_rarg2   - length   length
   *    c_rarg3   - scale    log2 of the array index scale
   *
   *  Output:
   *        rax   - int; >= 0: index of first mismatch, < 0: bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;  // rcx, will exchange with r9
    const Register objb = c_rarg1;   // rdx
    const Register length = c_rarg2; // r8
    const Register obja = c_rarg3;   // r9
    __ xchgq(obja, scale); // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;   // U:rdi
    const Register objb = c_rarg1;   // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale = c_rarg3;  // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }
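
  // Scalar semantics of the intrinsic (cf. ArraysSupport.vectorizedMismatch in
  // the JDK); a simplified byte-wise C sketch that ignores the negative
  // partial-tail encoding the real routine returns (illustrative only):
  //
  //   static int64_t mismatch_ref(const uint8_t* a, const uint8_t* b,
  //                               int64_t length, int log2_scale) {
  //     int64_t nbytes = length << log2_scale;
  //     for (int64_t i = 0; i < nbytes; i++)
  //       if (a[i] != b[i]) return i >> log2_scale; // element index of mismatch
  //     return -1;                                  // no mismatch found
  //   }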

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x      = rdi;
    const Register len    = rsi;
    const Register z      = r8;
    const Register zlen   = rcx;

    const Register tmp1   = r12;
    const Register tmp2   = r13;
    const Register tmp3   = r14;
    const Register tmp4   = r15;
    const Register tmp5   = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
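
  // square_to_len exploits the symmetry of squaring: each cross product
  // appears twice, so only about half the partial products of a general
  // multiply are needed. With B = 2^32 and x_i the 32-bit limbs of x:
  //
  //   x^2 = sum_i x_i^2 * B^(2i)  +  2 * sum_{i<j} x_i * x_j * B^(i+j)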

  address generate_method_entry_barrier() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");

    Label deoptimize_label;

    address start = __ pc();

    __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing

    BLOCK_COMMENT("Entry:");
    __ enter(); // save rbp

    // save c_rarg0, because we want to use that value.
    // We could do without it but then we depend on the number of slots used by pusha
    __ push(c_rarg0);

    __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address

    __ pusha();

    // The method may have floats as arguments, and we must spill them before calling
    // the VM runtime.
    assert(Argument::n_float_register_parameters_j == 8, "Assumption");
    const int xmm_size = wordSize * 2;
    const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
    __ subptr(rsp, xmm_spill_size);
    __ movdqu(Address(rsp, xmm_size * 7), xmm7);
    __ movdqu(Address(rsp, xmm_size * 6), xmm6);
    __ movdqu(Address(rsp, xmm_size * 5), xmm5);
    __ movdqu(Address(rsp, xmm_size * 4), xmm4);
    __ movdqu(Address(rsp, xmm_size * 3), xmm3);
    __ movdqu(Address(rsp, xmm_size * 2), xmm2);
    __ movdqu(Address(rsp, xmm_size * 1), xmm1);
    __ movdqu(Address(rsp, xmm_size * 0), xmm0);

    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);

    __ movdqu(xmm0, Address(rsp, xmm_size * 0));
    __ movdqu(xmm1, Address(rsp, xmm_size * 1));
    __ movdqu(xmm2, Address(rsp, xmm_size * 2));
    __ movdqu(xmm3, Address(rsp, xmm_size * 3));
    __ movdqu(xmm4, Address(rsp, xmm_size * 4));
    __ movdqu(xmm5, Address(rsp, xmm_size * 5));
    __ movdqu(xmm6, Address(rsp, xmm_size * 6));
    __ movdqu(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_spill_size);

    __ cmpl(rax, 1); // 1 means deoptimize
    __ jcc(Assembler::equal, deoptimize_label);

    __ popa();
    __ pop(c_rarg0);

    __ leave();

    __ addptr(rsp, 1 * wordSize); // cookie
    __ ret(0);


    __ BIND(deoptimize_label);

    __ popa();
    __ pop(c_rarg0);

    __ leave();

    // this can be taken out, but is good for verification purposes. getting a SIGSEGV
    // here while still having a correct stack is valuable
    __ testptr(rsp, Address(rsp, 0));

    __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
    __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be caller's verified_entry_point

    return start;
  }
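
  // Control flow of the barrier as C-like pseudocode (illustrative only;
  // names are descriptive, not real helpers):
  //
  //   // on nmethod entry:
  //   push(-1);                                  // cookie slot
  //   spill integer registers and the 8 float argument registers;
  //   int res = BarrierSetNMethod::nmethod_stub_entry_barrier(&return_address);
  //   restore registers;
  //   if (res == 1) {                            // deoptimize
  //     rsp = *rsp;                              // barrier wrote a new rsp over the cookie
  //     goto *(rsp - wordSize);                  // continue at caller's verified entry
  //   }
  //   // otherwise fall through into the method body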

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - out address
   *    c_rarg1   - in address
   *    c_rarg2   - offset
   *    c_rarg3   - len
   *   not Win64
   *    c_rarg4   - k
   *   Win64
   *    rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out     = rdi;
    const Register in      = rsi;
    const Register offset  = r11;
    const Register len     = rcx;
    const Register k       = r8;

    // Next registers will be saved on stack in mul_add().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
                       // len => rcx, k => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument is on stack on Win64
    __ movl(k, Address(rsp, 6 * wordSize));
#endif
    __ movptr(r11, rdx); // move offset in rdx to offset(r11)
    __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
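
  // mul_add computes out[...] += in[0..len-1] * k and returns the final carry,
  // as in BigInteger.mulAdd. A C sketch using little-endian limb order for
  // clarity (the Java arrays are most-significant-first; illustrative only):
  //
  //   static uint32_t mul_add_ref(uint32_t* out, const uint32_t* in,
  //                               int len, uint32_t k) {
  //     uint64_t carry = 0;
  //     for (int j = 0; j < len; j++) {
  //       uint64_t p = (uint64_t)in[j] * k + out[j] + carry;
  //       out[j] = (uint32_t)p;
  //       carry  = p >> 32;
  //     }
  //     return (uint32_t)carry;
  //   }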

  address generate_libmExp() {
    StubCodeMark mark(this, "StubRoutines", "libmExp");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog() {
    StubCodeMark mark(this, "StubRoutines", "libmLog");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r11;
    const Register tmp2 = r8;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog10() {
    StubCodeMark mark(this, "StubRoutines", "libmLog10");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmPow() {
    StubCodeMark mark(this, "StubRoutines", "libmPow");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmSin() {
    StubCodeMark mark(this, "StubRoutines", "libmSin");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmCos() {
    StubCodeMark mark(this, "StubRoutines", "libmCos");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    StubCodeMark mark(this, "StubRoutines", "libmTan");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
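
  // Usage sketch: the throw stubs are all built through this helper, e.g.
  // (taken from generate_all() further down):
  //
  //   StubRoutines::_throw_AbstractMethodError_entry =
  //     generate_throw_exception("AbstractMethodError throw_exception",
  //                              CAST_FROM_FN_PTR(address,
  //                                               SharedRuntime::throw_AbstractMethodError));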

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values
    //       layout is critical for correct loading by FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
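
  // Decoding _mxcsr_std (0x1F80) for reference: bits 7..12 (IM, DM, ZM, OM,
  // UM, PM) are set, so all six SSE exceptions are masked; RC (bits 13..14)
  // is 00, i.e. round to nearest; FZ and DAZ are clear.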

  // Call here from the interpreter or compiled code to either load
  // multiple returned values from the value type instance being
  // returned to registers or to store returned values to a newly
  // allocated value type instance.
  address generate_return_value_stub(address destination, const char* name, bool has_res) {
    // We need to save all registers the calling convention may use so
    // the runtime calls read or update those registers. This needs to
    // be in sync with SharedRuntime::java_return_convention().
    enum layout {
      pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2,
      rax_off, rax_off_2,
      j_rarg5_off, j_rarg5_2,
      j_rarg4_off, j_rarg4_2,
      j_rarg3_off, j_rarg3_2,
      j_rarg2_off, j_rarg2_2,
      j_rarg1_off, j_rarg1_2,
      j_rarg0_off, j_rarg0_2,
      j_farg0_off, j_farg0_2,
      j_farg1_off, j_farg1_2,
      j_farg2_off, j_farg2_2,
      j_farg3_off, j_farg3_2,
      j_farg4_off, j_farg4_2,
      j_farg5_off, j_farg5_2,
      j_farg6_off, j_farg6_2,
      j_farg7_off, j_farg7_2,
      rbp_off, rbp_off_2,
      return_off, return_off_2,

      framesize
    };

    CodeBuffer buffer(name, 1000, 512);
    MacroAssembler* masm = new MacroAssembler(&buffer);

    int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
    assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
    int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
    int frame_size_in_words = frame_size_in_bytes / wordSize;

    OopMapSet* oop_maps = new OopMapSet();
    OopMap* map = new OopMap(frame_size_in_slots, 0);

    map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());

    int start = __ offset();

    __ subptr(rsp, frame_size_in_bytes - 8 /* return address*/);

    __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
    __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
    __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
    __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
    __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
    __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
    __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
    __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
    __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);

    __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
    __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
    __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
    __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
    __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
    __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
    __ movptr(Address(rsp, rax_off * BytesPerInt), rax);

    int frame_complete = __ offset();

    __ set_last_Java_frame(noreg, noreg, NULL);

    __ mov(c_rarg0, r15_thread);
    __ mov(c_rarg1, rax);

    __ call(RuntimeAddress(destination));

    // Set an oopmap for the call site.

    oop_maps->add_gc_map(__ offset() - start, map);

    // clear last_Java_sp
    __ reset_last_Java_frame(false);

    __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
    __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
    __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
    __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
    __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
    __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
    __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
    __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
    __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));

    __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
    __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
    __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
    __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
    __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
    __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
    __ movptr(rax, Address(rsp, rax_off * BytesPerInt));

    __ addptr(rsp, frame_size_in_bytes - 8);

    // check for pending exceptions
    Label pending;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, pending);

    if (has_res) {
      __ get_vm_result(rax, r15_thread);
    }

    __ ret(0);

    __ bind(pending);

    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // -------------
    // make sure all code is generated
    masm->flush();

    // The caller may not know the register mapping of the fields of the returned value
    // object, so it won't generate a valid oopmap for the call site. Hence, we can't
    // do InterfaceSupport::verify_stack().
    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false,
                                                      /*can_verify_stack =*/ false);
    return stub->entry_point();
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // entry points that exist in all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    // Generate these first because they are called from other stubs
    StubRoutines::_load_value_type_fields_in_regs = generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_value_type_fields_in_regs), "load_value_type_fields_in_regs", false);
    StubRoutines::_store_value_type_fields_to_buf = generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_value_type_fields_to_buf), "store_value_type_fields_to_buf", true);

    StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before stub generation which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_float_sign_mask  = generate_vector_mask("vector_float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip  = generate_vector_mask("vector_float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff);
    StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask");
    StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
      } else {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
      }
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // _WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}