1 /* 2 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/method.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "utilities/top.hpp" 42 #ifdef COMPILER2 43 #include "opto/runtime.hpp" 44 #endif 45 46 // Declaration and definition of StubGenerator (no .hpp file). 47 // For a more detailed description of the stub routine structure 48 // see the comment in stubRoutines.hpp 49 50 #define __ _masm-> 51 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 52 #define a__ ((Assembler*)_masm)-> 53 54 #ifdef PRODUCT 55 #define BLOCK_COMMENT(str) /* nothing */ 56 #else 57 #define BLOCK_COMMENT(str) __ block_comment(str) 58 #endif 59 60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 61 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 62 63 // Stub Code definitions 64 65 static address handle_unsafe_access() { 66 JavaThread* thread = JavaThread::current(); 67 address pc = thread->saved_exception_pc(); 68 // pc is the instruction which we must emulate 69 // doing a no-op is fine: return garbage from the load 70 // therefore, compute npc 71 address npc = Assembler::locate_next_instruction(pc); 72 73 // request an async exception 74 thread->set_pending_unsafe_access_error(); 75 76 // return address of next instruction to execute 77 return npc; 78 } 79 80 class StubGenerator: public StubCodeGenerator { 81 private: 82 83 #ifdef PRODUCT 84 #define inc_counter_np(counter) ((void)0) 85 #else 86 void inc_counter_np_(int& counter) { 87 // This can destroy rscratch1 if counter is far from the code cache 88 __ incrementl(ExternalAddress((address)&counter)); 89 } 90 #define inc_counter_np(counter) \ 91 BLOCK_COMMENT("inc_counter " #counter); \ 92 inc_counter_np_(counter); 93 #endif 94 95 // Call stubs are used to call Java from C 96 // 97 // Linux Arguments: 98 // c_rarg0: call wrapper address address 99 // c_rarg1: result address 100 // c_rarg2: result type BasicType 101 // c_rarg3: method Method* 102 // c_rarg4: (interpreter) entry point address 103 // c_rarg5: parameters intptr_t* 104 // 16(rbp): parameter size (in words) int 105 // 24(rbp): thread Thread* 106 // 107 // [ return_from_Java ] <--- rsp 108 // [ argument word n ] 109 // ... 110 // -12 [ argument word 1 ] 111 // -11 [ saved r15 ] <--- rsp_after_call 112 // -10 [ saved r14 ] 113 // -9 [ saved r13 ] 114 // -8 [ saved r12 ] 115 // -7 [ saved rbx ] 116 // -6 [ call wrapper ] 117 // -5 [ result ] 118 // -4 [ result type ] 119 // -3 [ method ] 120 // -2 [ entry point ] 121 // -1 [ parameters ] 122 // 0 [ saved rbp ] <--- rbp 123 // 1 [ return address ] 124 // 2 [ parameter size ] 125 // 3 [ thread ] 126 // 127 // Windows Arguments: 128 // c_rarg0: call wrapper address address 129 // c_rarg1: result address 130 // c_rarg2: result type BasicType 131 // c_rarg3: method Method* 132 // 48(rbp): (interpreter) entry point address 133 // 56(rbp): parameters intptr_t* 134 // 64(rbp): parameter size (in words) int 135 // 72(rbp): thread Thread* 136 // 137 // [ return_from_Java ] <--- rsp 138 // [ argument word n ] 139 // ... 140 // -60 [ argument word 1 ] 141 // -59 [ saved xmm31 ] <--- rsp after_call 142 // [ saved xmm16-xmm30 ] (EVEX enabled, else the space is blank) 143 // -27 [ saved xmm15 ] 144 // [ saved xmm7-xmm14 ] 145 // -9 [ saved xmm6 ] (each xmm register takes 2 slots) 146 // -7 [ saved r15 ] 147 // -6 [ saved r14 ] 148 // -5 [ saved r13 ] 149 // -4 [ saved r12 ] 150 // -3 [ saved rdi ] 151 // -2 [ saved rsi ] 152 // -1 [ saved rbx ] 153 // 0 [ saved rbp ] <--- rbp 154 // 1 [ return address ] 155 // 2 [ call wrapper ] 156 // 3 [ result ] 157 // 4 [ result type ] 158 // 5 [ method ] 159 // 6 [ entry point ] 160 // 7 [ parameters ] 161 // 8 [ parameter size ] 162 // 9 [ thread ] 163 // 164 // Windows reserves the callers stack space for arguments 1-4. 165 // We spill c_rarg0-c_rarg3 to this space. 166 167 // Call stub stack layout word offsets from rbp 168 enum call_stub_layout { 169 #ifdef _WIN64 170 xmm_save_first = 6, // save from xmm6 171 xmm_save_last = 31, // to xmm31 172 xmm_save_base = -9, 173 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 174 r15_off = -7, 175 r14_off = -6, 176 r13_off = -5, 177 r12_off = -4, 178 rdi_off = -3, 179 rsi_off = -2, 180 rbx_off = -1, 181 rbp_off = 0, 182 retaddr_off = 1, 183 call_wrapper_off = 2, 184 result_off = 3, 185 result_type_off = 4, 186 method_off = 5, 187 entry_point_off = 6, 188 parameters_off = 7, 189 parameter_size_off = 8, 190 thread_off = 9 191 #else 192 rsp_after_call_off = -12, 193 mxcsr_off = rsp_after_call_off, 194 r15_off = -11, 195 r14_off = -10, 196 r13_off = -9, 197 r12_off = -8, 198 rbx_off = -7, 199 call_wrapper_off = -6, 200 result_off = -5, 201 result_type_off = -4, 202 method_off = -3, 203 entry_point_off = -2, 204 parameters_off = -1, 205 rbp_off = 0, 206 retaddr_off = 1, 207 parameter_size_off = 2, 208 thread_off = 3 209 #endif 210 }; 211 212 #ifdef _WIN64 213 Address xmm_save(int reg) { 214 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); 215 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); 216 } 217 #endif 218 219 address generate_call_stub(address& return_address) { 220 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 221 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 222 "adjust this code"); 223 StubCodeMark mark(this, "StubRoutines", "call_stub"); 224 address start = __ pc(); 225 226 // same as in generate_catch_exception()! 227 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 228 229 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 230 const Address result (rbp, result_off * wordSize); 231 const Address result_type (rbp, result_type_off * wordSize); 232 const Address method (rbp, method_off * wordSize); 233 const Address entry_point (rbp, entry_point_off * wordSize); 234 const Address parameters (rbp, parameters_off * wordSize); 235 const Address parameter_size(rbp, parameter_size_off * wordSize); 236 237 // same as in generate_catch_exception()! 238 const Address thread (rbp, thread_off * wordSize); 239 240 const Address r15_save(rbp, r15_off * wordSize); 241 const Address r14_save(rbp, r14_off * wordSize); 242 const Address r13_save(rbp, r13_off * wordSize); 243 const Address r12_save(rbp, r12_off * wordSize); 244 const Address rbx_save(rbp, rbx_off * wordSize); 245 246 // stub code 247 __ enter(); 248 __ subptr(rsp, -rsp_after_call_off * wordSize); 249 250 // save register parameters 251 #ifndef _WIN64 252 __ movptr(parameters, c_rarg5); // parameters 253 __ movptr(entry_point, c_rarg4); // entry_point 254 #endif 255 256 __ movptr(method, c_rarg3); // method 257 __ movl(result_type, c_rarg2); // result type 258 __ movptr(result, c_rarg1); // result 259 __ movptr(call_wrapper, c_rarg0); // call wrapper 260 261 // save regs belonging to calling function 262 __ movptr(rbx_save, rbx); 263 __ movptr(r12_save, r12); 264 __ movptr(r13_save, r13); 265 __ movptr(r14_save, r14); 266 __ movptr(r15_save, r15); 267 if (UseAVX > 2) { 268 __ movl(rbx, 0xffff); 269 __ kmovql(k1, rbx); 270 } 271 #ifdef _WIN64 272 if (UseAVX > 2) { 273 for (int i = 6; i <= 31; i++) { 274 __ movdqu(xmm_save(i), as_XMMRegister(i)); 275 } 276 } else { 277 for (int i = 6; i <= 15; i++) { 278 __ movdqu(xmm_save(i), as_XMMRegister(i)); 279 } 280 } 281 282 const Address rdi_save(rbp, rdi_off * wordSize); 283 const Address rsi_save(rbp, rsi_off * wordSize); 284 285 __ movptr(rsi_save, rsi); 286 __ movptr(rdi_save, rdi); 287 #else 288 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 289 { 290 Label skip_ldmx; 291 __ stmxcsr(mxcsr_save); 292 __ movl(rax, mxcsr_save); 293 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 294 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 295 __ cmp32(rax, mxcsr_std); 296 __ jcc(Assembler::equal, skip_ldmx); 297 __ ldmxcsr(mxcsr_std); 298 __ bind(skip_ldmx); 299 } 300 #endif 301 302 // Load up thread register 303 __ movptr(r15_thread, thread); 304 __ reinit_heapbase(); 305 306 #ifdef ASSERT 307 // make sure we have no pending exceptions 308 { 309 Label L; 310 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 311 __ jcc(Assembler::equal, L); 312 __ stop("StubRoutines::call_stub: entered with pending exception"); 313 __ bind(L); 314 } 315 #endif 316 317 // pass parameters if any 318 BLOCK_COMMENT("pass parameters if any"); 319 Label parameters_done; 320 __ movl(c_rarg3, parameter_size); 321 __ testl(c_rarg3, c_rarg3); 322 __ jcc(Assembler::zero, parameters_done); 323 324 Label loop; 325 __ movptr(c_rarg2, parameters); // parameter pointer 326 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 327 __ BIND(loop); 328 __ movptr(rax, Address(c_rarg2, 0));// get parameter 329 __ addptr(c_rarg2, wordSize); // advance to next parameter 330 __ decrementl(c_rarg1); // decrement counter 331 __ push(rax); // pass parameter 332 __ jcc(Assembler::notZero, loop); 333 334 // call Java function 335 __ BIND(parameters_done); 336 __ movptr(rbx, method); // get Method* 337 __ movptr(c_rarg1, entry_point); // get entry_point 338 __ mov(r13, rsp); // set sender sp 339 BLOCK_COMMENT("call Java function"); 340 __ call(c_rarg1); 341 342 BLOCK_COMMENT("call_stub_return_address:"); 343 return_address = __ pc(); 344 345 // store result depending on type (everything that is not 346 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 347 __ movptr(c_rarg0, result); 348 Label is_long, is_float, is_double, exit; 349 __ movl(c_rarg1, result_type); 350 __ cmpl(c_rarg1, T_OBJECT); 351 __ jcc(Assembler::equal, is_long); 352 __ cmpl(c_rarg1, T_LONG); 353 __ jcc(Assembler::equal, is_long); 354 __ cmpl(c_rarg1, T_FLOAT); 355 __ jcc(Assembler::equal, is_float); 356 __ cmpl(c_rarg1, T_DOUBLE); 357 __ jcc(Assembler::equal, is_double); 358 359 // handle T_INT case 360 __ movl(Address(c_rarg0, 0), rax); 361 362 __ BIND(exit); 363 364 // pop parameters 365 __ lea(rsp, rsp_after_call); 366 367 #ifdef ASSERT 368 // verify that threads correspond 369 { 370 Label L1, L2, L3; 371 __ cmpptr(r15_thread, thread); 372 __ jcc(Assembler::equal, L1); 373 __ stop("StubRoutines::call_stub: r15_thread is corrupted"); 374 __ bind(L1); 375 __ get_thread(rbx); 376 __ cmpptr(r15_thread, thread); 377 __ jcc(Assembler::equal, L2); 378 __ stop("StubRoutines::call_stub: r15_thread is modified by call"); 379 __ bind(L2); 380 __ cmpptr(r15_thread, rbx); 381 __ jcc(Assembler::equal, L3); 382 __ stop("StubRoutines::call_stub: threads must correspond"); 383 __ bind(L3); 384 } 385 #endif 386 387 // restore regs belonging to calling function 388 #ifdef _WIN64 389 int xmm_ub = 15; 390 if (UseAVX > 2) { 391 xmm_ub = 31; 392 } 393 // emit the restores for xmm regs 394 for (int i = 6; i <= xmm_ub; i++) { 395 __ movdqu(as_XMMRegister(i), xmm_save(i)); 396 } 397 #endif 398 __ movptr(r15, r15_save); 399 __ movptr(r14, r14_save); 400 __ movptr(r13, r13_save); 401 __ movptr(r12, r12_save); 402 __ movptr(rbx, rbx_save); 403 404 #ifdef _WIN64 405 __ movptr(rdi, rdi_save); 406 __ movptr(rsi, rsi_save); 407 #else 408 __ ldmxcsr(mxcsr_save); 409 #endif 410 411 // restore rsp 412 __ addptr(rsp, -rsp_after_call_off * wordSize); 413 414 // return 415 __ pop(rbp); 416 __ ret(0); 417 418 // handle return types different from T_INT 419 __ BIND(is_long); 420 __ movq(Address(c_rarg0, 0), rax); 421 __ jmp(exit); 422 423 __ BIND(is_float); 424 __ movflt(Address(c_rarg0, 0), xmm0); 425 __ jmp(exit); 426 427 __ BIND(is_double); 428 __ movdbl(Address(c_rarg0, 0), xmm0); 429 __ jmp(exit); 430 431 return start; 432 } 433 434 // Return point for a Java call if there's an exception thrown in 435 // Java code. The exception is caught and transformed into a 436 // pending exception stored in JavaThread that can be tested from 437 // within the VM. 438 // 439 // Note: Usually the parameters are removed by the callee. In case 440 // of an exception crossing an activation frame boundary, that is 441 // not the case if the callee is compiled code => need to setup the 442 // rsp. 443 // 444 // rax: exception oop 445 446 address generate_catch_exception() { 447 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 448 address start = __ pc(); 449 450 // same as in generate_call_stub(): 451 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 452 const Address thread (rbp, thread_off * wordSize); 453 454 #ifdef ASSERT 455 // verify that threads correspond 456 { 457 Label L1, L2, L3; 458 __ cmpptr(r15_thread, thread); 459 __ jcc(Assembler::equal, L1); 460 __ stop("StubRoutines::catch_exception: r15_thread is corrupted"); 461 __ bind(L1); 462 __ get_thread(rbx); 463 __ cmpptr(r15_thread, thread); 464 __ jcc(Assembler::equal, L2); 465 __ stop("StubRoutines::catch_exception: r15_thread is modified by call"); 466 __ bind(L2); 467 __ cmpptr(r15_thread, rbx); 468 __ jcc(Assembler::equal, L3); 469 __ stop("StubRoutines::catch_exception: threads must correspond"); 470 __ bind(L3); 471 } 472 #endif 473 474 // set pending exception 475 __ verify_oop(rax); 476 477 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 478 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 479 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 480 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 481 482 // complete return to VM 483 assert(StubRoutines::_call_stub_return_address != NULL, 484 "_call_stub_return_address must have been generated before"); 485 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 486 487 return start; 488 } 489 490 // Continuation point for runtime calls returning with a pending 491 // exception. The pending exception check happened in the runtime 492 // or native call stub. The pending exception in Thread is 493 // converted into a Java-level exception. 494 // 495 // Contract with Java-level exception handlers: 496 // rax: exception 497 // rdx: throwing pc 498 // 499 // NOTE: At entry of this stub, exception-pc must be on stack !! 500 501 address generate_forward_exception() { 502 StubCodeMark mark(this, "StubRoutines", "forward exception"); 503 address start = __ pc(); 504 505 // Upon entry, the sp points to the return address returning into 506 // Java (interpreted or compiled) code; i.e., the return address 507 // becomes the throwing pc. 508 // 509 // Arguments pushed before the runtime call are still on the stack 510 // but the exception handler will reset the stack pointer -> 511 // ignore them. A potential result in registers can be ignored as 512 // well. 513 514 #ifdef ASSERT 515 // make sure this code is only executed if there is a pending exception 516 { 517 Label L; 518 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); 519 __ jcc(Assembler::notEqual, L); 520 __ stop("StubRoutines::forward exception: no pending exception (1)"); 521 __ bind(L); 522 } 523 #endif 524 525 // compute exception handler into rbx 526 __ movptr(c_rarg0, Address(rsp, 0)); 527 BLOCK_COMMENT("call exception_handler_for_return_address"); 528 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 529 SharedRuntime::exception_handler_for_return_address), 530 r15_thread, c_rarg0); 531 __ mov(rbx, rax); 532 533 // setup rax & rdx, remove return address & clear pending exception 534 __ pop(rdx); 535 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 536 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 537 538 #ifdef ASSERT 539 // make sure exception is set 540 { 541 Label L; 542 __ testptr(rax, rax); 543 __ jcc(Assembler::notEqual, L); 544 __ stop("StubRoutines::forward exception: no pending exception (2)"); 545 __ bind(L); 546 } 547 #endif 548 549 // continue at exception handler (return address removed) 550 // rax: exception 551 // rbx: exception handler 552 // rdx: throwing pc 553 __ verify_oop(rax); 554 __ jmp(rbx); 555 556 return start; 557 } 558 559 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) 560 // 561 // Arguments : 562 // c_rarg0: exchange_value 563 // c_rarg0: dest 564 // 565 // Result: 566 // *dest <- ex, return (orig *dest) 567 address generate_atomic_xchg() { 568 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 569 address start = __ pc(); 570 571 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow 572 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK 573 __ ret(0); 574 575 return start; 576 } 577 578 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) 579 // 580 // Arguments : 581 // c_rarg0: exchange_value 582 // c_rarg1: dest 583 // 584 // Result: 585 // *dest <- ex, return (orig *dest) 586 address generate_atomic_xchg_ptr() { 587 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); 588 address start = __ pc(); 589 590 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 591 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK 592 __ ret(0); 593 594 return start; 595 } 596 597 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, 598 // jint compare_value) 599 // 600 // Arguments : 601 // c_rarg0: exchange_value 602 // c_rarg1: dest 603 // c_rarg2: compare_value 604 // 605 // Result: 606 // if ( compare_value == *dest ) { 607 // *dest = exchange_value 608 // return compare_value; 609 // else 610 // return *dest; 611 address generate_atomic_cmpxchg() { 612 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 613 address start = __ pc(); 614 615 __ movl(rax, c_rarg2); 616 if ( os::is_MP() ) __ lock(); 617 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); 618 __ ret(0); 619 620 return start; 621 } 622 623 // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest, 624 // jbyte compare_value) 625 // 626 // Arguments : 627 // c_rarg0: exchange_value 628 // c_rarg1: dest 629 // c_rarg2: compare_value 630 // 631 // Result: 632 // if ( compare_value == *dest ) { 633 // *dest = exchange_value 634 // return compare_value; 635 // else 636 // return *dest; 637 address generate_atomic_cmpxchg_byte() { 638 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte"); 639 address start = __ pc(); 640 641 __ movsbq(rax, c_rarg2); 642 if ( os::is_MP() ) __ lock(); 643 __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); 644 __ ret(0); 645 646 return start; 647 } 648 649 // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value, 650 // volatile jlong* dest, 651 // jlong compare_value) 652 // Arguments : 653 // c_rarg0: exchange_value 654 // c_rarg1: dest 655 // c_rarg2: compare_value 656 // 657 // Result: 658 // if ( compare_value == *dest ) { 659 // *dest = exchange_value 660 // return compare_value; 661 // else 662 // return *dest; 663 address generate_atomic_cmpxchg_long() { 664 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 665 address start = __ pc(); 666 667 __ movq(rax, c_rarg2); 668 if ( os::is_MP() ) __ lock(); 669 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 670 __ ret(0); 671 672 return start; 673 } 674 675 // Support for jint atomic::add(jint add_value, volatile jint* dest) 676 // 677 // Arguments : 678 // c_rarg0: add_value 679 // c_rarg1: dest 680 // 681 // Result: 682 // *dest += add_value 683 // return *dest; 684 address generate_atomic_add() { 685 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 686 address start = __ pc(); 687 688 __ movl(rax, c_rarg0); 689 if ( os::is_MP() ) __ lock(); 690 __ xaddl(Address(c_rarg1, 0), c_rarg0); 691 __ addl(rax, c_rarg0); 692 __ ret(0); 693 694 return start; 695 } 696 697 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 698 // 699 // Arguments : 700 // c_rarg0: add_value 701 // c_rarg1: dest 702 // 703 // Result: 704 // *dest += add_value 705 // return *dest; 706 address generate_atomic_add_ptr() { 707 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); 708 address start = __ pc(); 709 710 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 711 if ( os::is_MP() ) __ lock(); 712 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 713 __ addptr(rax, c_rarg0); 714 __ ret(0); 715 716 return start; 717 } 718 719 // Support for intptr_t OrderAccess::fence() 720 // 721 // Arguments : 722 // 723 // Result: 724 address generate_orderaccess_fence() { 725 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 726 address start = __ pc(); 727 __ membar(Assembler::StoreLoad); 728 __ ret(0); 729 730 return start; 731 } 732 733 // Support for intptr_t get_previous_fp() 734 // 735 // This routine is used to find the previous frame pointer for the 736 // caller (current_frame_guess). This is used as part of debugging 737 // ps() is seemingly lost trying to find frames. 738 // This code assumes that caller current_frame_guess) has a frame. 739 address generate_get_previous_fp() { 740 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 741 const Address old_fp(rbp, 0); 742 const Address older_fp(rax, 0); 743 address start = __ pc(); 744 745 __ enter(); 746 __ movptr(rax, old_fp); // callers fp 747 __ movptr(rax, older_fp); // the frame for ps() 748 __ pop(rbp); 749 __ ret(0); 750 751 return start; 752 } 753 754 // Support for intptr_t get_previous_sp() 755 // 756 // This routine is used to find the previous stack pointer for the 757 // caller. 758 address generate_get_previous_sp() { 759 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 760 address start = __ pc(); 761 762 __ movptr(rax, rsp); 763 __ addptr(rax, 8); // return address is at the top of the stack. 764 __ ret(0); 765 766 return start; 767 } 768 769 //---------------------------------------------------------------------------------------------------- 770 // Support for void verify_mxcsr() 771 // 772 // This routine is used with -Xcheck:jni to verify that native 773 // JNI code does not return to Java code without restoring the 774 // MXCSR register to our expected state. 775 776 address generate_verify_mxcsr() { 777 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 778 address start = __ pc(); 779 780 const Address mxcsr_save(rsp, 0); 781 782 if (CheckJNICalls) { 783 Label ok_ret; 784 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 785 __ push(rax); 786 __ subptr(rsp, wordSize); // allocate a temp location 787 __ stmxcsr(mxcsr_save); 788 __ movl(rax, mxcsr_save); 789 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 790 __ cmp32(rax, mxcsr_std); 791 __ jcc(Assembler::equal, ok_ret); 792 793 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 794 795 __ ldmxcsr(mxcsr_std); 796 797 __ bind(ok_ret); 798 __ addptr(rsp, wordSize); 799 __ pop(rax); 800 } 801 802 __ ret(0); 803 804 return start; 805 } 806 807 address generate_shenandoah_wb() { 808 StubCodeMark mark(this, "StubRoutines", "shenandoah_wb"); 809 address start = __ pc(); 810 811 Label done; 812 813 __ push(rbx); 814 // Check for object beeing in the collection set. 815 // TODO: Can we use only 1 register here? 816 __ movptr(rdi, rax); 817 __ shrptr(rdi, ShenandoahHeapRegion::RegionSizeShift); 818 __ movptr(rbx, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); 819 __ movbool(rbx, Address(rbx, rdi, Address::times_1)); 820 __ testbool(rbx); 821 __ jcc(Assembler::zero, done); 822 823 __ push(rcx); 824 __ push(rdx); 825 __ push(rdi); 826 __ push(rsi); 827 __ push(r8); 828 __ push(r9); 829 __ push(r10); 830 __ push(r11); 831 __ push(r12); 832 __ push(r13); 833 __ push(r14); 834 __ push(r15); 835 __ subptr(rsp, 128); 836 __ movdbl(Address(rsp, 0), xmm0); 837 __ movdbl(Address(rsp, 8), xmm1); 838 __ movdbl(Address(rsp, 16), xmm2); 839 __ movdbl(Address(rsp, 24), xmm3); 840 __ movdbl(Address(rsp, 32), xmm4); 841 __ movdbl(Address(rsp, 40), xmm5); 842 __ movdbl(Address(rsp, 48), xmm6); 843 __ movdbl(Address(rsp, 56), xmm7); 844 __ movdbl(Address(rsp, 64), xmm8); 845 __ movdbl(Address(rsp, 72), xmm9); 846 __ movdbl(Address(rsp, 80), xmm10); 847 __ movdbl(Address(rsp, 88), xmm11); 848 __ movdbl(Address(rsp, 96), xmm12); 849 __ movdbl(Address(rsp, 104), xmm13); 850 __ movdbl(Address(rsp, 112), xmm14); 851 __ movdbl(Address(rsp, 120), xmm15); 852 __ movptr(rdi, rax); 853 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c2), rdi); 854 __ movdbl(xmm0, Address(rsp, 0)); 855 __ movdbl(xmm1, Address(rsp, 8)); 856 __ movdbl(xmm2, Address(rsp, 16)); 857 __ movdbl(xmm3, Address(rsp, 24)); 858 __ movdbl(xmm4, Address(rsp, 32)); 859 __ movdbl(xmm5, Address(rsp, 40)); 860 __ movdbl(xmm6, Address(rsp, 48)); 861 __ movdbl(xmm7, Address(rsp, 56)); 862 __ movdbl(xmm8, Address(rsp, 64)); 863 __ movdbl(xmm9, Address(rsp, 72)); 864 __ movdbl(xmm10, Address(rsp, 80)); 865 __ movdbl(xmm11, Address(rsp, 88)); 866 __ movdbl(xmm12, Address(rsp, 96)); 867 __ movdbl(xmm13, Address(rsp, 104)); 868 __ movdbl(xmm14, Address(rsp, 112)); 869 __ movdbl(xmm15, Address(rsp, 120)); 870 __ addptr(rsp, 128); 871 __ pop(r15); 872 __ pop(r14); 873 __ pop(r13); 874 __ pop(r12); 875 __ pop(r11); 876 __ pop(r10); 877 __ pop(r9); 878 __ pop(r8); 879 __ pop(rsi); 880 __ pop(rdi); 881 __ pop(rdx); 882 __ pop(rcx); 883 884 __ bind(done); 885 886 __ pop(rbx); 887 888 __ ret(0); 889 890 return start; 891 } 892 893 address generate_f2i_fixup() { 894 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 895 Address inout(rsp, 5 * wordSize); // return address + 4 saves 896 897 address start = __ pc(); 898 899 Label L; 900 901 __ push(rax); 902 __ push(c_rarg3); 903 __ push(c_rarg2); 904 __ push(c_rarg1); 905 906 __ movl(rax, 0x7f800000); 907 __ xorl(c_rarg3, c_rarg3); 908 __ movl(c_rarg2, inout); 909 __ movl(c_rarg1, c_rarg2); 910 __ andl(c_rarg1, 0x7fffffff); 911 __ cmpl(rax, c_rarg1); // NaN? -> 0 912 __ jcc(Assembler::negative, L); 913 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 914 __ movl(c_rarg3, 0x80000000); 915 __ movl(rax, 0x7fffffff); 916 __ cmovl(Assembler::positive, c_rarg3, rax); 917 918 __ bind(L); 919 __ movptr(inout, c_rarg3); 920 921 __ pop(c_rarg1); 922 __ pop(c_rarg2); 923 __ pop(c_rarg3); 924 __ pop(rax); 925 926 __ ret(0); 927 928 return start; 929 } 930 931 address generate_f2l_fixup() { 932 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 933 Address inout(rsp, 5 * wordSize); // return address + 4 saves 934 address start = __ pc(); 935 936 Label L; 937 938 __ push(rax); 939 __ push(c_rarg3); 940 __ push(c_rarg2); 941 __ push(c_rarg1); 942 943 __ movl(rax, 0x7f800000); 944 __ xorl(c_rarg3, c_rarg3); 945 __ movl(c_rarg2, inout); 946 __ movl(c_rarg1, c_rarg2); 947 __ andl(c_rarg1, 0x7fffffff); 948 __ cmpl(rax, c_rarg1); // NaN? -> 0 949 __ jcc(Assembler::negative, L); 950 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 951 __ mov64(c_rarg3, 0x8000000000000000); 952 __ mov64(rax, 0x7fffffffffffffff); 953 __ cmov(Assembler::positive, c_rarg3, rax); 954 955 __ bind(L); 956 __ movptr(inout, c_rarg3); 957 958 __ pop(c_rarg1); 959 __ pop(c_rarg2); 960 __ pop(c_rarg3); 961 __ pop(rax); 962 963 __ ret(0); 964 965 return start; 966 } 967 968 address generate_d2i_fixup() { 969 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 970 Address inout(rsp, 6 * wordSize); // return address + 5 saves 971 972 address start = __ pc(); 973 974 Label L; 975 976 __ push(rax); 977 __ push(c_rarg3); 978 __ push(c_rarg2); 979 __ push(c_rarg1); 980 __ push(c_rarg0); 981 982 __ movl(rax, 0x7ff00000); 983 __ movq(c_rarg2, inout); 984 __ movl(c_rarg3, c_rarg2); 985 __ mov(c_rarg1, c_rarg2); 986 __ mov(c_rarg0, c_rarg2); 987 __ negl(c_rarg3); 988 __ shrptr(c_rarg1, 0x20); 989 __ orl(c_rarg3, c_rarg2); 990 __ andl(c_rarg1, 0x7fffffff); 991 __ xorl(c_rarg2, c_rarg2); 992 __ shrl(c_rarg3, 0x1f); 993 __ orl(c_rarg1, c_rarg3); 994 __ cmpl(rax, c_rarg1); 995 __ jcc(Assembler::negative, L); // NaN -> 0 996 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 997 __ movl(c_rarg2, 0x80000000); 998 __ movl(rax, 0x7fffffff); 999 __ cmov(Assembler::positive, c_rarg2, rax); 1000 1001 __ bind(L); 1002 __ movptr(inout, c_rarg2); 1003 1004 __ pop(c_rarg0); 1005 __ pop(c_rarg1); 1006 __ pop(c_rarg2); 1007 __ pop(c_rarg3); 1008 __ pop(rax); 1009 1010 __ ret(0); 1011 1012 return start; 1013 } 1014 1015 address generate_d2l_fixup() { 1016 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 1017 Address inout(rsp, 6 * wordSize); // return address + 5 saves 1018 1019 address start = __ pc(); 1020 1021 Label L; 1022 1023 __ push(rax); 1024 __ push(c_rarg3); 1025 __ push(c_rarg2); 1026 __ push(c_rarg1); 1027 __ push(c_rarg0); 1028 1029 __ movl(rax, 0x7ff00000); 1030 __ movq(c_rarg2, inout); 1031 __ movl(c_rarg3, c_rarg2); 1032 __ mov(c_rarg1, c_rarg2); 1033 __ mov(c_rarg0, c_rarg2); 1034 __ negl(c_rarg3); 1035 __ shrptr(c_rarg1, 0x20); 1036 __ orl(c_rarg3, c_rarg2); 1037 __ andl(c_rarg1, 0x7fffffff); 1038 __ xorl(c_rarg2, c_rarg2); 1039 __ shrl(c_rarg3, 0x1f); 1040 __ orl(c_rarg1, c_rarg3); 1041 __ cmpl(rax, c_rarg1); 1042 __ jcc(Assembler::negative, L); // NaN -> 0 1043 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 1044 __ mov64(c_rarg2, 0x8000000000000000); 1045 __ mov64(rax, 0x7fffffffffffffff); 1046 __ cmovq(Assembler::positive, c_rarg2, rax); 1047 1048 __ bind(L); 1049 __ movq(inout, c_rarg2); 1050 1051 __ pop(c_rarg0); 1052 __ pop(c_rarg1); 1053 __ pop(c_rarg2); 1054 __ pop(c_rarg3); 1055 __ pop(rax); 1056 1057 __ ret(0); 1058 1059 return start; 1060 } 1061 1062 address generate_fp_mask(const char *stub_name, int64_t mask) { 1063 __ align(CodeEntryAlignment); 1064 StubCodeMark mark(this, "StubRoutines", stub_name); 1065 address start = __ pc(); 1066 1067 __ emit_data64( mask, relocInfo::none ); 1068 __ emit_data64( mask, relocInfo::none ); 1069 1070 return start; 1071 } 1072 1073 // The following routine generates a subroutine to throw an 1074 // asynchronous UnknownError when an unsafe access gets a fault that 1075 // could not be reasonably prevented by the programmer. (Example: 1076 // SIGBUS/OBJERR.) 1077 address generate_handler_for_unsafe_access() { 1078 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 1079 address start = __ pc(); 1080 1081 __ push(0); // hole for return address-to-be 1082 __ pusha(); // push registers 1083 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 1084 1085 // FIXME: this probably needs alignment logic 1086 1087 __ subptr(rsp, frame::arg_reg_save_area_bytes); 1088 BLOCK_COMMENT("call handle_unsafe_access"); 1089 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 1090 __ addptr(rsp, frame::arg_reg_save_area_bytes); 1091 1092 __ movptr(next_pc, rax); // stuff next address 1093 __ popa(); 1094 __ ret(0); // jump to next address 1095 1096 return start; 1097 } 1098 1099 // Non-destructive plausibility checks for oops 1100 // 1101 // Arguments: 1102 // all args on stack! 1103 // 1104 // Stack after saving c_rarg3: 1105 // [tos + 0]: saved c_rarg3 1106 // [tos + 1]: saved c_rarg2 1107 // [tos + 2]: saved r12 (several TemplateTable methods use it) 1108 // [tos + 3]: saved flags 1109 // [tos + 4]: return address 1110 // * [tos + 5]: error message (char*) 1111 // * [tos + 6]: object to verify (oop) 1112 // * [tos + 7]: saved rax - saved by caller and bashed 1113 // * [tos + 8]: saved r10 (rscratch1) - saved by caller 1114 // * = popped on exit 1115 address generate_verify_oop() { 1116 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 1117 address start = __ pc(); 1118 1119 Label exit, error; 1120 1121 __ pushf(); 1122 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 1123 1124 __ push(r12); 1125 1126 // save c_rarg2 and c_rarg3 1127 __ push(c_rarg2); 1128 __ push(c_rarg3); 1129 1130 enum { 1131 // After previous pushes. 1132 oop_to_verify = 6 * wordSize, 1133 saved_rax = 7 * wordSize, 1134 saved_r10 = 8 * wordSize, 1135 1136 // Before the call to MacroAssembler::debug(), see below. 1137 return_addr = 16 * wordSize, 1138 error_msg = 17 * wordSize 1139 }; 1140 1141 // get object 1142 __ movptr(rax, Address(rsp, oop_to_verify)); 1143 1144 // make sure object is 'reasonable' 1145 __ testptr(rax, rax); 1146 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 1147 // Check if the oop is in the right area of memory 1148 __ movptr(c_rarg2, rax); 1149 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 1150 __ andptr(c_rarg2, c_rarg3); 1151 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 1152 __ cmpptr(c_rarg2, c_rarg3); 1153 __ jcc(Assembler::notZero, error); 1154 1155 // set r12 to heapbase for load_klass() 1156 __ reinit_heapbase(); 1157 1158 // make sure klass is 'reasonable', which is not zero. 1159 __ load_klass(rax, rax); // get klass 1160 __ testptr(rax, rax); 1161 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 1162 1163 // return if everything seems ok 1164 __ bind(exit); 1165 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1166 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1167 __ pop(c_rarg3); // restore c_rarg3 1168 __ pop(c_rarg2); // restore c_rarg2 1169 __ pop(r12); // restore r12 1170 __ popf(); // restore flags 1171 __ ret(4 * wordSize); // pop caller saved stuff 1172 1173 // handle errors 1174 __ bind(error); 1175 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1176 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1177 __ pop(c_rarg3); // get saved c_rarg3 back 1178 __ pop(c_rarg2); // get saved c_rarg2 back 1179 __ pop(r12); // get saved r12 back 1180 __ popf(); // get saved flags off stack -- 1181 // will be ignored 1182 1183 __ pusha(); // push registers 1184 // (rip is already 1185 // already pushed) 1186 // debug(char* msg, int64_t pc, int64_t regs[]) 1187 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1188 // pushed all the registers, so now the stack looks like: 1189 // [tos + 0] 16 saved registers 1190 // [tos + 16] return address 1191 // * [tos + 17] error message (char*) 1192 // * [tos + 18] object to verify (oop) 1193 // * [tos + 19] saved rax - saved by caller and bashed 1194 // * [tos + 20] saved r10 (rscratch1) - saved by caller 1195 // * = popped on exit 1196 1197 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1198 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1199 __ movq(c_rarg2, rsp); // pass address of regs on stack 1200 __ mov(r12, rsp); // remember rsp 1201 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1202 __ andptr(rsp, -16); // align stack as required by ABI 1203 BLOCK_COMMENT("call MacroAssembler::debug"); 1204 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1205 __ mov(rsp, r12); // restore rsp 1206 __ popa(); // pop registers (includes r12) 1207 __ ret(4 * wordSize); // pop caller saved stuff 1208 1209 return start; 1210 } 1211 1212 // 1213 // Verify that a register contains clean 32-bits positive value 1214 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1215 // 1216 // Input: 1217 // Rint - 32-bits value 1218 // Rtmp - scratch 1219 // 1220 void assert_clean_int(Register Rint, Register Rtmp) { 1221 #ifdef ASSERT 1222 Label L; 1223 assert_different_registers(Rtmp, Rint); 1224 __ movslq(Rtmp, Rint); 1225 __ cmpq(Rtmp, Rint); 1226 __ jcc(Assembler::equal, L); 1227 __ stop("high 32-bits of int value are not 0"); 1228 __ bind(L); 1229 #endif 1230 } 1231 1232 // Generate overlap test for array copy stubs 1233 // 1234 // Input: 1235 // c_rarg0 - from 1236 // c_rarg1 - to 1237 // c_rarg2 - element count 1238 // 1239 // Output: 1240 // rax - &from[element count - 1] 1241 // 1242 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1243 assert(no_overlap_target != NULL, "must be generated"); 1244 array_overlap_test(no_overlap_target, NULL, sf); 1245 } 1246 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1247 array_overlap_test(NULL, &L_no_overlap, sf); 1248 } 1249 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1250 const Register from = c_rarg0; 1251 const Register to = c_rarg1; 1252 const Register count = c_rarg2; 1253 const Register end_from = rax; 1254 1255 __ cmpptr(to, from); 1256 __ lea(end_from, Address(from, count, sf, 0)); 1257 if (NOLp == NULL) { 1258 ExternalAddress no_overlap(no_overlap_target); 1259 __ jump_cc(Assembler::belowEqual, no_overlap); 1260 __ cmpptr(to, end_from); 1261 __ jump_cc(Assembler::aboveEqual, no_overlap); 1262 } else { 1263 __ jcc(Assembler::belowEqual, (*NOLp)); 1264 __ cmpptr(to, end_from); 1265 __ jcc(Assembler::aboveEqual, (*NOLp)); 1266 } 1267 } 1268 1269 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1270 // 1271 // Outputs: 1272 // rdi - rcx 1273 // rsi - rdx 1274 // rdx - r8 1275 // rcx - r9 1276 // 1277 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1278 // are non-volatile. r9 and r10 should not be used by the caller. 1279 // 1280 void setup_arg_regs(int nargs = 3) { 1281 const Register saved_rdi = r9; 1282 const Register saved_rsi = r10; 1283 assert(nargs == 3 || nargs == 4, "else fix"); 1284 #ifdef _WIN64 1285 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1286 "unexpected argument registers"); 1287 if (nargs >= 4) 1288 __ mov(rax, r9); // r9 is also saved_rdi 1289 __ movptr(saved_rdi, rdi); 1290 __ movptr(saved_rsi, rsi); 1291 __ mov(rdi, rcx); // c_rarg0 1292 __ mov(rsi, rdx); // c_rarg1 1293 __ mov(rdx, r8); // c_rarg2 1294 if (nargs >= 4) 1295 __ mov(rcx, rax); // c_rarg3 (via rax) 1296 #else 1297 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1298 "unexpected argument registers"); 1299 #endif 1300 } 1301 1302 void restore_arg_regs() { 1303 const Register saved_rdi = r9; 1304 const Register saved_rsi = r10; 1305 #ifdef _WIN64 1306 __ movptr(rdi, saved_rdi); 1307 __ movptr(rsi, saved_rsi); 1308 #endif 1309 } 1310 1311 // Generate code for an array write pre barrier 1312 // 1313 // addr - starting address 1314 // count - element count 1315 // tmp - scratch register 1316 // 1317 // Destroy no registers! 1318 // 1319 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 1320 BarrierSet* bs = Universe::heap()->barrier_set(); 1321 switch (bs->kind()) { 1322 case BarrierSet::G1SATBCTLogging: 1323 case BarrierSet::ShenandoahBarrierSet: 1324 // With G1, don't generate the call if we statically know that the target in uninitialized 1325 if (!dest_uninitialized) { 1326 __ pusha(); // push registers 1327 if (count == c_rarg0) { 1328 if (addr == c_rarg1) { 1329 // exactly backwards!! 1330 __ xchgptr(c_rarg1, c_rarg0); 1331 } else { 1332 __ movptr(c_rarg1, count); 1333 __ movptr(c_rarg0, addr); 1334 } 1335 } else { 1336 __ movptr(c_rarg0, addr); 1337 __ movptr(c_rarg1, count); 1338 } 1339 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 1340 __ popa(); 1341 } 1342 break; 1343 case BarrierSet::CardTableForRS: 1344 case BarrierSet::CardTableExtension: 1345 case BarrierSet::ModRef: 1346 break; 1347 default: 1348 ShouldNotReachHere(); 1349 1350 } 1351 } 1352 1353 // 1354 // Generate code for an array write post barrier 1355 // 1356 // Input: 1357 // start - register containing starting address of destination array 1358 // count - elements count 1359 // scratch - scratch register 1360 // 1361 // The input registers are overwritten. 1362 // 1363 void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { 1364 assert_different_registers(start, count, scratch); 1365 BarrierSet* bs = Universe::heap()->barrier_set(); 1366 switch (bs->kind()) { 1367 case BarrierSet::G1SATBCTLogging: 1368 case BarrierSet::ShenandoahBarrierSet: 1369 { 1370 __ pusha(); // push registers (overkill) 1371 if (c_rarg0 == count) { // On win64 c_rarg0 == rcx 1372 assert_different_registers(c_rarg1, start); 1373 __ mov(c_rarg1, count); 1374 __ mov(c_rarg0, start); 1375 } else { 1376 assert_different_registers(c_rarg0, count); 1377 __ mov(c_rarg0, start); 1378 __ mov(c_rarg1, count); 1379 } 1380 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); 1381 __ popa(); 1382 } 1383 break; 1384 case BarrierSet::CardTableForRS: 1385 case BarrierSet::CardTableExtension: 1386 { 1387 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); 1388 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1389 1390 Label L_loop; 1391 const Register end = count; 1392 1393 __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size 1394 __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive 1395 __ shrptr(start, CardTableModRefBS::card_shift); 1396 __ shrptr(end, CardTableModRefBS::card_shift); 1397 __ subptr(end, start); // end --> cards count 1398 1399 int64_t disp = (int64_t) ct->byte_map_base; 1400 __ mov64(scratch, disp); 1401 __ addptr(start, scratch); 1402 __ BIND(L_loop); 1403 __ movb(Address(start, count, Address::times_1), 0); 1404 __ decrement(count); 1405 __ jcc(Assembler::greaterEqual, L_loop); 1406 } 1407 break; 1408 default: 1409 ShouldNotReachHere(); 1410 1411 } 1412 } 1413 1414 1415 // Copy big chunks forward 1416 // 1417 // Inputs: 1418 // end_from - source arrays end address 1419 // end_to - destination array end address 1420 // qword_count - 64-bits element count, negative 1421 // to - scratch 1422 // L_copy_bytes - entry label 1423 // L_copy_8_bytes - exit label 1424 // 1425 void copy_bytes_forward(Register end_from, Register end_to, 1426 Register qword_count, Register to, 1427 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1428 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1429 Label L_loop; 1430 __ align(OptoLoopAlignment); 1431 if (UseUnalignedLoadStores) { 1432 Label L_end; 1433 // Copy 64-bytes per iteration 1434 __ BIND(L_loop); 1435 if (UseAVX > 2) { 1436 __ evmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1437 __ evmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1438 } else if (UseAVX == 2) { 1439 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1440 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1441 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1442 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1443 } else { 1444 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1445 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1446 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1447 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1448 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1449 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1450 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1451 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1452 } 1453 __ BIND(L_copy_bytes); 1454 __ addptr(qword_count, 8); 1455 __ jcc(Assembler::lessEqual, L_loop); 1456 __ subptr(qword_count, 4); // sub(8) and add(4) 1457 __ jccb(Assembler::greater, L_end); 1458 // Copy trailing 32 bytes 1459 if (UseAVX >= 2) { 1460 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1461 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1462 } else { 1463 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1464 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1465 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1466 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1467 } 1468 __ addptr(qword_count, 4); 1469 __ BIND(L_end); 1470 if (UseAVX >= 2) { 1471 // clean upper bits of YMM registers 1472 __ vpxor(xmm0, xmm0); 1473 __ vpxor(xmm1, xmm1); 1474 } 1475 } else { 1476 // Copy 32-bytes per iteration 1477 __ BIND(L_loop); 1478 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1479 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1480 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1481 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1482 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1483 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1484 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1485 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1486 1487 __ BIND(L_copy_bytes); 1488 __ addptr(qword_count, 4); 1489 __ jcc(Assembler::lessEqual, L_loop); 1490 } 1491 __ subptr(qword_count, 4); 1492 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1493 } 1494 1495 // Copy big chunks backward 1496 // 1497 // Inputs: 1498 // from - source arrays address 1499 // dest - destination array address 1500 // qword_count - 64-bits element count 1501 // to - scratch 1502 // L_copy_bytes - entry label 1503 // L_copy_8_bytes - exit label 1504 // 1505 void copy_bytes_backward(Register from, Register dest, 1506 Register qword_count, Register to, 1507 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1508 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1509 Label L_loop; 1510 __ align(OptoLoopAlignment); 1511 if (UseUnalignedLoadStores) { 1512 Label L_end; 1513 // Copy 64-bytes per iteration 1514 __ BIND(L_loop); 1515 if (UseAVX > 2) { 1516 __ evmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32), Assembler::AVX_512bit); 1517 __ evmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0, Assembler::AVX_512bit); 1518 } else if (UseAVX == 2) { 1519 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1520 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1521 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1522 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1523 } else { 1524 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1525 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1526 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1527 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1528 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1529 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1530 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1531 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1532 } 1533 __ BIND(L_copy_bytes); 1534 __ subptr(qword_count, 8); 1535 __ jcc(Assembler::greaterEqual, L_loop); 1536 1537 __ addptr(qword_count, 4); // add(8) and sub(4) 1538 __ jccb(Assembler::less, L_end); 1539 // Copy trailing 32 bytes 1540 if (UseAVX >= 2) { 1541 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1542 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1543 } else { 1544 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1545 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1546 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1547 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1548 } 1549 __ subptr(qword_count, 4); 1550 __ BIND(L_end); 1551 if (UseAVX >= 2) { 1552 // clean upper bits of YMM registers 1553 __ vpxor(xmm0, xmm0); 1554 __ vpxor(xmm1, xmm1); 1555 } 1556 } else { 1557 // Copy 32-bytes per iteration 1558 __ BIND(L_loop); 1559 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1560 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1561 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1562 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1563 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1564 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1565 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1566 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1567 1568 __ BIND(L_copy_bytes); 1569 __ subptr(qword_count, 4); 1570 __ jcc(Assembler::greaterEqual, L_loop); 1571 } 1572 __ addptr(qword_count, 4); 1573 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1574 } 1575 1576 1577 // Arguments: 1578 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1579 // ignored 1580 // name - stub name string 1581 // 1582 // Inputs: 1583 // c_rarg0 - source array address 1584 // c_rarg1 - destination array address 1585 // c_rarg2 - element count, treated as ssize_t, can be zero 1586 // 1587 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1588 // we let the hardware handle it. The one to eight bytes within words, 1589 // dwords or qwords that span cache line boundaries will still be loaded 1590 // and stored atomically. 1591 // 1592 // Side Effects: 1593 // disjoint_byte_copy_entry is set to the no-overlap entry point 1594 // used by generate_conjoint_byte_copy(). 1595 // 1596 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1597 __ align(CodeEntryAlignment); 1598 StubCodeMark mark(this, "StubRoutines", name); 1599 address start = __ pc(); 1600 1601 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1602 Label L_copy_byte, L_exit; 1603 const Register from = rdi; // source array address 1604 const Register to = rsi; // destination array address 1605 const Register count = rdx; // elements count 1606 const Register byte_count = rcx; 1607 const Register qword_count = count; 1608 const Register end_from = from; // source array end address 1609 const Register end_to = to; // destination array end address 1610 // End pointers are inclusive, and if count is not zero they point 1611 // to the last unit copied: end_to[0] := end_from[0] 1612 1613 __ enter(); // required for proper stackwalking of RuntimeStub frame 1614 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1615 1616 if (entry != NULL) { 1617 *entry = __ pc(); 1618 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1619 BLOCK_COMMENT("Entry:"); 1620 } 1621 1622 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1623 // r9 and r10 may be used to save non-volatile registers 1624 1625 // 'from', 'to' and 'count' are now valid 1626 __ movptr(byte_count, count); 1627 __ shrptr(count, 3); // count => qword_count 1628 1629 // Copy from low to high addresses. Use 'to' as scratch. 1630 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1631 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1632 __ negptr(qword_count); // make the count negative 1633 __ jmp(L_copy_bytes); 1634 1635 // Copy trailing qwords 1636 __ BIND(L_copy_8_bytes); 1637 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1638 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1639 __ increment(qword_count); 1640 __ jcc(Assembler::notZero, L_copy_8_bytes); 1641 1642 // Check for and copy trailing dword 1643 __ BIND(L_copy_4_bytes); 1644 __ testl(byte_count, 4); 1645 __ jccb(Assembler::zero, L_copy_2_bytes); 1646 __ movl(rax, Address(end_from, 8)); 1647 __ movl(Address(end_to, 8), rax); 1648 1649 __ addptr(end_from, 4); 1650 __ addptr(end_to, 4); 1651 1652 // Check for and copy trailing word 1653 __ BIND(L_copy_2_bytes); 1654 __ testl(byte_count, 2); 1655 __ jccb(Assembler::zero, L_copy_byte); 1656 __ movw(rax, Address(end_from, 8)); 1657 __ movw(Address(end_to, 8), rax); 1658 1659 __ addptr(end_from, 2); 1660 __ addptr(end_to, 2); 1661 1662 // Check for and copy trailing byte 1663 __ BIND(L_copy_byte); 1664 __ testl(byte_count, 1); 1665 __ jccb(Assembler::zero, L_exit); 1666 __ movb(rax, Address(end_from, 8)); 1667 __ movb(Address(end_to, 8), rax); 1668 1669 __ BIND(L_exit); 1670 restore_arg_regs(); 1671 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1672 __ xorptr(rax, rax); // return 0 1673 __ leave(); // required for proper stackwalking of RuntimeStub frame 1674 __ ret(0); 1675 1676 // Copy in multi-bytes chunks 1677 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1678 __ jmp(L_copy_4_bytes); 1679 1680 return start; 1681 } 1682 1683 // Arguments: 1684 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1685 // ignored 1686 // name - stub name string 1687 // 1688 // Inputs: 1689 // c_rarg0 - source array address 1690 // c_rarg1 - destination array address 1691 // c_rarg2 - element count, treated as ssize_t, can be zero 1692 // 1693 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1694 // we let the hardware handle it. The one to eight bytes within words, 1695 // dwords or qwords that span cache line boundaries will still be loaded 1696 // and stored atomically. 1697 // 1698 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1699 address* entry, const char *name) { 1700 __ align(CodeEntryAlignment); 1701 StubCodeMark mark(this, "StubRoutines", name); 1702 address start = __ pc(); 1703 1704 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1705 const Register from = rdi; // source array address 1706 const Register to = rsi; // destination array address 1707 const Register count = rdx; // elements count 1708 const Register byte_count = rcx; 1709 const Register qword_count = count; 1710 1711 __ enter(); // required for proper stackwalking of RuntimeStub frame 1712 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1713 1714 if (entry != NULL) { 1715 *entry = __ pc(); 1716 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1717 BLOCK_COMMENT("Entry:"); 1718 } 1719 1720 array_overlap_test(nooverlap_target, Address::times_1); 1721 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1722 // r9 and r10 may be used to save non-volatile registers 1723 1724 // 'from', 'to' and 'count' are now valid 1725 __ movptr(byte_count, count); 1726 __ shrptr(count, 3); // count => qword_count 1727 1728 // Copy from high to low addresses. 1729 1730 // Check for and copy trailing byte 1731 __ testl(byte_count, 1); 1732 __ jcc(Assembler::zero, L_copy_2_bytes); 1733 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1734 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1735 __ decrement(byte_count); // Adjust for possible trailing word 1736 1737 // Check for and copy trailing word 1738 __ BIND(L_copy_2_bytes); 1739 __ testl(byte_count, 2); 1740 __ jcc(Assembler::zero, L_copy_4_bytes); 1741 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1742 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1743 1744 // Check for and copy trailing dword 1745 __ BIND(L_copy_4_bytes); 1746 __ testl(byte_count, 4); 1747 __ jcc(Assembler::zero, L_copy_bytes); 1748 __ movl(rax, Address(from, qword_count, Address::times_8)); 1749 __ movl(Address(to, qword_count, Address::times_8), rax); 1750 __ jmp(L_copy_bytes); 1751 1752 // Copy trailing qwords 1753 __ BIND(L_copy_8_bytes); 1754 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1755 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1756 __ decrement(qword_count); 1757 __ jcc(Assembler::notZero, L_copy_8_bytes); 1758 1759 restore_arg_regs(); 1760 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1761 __ xorptr(rax, rax); // return 0 1762 __ leave(); // required for proper stackwalking of RuntimeStub frame 1763 __ ret(0); 1764 1765 // Copy in multi-bytes chunks 1766 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1767 1768 restore_arg_regs(); 1769 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1770 __ xorptr(rax, rax); // return 0 1771 __ leave(); // required for proper stackwalking of RuntimeStub frame 1772 __ ret(0); 1773 1774 return start; 1775 } 1776 1777 // Arguments: 1778 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1779 // ignored 1780 // name - stub name string 1781 // 1782 // Inputs: 1783 // c_rarg0 - source array address 1784 // c_rarg1 - destination array address 1785 // c_rarg2 - element count, treated as ssize_t, can be zero 1786 // 1787 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1788 // let the hardware handle it. The two or four words within dwords 1789 // or qwords that span cache line boundaries will still be loaded 1790 // and stored atomically. 1791 // 1792 // Side Effects: 1793 // disjoint_short_copy_entry is set to the no-overlap entry point 1794 // used by generate_conjoint_short_copy(). 1795 // 1796 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1797 __ align(CodeEntryAlignment); 1798 StubCodeMark mark(this, "StubRoutines", name); 1799 address start = __ pc(); 1800 1801 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1802 const Register from = rdi; // source array address 1803 const Register to = rsi; // destination array address 1804 const Register count = rdx; // elements count 1805 const Register word_count = rcx; 1806 const Register qword_count = count; 1807 const Register end_from = from; // source array end address 1808 const Register end_to = to; // destination array end address 1809 // End pointers are inclusive, and if count is not zero they point 1810 // to the last unit copied: end_to[0] := end_from[0] 1811 1812 __ enter(); // required for proper stackwalking of RuntimeStub frame 1813 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1814 1815 if (entry != NULL) { 1816 *entry = __ pc(); 1817 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1818 BLOCK_COMMENT("Entry:"); 1819 } 1820 1821 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1822 // r9 and r10 may be used to save non-volatile registers 1823 1824 // 'from', 'to' and 'count' are now valid 1825 __ movptr(word_count, count); 1826 __ shrptr(count, 2); // count => qword_count 1827 1828 // Copy from low to high addresses. Use 'to' as scratch. 1829 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1830 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1831 __ negptr(qword_count); 1832 __ jmp(L_copy_bytes); 1833 1834 // Copy trailing qwords 1835 __ BIND(L_copy_8_bytes); 1836 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1837 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1838 __ increment(qword_count); 1839 __ jcc(Assembler::notZero, L_copy_8_bytes); 1840 1841 // Original 'dest' is trashed, so we can't use it as a 1842 // base register for a possible trailing word copy 1843 1844 // Check for and copy trailing dword 1845 __ BIND(L_copy_4_bytes); 1846 __ testl(word_count, 2); 1847 __ jccb(Assembler::zero, L_copy_2_bytes); 1848 __ movl(rax, Address(end_from, 8)); 1849 __ movl(Address(end_to, 8), rax); 1850 1851 __ addptr(end_from, 4); 1852 __ addptr(end_to, 4); 1853 1854 // Check for and copy trailing word 1855 __ BIND(L_copy_2_bytes); 1856 __ testl(word_count, 1); 1857 __ jccb(Assembler::zero, L_exit); 1858 __ movw(rax, Address(end_from, 8)); 1859 __ movw(Address(end_to, 8), rax); 1860 1861 __ BIND(L_exit); 1862 restore_arg_regs(); 1863 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1864 __ xorptr(rax, rax); // return 0 1865 __ leave(); // required for proper stackwalking of RuntimeStub frame 1866 __ ret(0); 1867 1868 // Copy in multi-bytes chunks 1869 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1870 __ jmp(L_copy_4_bytes); 1871 1872 return start; 1873 } 1874 1875 address generate_fill(BasicType t, bool aligned, const char *name) { 1876 __ align(CodeEntryAlignment); 1877 StubCodeMark mark(this, "StubRoutines", name); 1878 address start = __ pc(); 1879 1880 BLOCK_COMMENT("Entry:"); 1881 1882 const Register to = c_rarg0; // source array address 1883 const Register value = c_rarg1; // value 1884 const Register count = c_rarg2; // elements count 1885 1886 __ enter(); // required for proper stackwalking of RuntimeStub frame 1887 1888 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1889 1890 __ leave(); // required for proper stackwalking of RuntimeStub frame 1891 __ ret(0); 1892 return start; 1893 } 1894 1895 // Arguments: 1896 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1897 // ignored 1898 // name - stub name string 1899 // 1900 // Inputs: 1901 // c_rarg0 - source array address 1902 // c_rarg1 - destination array address 1903 // c_rarg2 - element count, treated as ssize_t, can be zero 1904 // 1905 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1906 // let the hardware handle it. The two or four words within dwords 1907 // or qwords that span cache line boundaries will still be loaded 1908 // and stored atomically. 1909 // 1910 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1911 address *entry, const char *name) { 1912 __ align(CodeEntryAlignment); 1913 StubCodeMark mark(this, "StubRoutines", name); 1914 address start = __ pc(); 1915 1916 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1917 const Register from = rdi; // source array address 1918 const Register to = rsi; // destination array address 1919 const Register count = rdx; // elements count 1920 const Register word_count = rcx; 1921 const Register qword_count = count; 1922 1923 __ enter(); // required for proper stackwalking of RuntimeStub frame 1924 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1925 1926 if (entry != NULL) { 1927 *entry = __ pc(); 1928 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1929 BLOCK_COMMENT("Entry:"); 1930 } 1931 1932 array_overlap_test(nooverlap_target, Address::times_2); 1933 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1934 // r9 and r10 may be used to save non-volatile registers 1935 1936 // 'from', 'to' and 'count' are now valid 1937 __ movptr(word_count, count); 1938 __ shrptr(count, 2); // count => qword_count 1939 1940 // Copy from high to low addresses. Use 'to' as scratch. 1941 1942 // Check for and copy trailing word 1943 __ testl(word_count, 1); 1944 __ jccb(Assembler::zero, L_copy_4_bytes); 1945 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1946 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1947 1948 // Check for and copy trailing dword 1949 __ BIND(L_copy_4_bytes); 1950 __ testl(word_count, 2); 1951 __ jcc(Assembler::zero, L_copy_bytes); 1952 __ movl(rax, Address(from, qword_count, Address::times_8)); 1953 __ movl(Address(to, qword_count, Address::times_8), rax); 1954 __ jmp(L_copy_bytes); 1955 1956 // Copy trailing qwords 1957 __ BIND(L_copy_8_bytes); 1958 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1959 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1960 __ decrement(qword_count); 1961 __ jcc(Assembler::notZero, L_copy_8_bytes); 1962 1963 restore_arg_regs(); 1964 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1965 __ xorptr(rax, rax); // return 0 1966 __ leave(); // required for proper stackwalking of RuntimeStub frame 1967 __ ret(0); 1968 1969 // Copy in multi-bytes chunks 1970 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1971 1972 restore_arg_regs(); 1973 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1974 __ xorptr(rax, rax); // return 0 1975 __ leave(); // required for proper stackwalking of RuntimeStub frame 1976 __ ret(0); 1977 1978 return start; 1979 } 1980 1981 // Arguments: 1982 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1983 // ignored 1984 // is_oop - true => oop array, so generate store check code 1985 // name - stub name string 1986 // 1987 // Inputs: 1988 // c_rarg0 - source array address 1989 // c_rarg1 - destination array address 1990 // c_rarg2 - element count, treated as ssize_t, can be zero 1991 // 1992 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1993 // the hardware handle it. The two dwords within qwords that span 1994 // cache line boundaries will still be loaded and stored atomicly. 1995 // 1996 // Side Effects: 1997 // disjoint_int_copy_entry is set to the no-overlap entry point 1998 // used by generate_conjoint_int_oop_copy(). 1999 // 2000 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 2001 const char *name, bool dest_uninitialized = false) { 2002 __ align(CodeEntryAlignment); 2003 StubCodeMark mark(this, "StubRoutines", name); 2004 address start = __ pc(); 2005 2006 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 2007 const Register from = rdi; // source array address 2008 const Register to = rsi; // destination array address 2009 const Register count = rdx; // elements count 2010 const Register dword_count = rcx; 2011 const Register qword_count = count; 2012 const Register end_from = from; // source array end address 2013 const Register end_to = to; // destination array end address 2014 const Register saved_to = r11; // saved destination array address 2015 // End pointers are inclusive, and if count is not zero they point 2016 // to the last unit copied: end_to[0] := end_from[0] 2017 2018 __ enter(); // required for proper stackwalking of RuntimeStub frame 2019 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2020 2021 if (entry != NULL) { 2022 *entry = __ pc(); 2023 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2024 BLOCK_COMMENT("Entry:"); 2025 } 2026 2027 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2028 // r9 and r10 may be used to save non-volatile registers 2029 if (is_oop) { 2030 __ movq(saved_to, to); 2031 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2032 } 2033 2034 // 'from', 'to' and 'count' are now valid 2035 __ movptr(dword_count, count); 2036 __ shrptr(count, 1); // count => qword_count 2037 2038 // Copy from low to high addresses. Use 'to' as scratch. 2039 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2040 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2041 __ negptr(qword_count); 2042 __ jmp(L_copy_bytes); 2043 2044 // Copy trailing qwords 2045 __ BIND(L_copy_8_bytes); 2046 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2047 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2048 __ increment(qword_count); 2049 __ jcc(Assembler::notZero, L_copy_8_bytes); 2050 2051 // Check for and copy trailing dword 2052 __ BIND(L_copy_4_bytes); 2053 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 2054 __ jccb(Assembler::zero, L_exit); 2055 __ movl(rax, Address(end_from, 8)); 2056 __ movl(Address(end_to, 8), rax); 2057 2058 __ BIND(L_exit); 2059 if (is_oop) { 2060 gen_write_ref_array_post_barrier(saved_to, dword_count, rax); 2061 } 2062 restore_arg_regs(); 2063 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2064 __ xorptr(rax, rax); // return 0 2065 __ leave(); // required for proper stackwalking of RuntimeStub frame 2066 __ ret(0); 2067 2068 // Copy in multi-bytes chunks 2069 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2070 __ jmp(L_copy_4_bytes); 2071 2072 return start; 2073 } 2074 2075 // Arguments: 2076 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 2077 // ignored 2078 // is_oop - true => oop array, so generate store check code 2079 // name - stub name string 2080 // 2081 // Inputs: 2082 // c_rarg0 - source array address 2083 // c_rarg1 - destination array address 2084 // c_rarg2 - element count, treated as ssize_t, can be zero 2085 // 2086 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2087 // the hardware handle it. The two dwords within qwords that span 2088 // cache line boundaries will still be loaded and stored atomicly. 2089 // 2090 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2091 address *entry, const char *name, 2092 bool dest_uninitialized = false) { 2093 __ align(CodeEntryAlignment); 2094 StubCodeMark mark(this, "StubRoutines", name); 2095 address start = __ pc(); 2096 2097 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 2098 const Register from = rdi; // source array address 2099 const Register to = rsi; // destination array address 2100 const Register count = rdx; // elements count 2101 const Register dword_count = rcx; 2102 const Register qword_count = count; 2103 2104 __ enter(); // required for proper stackwalking of RuntimeStub frame 2105 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2106 2107 if (entry != NULL) { 2108 *entry = __ pc(); 2109 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2110 BLOCK_COMMENT("Entry:"); 2111 } 2112 2113 array_overlap_test(nooverlap_target, Address::times_4); 2114 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2115 // r9 and r10 may be used to save non-volatile registers 2116 2117 if (is_oop) { 2118 // no registers are destroyed by this call 2119 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2120 } 2121 2122 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2123 // 'from', 'to' and 'count' are now valid 2124 __ movptr(dword_count, count); 2125 __ shrptr(count, 1); // count => qword_count 2126 2127 // Copy from high to low addresses. Use 'to' as scratch. 2128 2129 // Check for and copy trailing dword 2130 __ testl(dword_count, 1); 2131 __ jcc(Assembler::zero, L_copy_bytes); 2132 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2133 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2134 __ jmp(L_copy_bytes); 2135 2136 // Copy trailing qwords 2137 __ BIND(L_copy_8_bytes); 2138 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2139 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2140 __ decrement(qword_count); 2141 __ jcc(Assembler::notZero, L_copy_8_bytes); 2142 2143 if (is_oop) { 2144 __ jmp(L_exit); 2145 } 2146 restore_arg_regs(); 2147 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2148 __ xorptr(rax, rax); // return 0 2149 __ leave(); // required for proper stackwalking of RuntimeStub frame 2150 __ ret(0); 2151 2152 // Copy in multi-bytes chunks 2153 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2154 2155 __ BIND(L_exit); 2156 if (is_oop) { 2157 gen_write_ref_array_post_barrier(to, dword_count, rax); 2158 } 2159 restore_arg_regs(); 2160 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2161 __ xorptr(rax, rax); // return 0 2162 __ leave(); // required for proper stackwalking of RuntimeStub frame 2163 __ ret(0); 2164 2165 return start; 2166 } 2167 2168 // Arguments: 2169 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2170 // ignored 2171 // is_oop - true => oop array, so generate store check code 2172 // name - stub name string 2173 // 2174 // Inputs: 2175 // c_rarg0 - source array address 2176 // c_rarg1 - destination array address 2177 // c_rarg2 - element count, treated as ssize_t, can be zero 2178 // 2179 // Side Effects: 2180 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2181 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2182 // 2183 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2184 const char *name, bool dest_uninitialized = false) { 2185 __ align(CodeEntryAlignment); 2186 StubCodeMark mark(this, "StubRoutines", name); 2187 address start = __ pc(); 2188 2189 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2190 const Register from = rdi; // source array address 2191 const Register to = rsi; // destination array address 2192 const Register qword_count = rdx; // elements count 2193 const Register end_from = from; // source array end address 2194 const Register end_to = rcx; // destination array end address 2195 const Register saved_to = to; 2196 const Register saved_count = r11; 2197 // End pointers are inclusive, and if count is not zero they point 2198 // to the last unit copied: end_to[0] := end_from[0] 2199 2200 __ enter(); // required for proper stackwalking of RuntimeStub frame 2201 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2202 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2203 2204 if (entry != NULL) { 2205 *entry = __ pc(); 2206 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2207 BLOCK_COMMENT("Entry:"); 2208 } 2209 2210 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2211 // r9 and r10 may be used to save non-volatile registers 2212 // 'from', 'to' and 'qword_count' are now valid 2213 if (is_oop) { 2214 // Save to and count for store barrier 2215 __ movptr(saved_count, qword_count); 2216 // no registers are destroyed by this call 2217 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2218 } 2219 2220 // Copy from low to high addresses. Use 'to' as scratch. 2221 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2222 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2223 __ negptr(qword_count); 2224 __ jmp(L_copy_bytes); 2225 2226 // Copy trailing qwords 2227 __ BIND(L_copy_8_bytes); 2228 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2229 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2230 __ increment(qword_count); 2231 __ jcc(Assembler::notZero, L_copy_8_bytes); 2232 2233 if (is_oop) { 2234 __ jmp(L_exit); 2235 } else { 2236 restore_arg_regs(); 2237 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2238 __ xorptr(rax, rax); // return 0 2239 __ leave(); // required for proper stackwalking of RuntimeStub frame 2240 __ ret(0); 2241 } 2242 2243 // Copy in multi-bytes chunks 2244 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2245 2246 if (is_oop) { 2247 __ BIND(L_exit); 2248 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2249 } 2250 restore_arg_regs(); 2251 if (is_oop) { 2252 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2253 } else { 2254 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2255 } 2256 __ xorptr(rax, rax); // return 0 2257 __ leave(); // required for proper stackwalking of RuntimeStub frame 2258 __ ret(0); 2259 2260 return start; 2261 } 2262 2263 // Arguments: 2264 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2265 // ignored 2266 // is_oop - true => oop array, so generate store check code 2267 // name - stub name string 2268 // 2269 // Inputs: 2270 // c_rarg0 - source array address 2271 // c_rarg1 - destination array address 2272 // c_rarg2 - element count, treated as ssize_t, can be zero 2273 // 2274 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2275 address nooverlap_target, address *entry, 2276 const char *name, bool dest_uninitialized = false) { 2277 __ align(CodeEntryAlignment); 2278 StubCodeMark mark(this, "StubRoutines", name); 2279 address start = __ pc(); 2280 2281 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2282 const Register from = rdi; // source array address 2283 const Register to = rsi; // destination array address 2284 const Register qword_count = rdx; // elements count 2285 const Register saved_count = rcx; 2286 2287 __ enter(); // required for proper stackwalking of RuntimeStub frame 2288 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2289 2290 if (entry != NULL) { 2291 *entry = __ pc(); 2292 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2293 BLOCK_COMMENT("Entry:"); 2294 } 2295 2296 array_overlap_test(nooverlap_target, Address::times_8); 2297 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2298 // r9 and r10 may be used to save non-volatile registers 2299 // 'from', 'to' and 'qword_count' are now valid 2300 if (is_oop) { 2301 // Save to and count for store barrier 2302 __ movptr(saved_count, qword_count); 2303 // No registers are destroyed by this call 2304 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2305 } 2306 2307 __ jmp(L_copy_bytes); 2308 2309 // Copy trailing qwords 2310 __ BIND(L_copy_8_bytes); 2311 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2312 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2313 __ decrement(qword_count); 2314 __ jcc(Assembler::notZero, L_copy_8_bytes); 2315 2316 if (is_oop) { 2317 __ jmp(L_exit); 2318 } else { 2319 restore_arg_regs(); 2320 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2321 __ xorptr(rax, rax); // return 0 2322 __ leave(); // required for proper stackwalking of RuntimeStub frame 2323 __ ret(0); 2324 } 2325 2326 // Copy in multi-bytes chunks 2327 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2328 2329 if (is_oop) { 2330 __ BIND(L_exit); 2331 gen_write_ref_array_post_barrier(to, saved_count, rax); 2332 } 2333 restore_arg_regs(); 2334 if (is_oop) { 2335 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2336 } else { 2337 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2338 } 2339 __ xorptr(rax, rax); // return 0 2340 __ leave(); // required for proper stackwalking of RuntimeStub frame 2341 __ ret(0); 2342 2343 return start; 2344 } 2345 2346 2347 // Helper for generating a dynamic type check. 2348 // Smashes no registers. 2349 void generate_type_check(Register sub_klass, 2350 Register super_check_offset, 2351 Register super_klass, 2352 Label& L_success) { 2353 assert_different_registers(sub_klass, super_check_offset, super_klass); 2354 2355 BLOCK_COMMENT("type_check:"); 2356 2357 Label L_miss; 2358 2359 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2360 super_check_offset); 2361 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2362 2363 // Fall through on failure! 2364 __ BIND(L_miss); 2365 } 2366 2367 // 2368 // Generate checkcasting array copy stub 2369 // 2370 // Input: 2371 // c_rarg0 - source array address 2372 // c_rarg1 - destination array address 2373 // c_rarg2 - element count, treated as ssize_t, can be zero 2374 // c_rarg3 - size_t ckoff (super_check_offset) 2375 // not Win64 2376 // c_rarg4 - oop ckval (super_klass) 2377 // Win64 2378 // rsp+40 - oop ckval (super_klass) 2379 // 2380 // Output: 2381 // rax == 0 - success 2382 // rax == -1^K - failure, where K is partial transfer count 2383 // 2384 address generate_checkcast_copy(const char *name, address *entry, 2385 bool dest_uninitialized = false) { 2386 2387 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2388 2389 // Input registers (after setup_arg_regs) 2390 const Register from = rdi; // source array address 2391 const Register to = rsi; // destination array address 2392 const Register length = rdx; // elements count 2393 const Register ckoff = rcx; // super_check_offset 2394 const Register ckval = r8; // super_klass 2395 2396 // Registers used as temps (r13, r14 are save-on-entry) 2397 const Register end_from = from; // source array end address 2398 const Register end_to = r13; // destination array end address 2399 const Register count = rdx; // -(count_remaining) 2400 const Register r14_length = r14; // saved copy of length 2401 // End pointers are inclusive, and if length is not zero they point 2402 // to the last unit copied: end_to[0] := end_from[0] 2403 2404 const Register rax_oop = rax; // actual oop copied 2405 const Register r11_klass = r11; // oop._klass 2406 2407 //--------------------------------------------------------------- 2408 // Assembler stub will be used for this call to arraycopy 2409 // if the two arrays are subtypes of Object[] but the 2410 // destination array type is not equal to or a supertype 2411 // of the source type. Each element must be separately 2412 // checked. 2413 2414 __ align(CodeEntryAlignment); 2415 StubCodeMark mark(this, "StubRoutines", name); 2416 address start = __ pc(); 2417 2418 __ enter(); // required for proper stackwalking of RuntimeStub frame 2419 2420 #ifdef ASSERT 2421 // caller guarantees that the arrays really are different 2422 // otherwise, we would have to make conjoint checks 2423 { Label L; 2424 array_overlap_test(L, TIMES_OOP); 2425 __ stop("checkcast_copy within a single array"); 2426 __ bind(L); 2427 } 2428 #endif //ASSERT 2429 2430 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2431 // ckoff => rcx, ckval => r8 2432 // r9 and r10 may be used to save non-volatile registers 2433 #ifdef _WIN64 2434 // last argument (#4) is on stack on Win64 2435 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2436 #endif 2437 2438 // Caller of this entry point must set up the argument registers. 2439 if (entry != NULL) { 2440 *entry = __ pc(); 2441 BLOCK_COMMENT("Entry:"); 2442 } 2443 2444 // allocate spill slots for r13, r14 2445 enum { 2446 saved_r13_offset, 2447 saved_r14_offset, 2448 saved_rbp_offset 2449 }; 2450 __ subptr(rsp, saved_rbp_offset * wordSize); 2451 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2452 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2453 2454 // check that int operands are properly extended to size_t 2455 assert_clean_int(length, rax); 2456 assert_clean_int(ckoff, rax); 2457 2458 #ifdef ASSERT 2459 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2460 // The ckoff and ckval must be mutually consistent, 2461 // even though caller generates both. 2462 { Label L; 2463 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2464 __ cmpl(ckoff, Address(ckval, sco_offset)); 2465 __ jcc(Assembler::equal, L); 2466 __ stop("super_check_offset inconsistent"); 2467 __ bind(L); 2468 } 2469 #endif //ASSERT 2470 2471 // Loop-invariant addresses. They are exclusive end pointers. 2472 Address end_from_addr(from, length, TIMES_OOP, 0); 2473 Address end_to_addr(to, length, TIMES_OOP, 0); 2474 // Loop-variant addresses. They assume post-incremented count < 0. 2475 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2476 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2477 2478 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2479 2480 // Copy from low to high addresses, indexed from the end of each array. 2481 __ lea(end_from, end_from_addr); 2482 __ lea(end_to, end_to_addr); 2483 __ movptr(r14_length, length); // save a copy of the length 2484 assert(length == count, ""); // else fix next line: 2485 __ negptr(count); // negate and test the length 2486 __ jcc(Assembler::notZero, L_load_element); 2487 2488 // Empty array: Nothing to do. 2489 __ xorptr(rax, rax); // return 0 on (trivial) success 2490 __ jmp(L_done); 2491 2492 // ======== begin loop ======== 2493 // (Loop is rotated; its entry is L_load_element.) 2494 // Loop control: 2495 // for (count = -count; count != 0; count++) 2496 // Base pointers src, dst are biased by 8*(count-1),to last element. 2497 __ align(OptoLoopAlignment); 2498 2499 __ BIND(L_store_element); 2500 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2501 __ increment(count); // increment the count toward zero 2502 __ jcc(Assembler::zero, L_do_card_marks); 2503 2504 // ======== loop entry is here ======== 2505 __ BIND(L_load_element); 2506 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2507 __ testptr(rax_oop, rax_oop); 2508 __ jcc(Assembler::zero, L_store_element); 2509 2510 __ load_klass(r11_klass, rax_oop);// query the object klass 2511 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2512 // ======== end loop ======== 2513 2514 // It was a real error; we must depend on the caller to finish the job. 2515 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2516 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2517 // and report their number to the caller. 2518 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2519 Label L_post_barrier; 2520 __ addptr(r14_length, count); // K = (original - remaining) oops 2521 __ movptr(rax, r14_length); // save the value 2522 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2523 __ jccb(Assembler::notZero, L_post_barrier); 2524 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2525 2526 // Come here on success only. 2527 __ BIND(L_do_card_marks); 2528 __ xorptr(rax, rax); // return 0 on success 2529 2530 __ BIND(L_post_barrier); 2531 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2532 2533 // Common exit point (success or failure). 2534 __ BIND(L_done); 2535 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2536 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2537 restore_arg_regs(); 2538 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2539 __ leave(); // required for proper stackwalking of RuntimeStub frame 2540 __ ret(0); 2541 2542 return start; 2543 } 2544 2545 // 2546 // Generate 'unsafe' array copy stub 2547 // Though just as safe as the other stubs, it takes an unscaled 2548 // size_t argument instead of an element count. 2549 // 2550 // Input: 2551 // c_rarg0 - source array address 2552 // c_rarg1 - destination array address 2553 // c_rarg2 - byte count, treated as ssize_t, can be zero 2554 // 2555 // Examines the alignment of the operands and dispatches 2556 // to a long, int, short, or byte copy loop. 2557 // 2558 address generate_unsafe_copy(const char *name, 2559 address byte_copy_entry, address short_copy_entry, 2560 address int_copy_entry, address long_copy_entry) { 2561 2562 Label L_long_aligned, L_int_aligned, L_short_aligned; 2563 2564 // Input registers (before setup_arg_regs) 2565 const Register from = c_rarg0; // source array address 2566 const Register to = c_rarg1; // destination array address 2567 const Register size = c_rarg2; // byte count (size_t) 2568 2569 // Register used as a temp 2570 const Register bits = rax; // test copy of low bits 2571 2572 __ align(CodeEntryAlignment); 2573 StubCodeMark mark(this, "StubRoutines", name); 2574 address start = __ pc(); 2575 2576 __ enter(); // required for proper stackwalking of RuntimeStub frame 2577 2578 // bump this on entry, not on exit: 2579 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2580 2581 __ mov(bits, from); 2582 __ orptr(bits, to); 2583 __ orptr(bits, size); 2584 2585 __ testb(bits, BytesPerLong-1); 2586 __ jccb(Assembler::zero, L_long_aligned); 2587 2588 __ testb(bits, BytesPerInt-1); 2589 __ jccb(Assembler::zero, L_int_aligned); 2590 2591 __ testb(bits, BytesPerShort-1); 2592 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2593 2594 __ BIND(L_short_aligned); 2595 __ shrptr(size, LogBytesPerShort); // size => short_count 2596 __ jump(RuntimeAddress(short_copy_entry)); 2597 2598 __ BIND(L_int_aligned); 2599 __ shrptr(size, LogBytesPerInt); // size => int_count 2600 __ jump(RuntimeAddress(int_copy_entry)); 2601 2602 __ BIND(L_long_aligned); 2603 __ shrptr(size, LogBytesPerLong); // size => qword_count 2604 __ jump(RuntimeAddress(long_copy_entry)); 2605 2606 return start; 2607 } 2608 2609 // Perform range checks on the proposed arraycopy. 2610 // Kills temp, but nothing else. 2611 // Also, clean the sign bits of src_pos and dst_pos. 2612 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2613 Register src_pos, // source position (c_rarg1) 2614 Register dst, // destination array oo (c_rarg2) 2615 Register dst_pos, // destination position (c_rarg3) 2616 Register length, 2617 Register temp, 2618 Label& L_failed) { 2619 BLOCK_COMMENT("arraycopy_range_checks:"); 2620 2621 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2622 __ movl(temp, length); 2623 __ addl(temp, src_pos); // src_pos + length 2624 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2625 __ jcc(Assembler::above, L_failed); 2626 2627 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2628 __ movl(temp, length); 2629 __ addl(temp, dst_pos); // dst_pos + length 2630 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2631 __ jcc(Assembler::above, L_failed); 2632 2633 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2634 // Move with sign extension can be used since they are positive. 2635 __ movslq(src_pos, src_pos); 2636 __ movslq(dst_pos, dst_pos); 2637 2638 BLOCK_COMMENT("arraycopy_range_checks done"); 2639 } 2640 2641 // 2642 // Generate generic array copy stubs 2643 // 2644 // Input: 2645 // c_rarg0 - src oop 2646 // c_rarg1 - src_pos (32-bits) 2647 // c_rarg2 - dst oop 2648 // c_rarg3 - dst_pos (32-bits) 2649 // not Win64 2650 // c_rarg4 - element count (32-bits) 2651 // Win64 2652 // rsp+40 - element count (32-bits) 2653 // 2654 // Output: 2655 // rax == 0 - success 2656 // rax == -1^K - failure, where K is partial transfer count 2657 // 2658 address generate_generic_copy(const char *name, 2659 address byte_copy_entry, address short_copy_entry, 2660 address int_copy_entry, address oop_copy_entry, 2661 address long_copy_entry, address checkcast_copy_entry) { 2662 2663 Label L_failed, L_failed_0, L_objArray; 2664 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2665 2666 // Input registers 2667 const Register src = c_rarg0; // source array oop 2668 const Register src_pos = c_rarg1; // source position 2669 const Register dst = c_rarg2; // destination array oop 2670 const Register dst_pos = c_rarg3; // destination position 2671 #ifndef _WIN64 2672 const Register length = c_rarg4; 2673 #else 2674 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2675 #endif 2676 2677 { int modulus = CodeEntryAlignment; 2678 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2679 int advance = target - (__ offset() % modulus); 2680 if (advance < 0) advance += modulus; 2681 if (advance > 0) __ nop(advance); 2682 } 2683 StubCodeMark mark(this, "StubRoutines", name); 2684 2685 // Short-hop target to L_failed. Makes for denser prologue code. 2686 __ BIND(L_failed_0); 2687 __ jmp(L_failed); 2688 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2689 2690 __ align(CodeEntryAlignment); 2691 address start = __ pc(); 2692 2693 __ enter(); // required for proper stackwalking of RuntimeStub frame 2694 2695 // bump this on entry, not on exit: 2696 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2697 2698 //----------------------------------------------------------------------- 2699 // Assembler stub will be used for this call to arraycopy 2700 // if the following conditions are met: 2701 // 2702 // (1) src and dst must not be null. 2703 // (2) src_pos must not be negative. 2704 // (3) dst_pos must not be negative. 2705 // (4) length must not be negative. 2706 // (5) src klass and dst klass should be the same and not NULL. 2707 // (6) src and dst should be arrays. 2708 // (7) src_pos + length must not exceed length of src. 2709 // (8) dst_pos + length must not exceed length of dst. 2710 // 2711 2712 // if (src == NULL) return -1; 2713 __ testptr(src, src); // src oop 2714 size_t j1off = __ offset(); 2715 __ jccb(Assembler::zero, L_failed_0); 2716 2717 // if (src_pos < 0) return -1; 2718 __ testl(src_pos, src_pos); // src_pos (32-bits) 2719 __ jccb(Assembler::negative, L_failed_0); 2720 2721 // if (dst == NULL) return -1; 2722 __ testptr(dst, dst); // dst oop 2723 __ jccb(Assembler::zero, L_failed_0); 2724 2725 // if (dst_pos < 0) return -1; 2726 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2727 size_t j4off = __ offset(); 2728 __ jccb(Assembler::negative, L_failed_0); 2729 2730 // The first four tests are very dense code, 2731 // but not quite dense enough to put four 2732 // jumps in a 16-byte instruction fetch buffer. 2733 // That's good, because some branch predicters 2734 // do not like jumps so close together. 2735 // Make sure of this. 2736 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2737 2738 // registers used as temp 2739 const Register r11_length = r11; // elements count to copy 2740 const Register r10_src_klass = r10; // array klass 2741 2742 // if (length < 0) return -1; 2743 __ movl(r11_length, length); // length (elements count, 32-bits value) 2744 __ testl(r11_length, r11_length); 2745 __ jccb(Assembler::negative, L_failed_0); 2746 2747 __ load_klass(r10_src_klass, src); 2748 #ifdef ASSERT 2749 // assert(src->klass() != NULL); 2750 { 2751 BLOCK_COMMENT("assert klasses not null {"); 2752 Label L1, L2; 2753 __ testptr(r10_src_klass, r10_src_klass); 2754 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2755 __ bind(L1); 2756 __ stop("broken null klass"); 2757 __ bind(L2); 2758 __ load_klass(rax, dst); 2759 __ cmpq(rax, 0); 2760 __ jcc(Assembler::equal, L1); // this would be broken also 2761 BLOCK_COMMENT("} assert klasses not null done"); 2762 } 2763 #endif 2764 2765 // Load layout helper (32-bits) 2766 // 2767 // |array_tag| | header_size | element_type | |log2_element_size| 2768 // 32 30 24 16 8 2 0 2769 // 2770 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2771 // 2772 2773 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2774 2775 // Handle objArrays completely differently... 2776 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2777 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2778 __ jcc(Assembler::equal, L_objArray); 2779 2780 // if (src->klass() != dst->klass()) return -1; 2781 __ load_klass(rax, dst); 2782 __ cmpq(r10_src_klass, rax); 2783 __ jcc(Assembler::notEqual, L_failed); 2784 2785 const Register rax_lh = rax; // layout helper 2786 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2787 2788 // if (!src->is_Array()) return -1; 2789 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2790 __ jcc(Assembler::greaterEqual, L_failed); 2791 2792 // At this point, it is known to be a typeArray (array_tag 0x3). 2793 #ifdef ASSERT 2794 { 2795 BLOCK_COMMENT("assert primitive array {"); 2796 Label L; 2797 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2798 __ jcc(Assembler::greaterEqual, L); 2799 __ stop("must be a primitive array"); 2800 __ bind(L); 2801 BLOCK_COMMENT("} assert primitive array done"); 2802 } 2803 #endif 2804 2805 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2806 r10, L_failed); 2807 2808 // TypeArrayKlass 2809 // 2810 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2811 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2812 // 2813 2814 const Register r10_offset = r10; // array offset 2815 const Register rax_elsize = rax_lh; // element size 2816 2817 __ movl(r10_offset, rax_lh); 2818 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2819 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2820 __ addptr(src, r10_offset); // src array offset 2821 __ addptr(dst, r10_offset); // dst array offset 2822 BLOCK_COMMENT("choose copy loop based on element size"); 2823 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2824 2825 // next registers should be set before the jump to corresponding stub 2826 const Register from = c_rarg0; // source array address 2827 const Register to = c_rarg1; // destination array address 2828 const Register count = c_rarg2; // elements count 2829 2830 // 'from', 'to', 'count' registers should be set in such order 2831 // since they are the same as 'src', 'src_pos', 'dst'. 2832 2833 __ BIND(L_copy_bytes); 2834 __ cmpl(rax_elsize, 0); 2835 __ jccb(Assembler::notEqual, L_copy_shorts); 2836 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2837 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2838 __ movl2ptr(count, r11_length); // length 2839 __ jump(RuntimeAddress(byte_copy_entry)); 2840 2841 __ BIND(L_copy_shorts); 2842 __ cmpl(rax_elsize, LogBytesPerShort); 2843 __ jccb(Assembler::notEqual, L_copy_ints); 2844 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2845 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2846 __ movl2ptr(count, r11_length); // length 2847 __ jump(RuntimeAddress(short_copy_entry)); 2848 2849 __ BIND(L_copy_ints); 2850 __ cmpl(rax_elsize, LogBytesPerInt); 2851 __ jccb(Assembler::notEqual, L_copy_longs); 2852 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2853 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2854 __ movl2ptr(count, r11_length); // length 2855 __ jump(RuntimeAddress(int_copy_entry)); 2856 2857 __ BIND(L_copy_longs); 2858 #ifdef ASSERT 2859 { 2860 BLOCK_COMMENT("assert long copy {"); 2861 Label L; 2862 __ cmpl(rax_elsize, LogBytesPerLong); 2863 __ jcc(Assembler::equal, L); 2864 __ stop("must be long copy, but elsize is wrong"); 2865 __ bind(L); 2866 BLOCK_COMMENT("} assert long copy done"); 2867 } 2868 #endif 2869 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2870 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2871 __ movl2ptr(count, r11_length); // length 2872 __ jump(RuntimeAddress(long_copy_entry)); 2873 2874 // ObjArrayKlass 2875 __ BIND(L_objArray); 2876 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2877 2878 Label L_plain_copy, L_checkcast_copy; 2879 // test array classes for subtyping 2880 __ load_klass(rax, dst); 2881 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2882 __ jcc(Assembler::notEqual, L_checkcast_copy); 2883 2884 // Identically typed arrays can be copied without element-wise checks. 2885 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2886 r10, L_failed); 2887 2888 __ lea(from, Address(src, src_pos, TIMES_OOP, 2889 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2890 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2891 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2892 __ movl2ptr(count, r11_length); // length 2893 __ BIND(L_plain_copy); 2894 __ jump(RuntimeAddress(oop_copy_entry)); 2895 2896 __ BIND(L_checkcast_copy); 2897 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2898 { 2899 // Before looking at dst.length, make sure dst is also an objArray. 2900 __ cmpl(Address(rax, lh_offset), objArray_lh); 2901 __ jcc(Assembler::notEqual, L_failed); 2902 2903 // It is safe to examine both src.length and dst.length. 2904 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2905 rax, L_failed); 2906 2907 const Register r11_dst_klass = r11; 2908 __ load_klass(r11_dst_klass, dst); // reload 2909 2910 // Marshal the base address arguments now, freeing registers. 2911 __ lea(from, Address(src, src_pos, TIMES_OOP, 2912 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2913 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2914 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2915 __ movl(count, length); // length (reloaded) 2916 Register sco_temp = c_rarg3; // this register is free now 2917 assert_different_registers(from, to, count, sco_temp, 2918 r11_dst_klass, r10_src_klass); 2919 assert_clean_int(count, sco_temp); 2920 2921 // Generate the type check. 2922 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2923 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2924 assert_clean_int(sco_temp, rax); 2925 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2926 2927 // Fetch destination element klass from the ObjArrayKlass header. 2928 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2929 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2930 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2931 assert_clean_int(sco_temp, rax); 2932 2933 // the checkcast_copy loop needs two extra arguments: 2934 assert(c_rarg3 == sco_temp, "#3 already in place"); 2935 // Set up arguments for checkcast_copy_entry. 2936 setup_arg_regs(4); 2937 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2938 __ jump(RuntimeAddress(checkcast_copy_entry)); 2939 } 2940 2941 __ BIND(L_failed); 2942 __ xorptr(rax, rax); 2943 __ notptr(rax); // return -1 2944 __ leave(); // required for proper stackwalking of RuntimeStub frame 2945 __ ret(0); 2946 2947 return start; 2948 } 2949 2950 void generate_arraycopy_stubs() { 2951 address entry; 2952 address entry_jbyte_arraycopy; 2953 address entry_jshort_arraycopy; 2954 address entry_jint_arraycopy; 2955 address entry_oop_arraycopy; 2956 address entry_jlong_arraycopy; 2957 address entry_checkcast_arraycopy; 2958 2959 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2960 "jbyte_disjoint_arraycopy"); 2961 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2962 "jbyte_arraycopy"); 2963 2964 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2965 "jshort_disjoint_arraycopy"); 2966 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2967 "jshort_arraycopy"); 2968 2969 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2970 "jint_disjoint_arraycopy"); 2971 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2972 &entry_jint_arraycopy, "jint_arraycopy"); 2973 2974 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2975 "jlong_disjoint_arraycopy"); 2976 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2977 &entry_jlong_arraycopy, "jlong_arraycopy"); 2978 2979 2980 if (UseCompressedOops) { 2981 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2982 "oop_disjoint_arraycopy"); 2983 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2984 &entry_oop_arraycopy, "oop_arraycopy"); 2985 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2986 "oop_disjoint_arraycopy_uninit", 2987 /*dest_uninitialized*/true); 2988 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2989 NULL, "oop_arraycopy_uninit", 2990 /*dest_uninitialized*/true); 2991 } else { 2992 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2993 "oop_disjoint_arraycopy"); 2994 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2995 &entry_oop_arraycopy, "oop_arraycopy"); 2996 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2997 "oop_disjoint_arraycopy_uninit", 2998 /*dest_uninitialized*/true); 2999 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3000 NULL, "oop_arraycopy_uninit", 3001 /*dest_uninitialized*/true); 3002 } 3003 3004 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3005 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3006 /*dest_uninitialized*/true); 3007 3008 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3009 entry_jbyte_arraycopy, 3010 entry_jshort_arraycopy, 3011 entry_jint_arraycopy, 3012 entry_jlong_arraycopy); 3013 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3014 entry_jbyte_arraycopy, 3015 entry_jshort_arraycopy, 3016 entry_jint_arraycopy, 3017 entry_oop_arraycopy, 3018 entry_jlong_arraycopy, 3019 entry_checkcast_arraycopy); 3020 3021 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3022 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3023 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3024 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3025 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3026 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3027 3028 // We don't generate specialized code for HeapWord-aligned source 3029 // arrays, so just use the code we've already generated 3030 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3031 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3032 3033 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3034 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3035 3036 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3037 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3038 3039 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3040 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3041 3042 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3043 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3044 3045 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3046 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3047 } 3048 3049 void generate_math_stubs() { 3050 { 3051 StubCodeMark mark(this, "StubRoutines", "log"); 3052 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 3053 3054 __ subq(rsp, 8); 3055 __ movdbl(Address(rsp, 0), xmm0); 3056 __ fld_d(Address(rsp, 0)); 3057 __ flog(); 3058 __ fstp_d(Address(rsp, 0)); 3059 __ movdbl(xmm0, Address(rsp, 0)); 3060 __ addq(rsp, 8); 3061 __ ret(0); 3062 } 3063 { 3064 StubCodeMark mark(this, "StubRoutines", "log10"); 3065 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 3066 3067 __ subq(rsp, 8); 3068 __ movdbl(Address(rsp, 0), xmm0); 3069 __ fld_d(Address(rsp, 0)); 3070 __ flog10(); 3071 __ fstp_d(Address(rsp, 0)); 3072 __ movdbl(xmm0, Address(rsp, 0)); 3073 __ addq(rsp, 8); 3074 __ ret(0); 3075 } 3076 { 3077 StubCodeMark mark(this, "StubRoutines", "sin"); 3078 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 3079 3080 __ subq(rsp, 8); 3081 __ movdbl(Address(rsp, 0), xmm0); 3082 __ fld_d(Address(rsp, 0)); 3083 __ trigfunc('s'); 3084 __ fstp_d(Address(rsp, 0)); 3085 __ movdbl(xmm0, Address(rsp, 0)); 3086 __ addq(rsp, 8); 3087 __ ret(0); 3088 } 3089 { 3090 StubCodeMark mark(this, "StubRoutines", "cos"); 3091 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 3092 3093 __ subq(rsp, 8); 3094 __ movdbl(Address(rsp, 0), xmm0); 3095 __ fld_d(Address(rsp, 0)); 3096 __ trigfunc('c'); 3097 __ fstp_d(Address(rsp, 0)); 3098 __ movdbl(xmm0, Address(rsp, 0)); 3099 __ addq(rsp, 8); 3100 __ ret(0); 3101 } 3102 { 3103 StubCodeMark mark(this, "StubRoutines", "tan"); 3104 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 3105 3106 __ subq(rsp, 8); 3107 __ movdbl(Address(rsp, 0), xmm0); 3108 __ fld_d(Address(rsp, 0)); 3109 __ trigfunc('t'); 3110 __ fstp_d(Address(rsp, 0)); 3111 __ movdbl(xmm0, Address(rsp, 0)); 3112 __ addq(rsp, 8); 3113 __ ret(0); 3114 } 3115 { 3116 StubCodeMark mark(this, "StubRoutines", "exp"); 3117 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 3118 3119 __ subq(rsp, 8); 3120 __ movdbl(Address(rsp, 0), xmm0); 3121 __ fld_d(Address(rsp, 0)); 3122 __ exp_with_fallback(0); 3123 __ fstp_d(Address(rsp, 0)); 3124 __ movdbl(xmm0, Address(rsp, 0)); 3125 __ addq(rsp, 8); 3126 __ ret(0); 3127 } 3128 { 3129 StubCodeMark mark(this, "StubRoutines", "pow"); 3130 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 3131 3132 __ subq(rsp, 8); 3133 __ movdbl(Address(rsp, 0), xmm1); 3134 __ fld_d(Address(rsp, 0)); 3135 __ movdbl(Address(rsp, 0), xmm0); 3136 __ fld_d(Address(rsp, 0)); 3137 __ pow_with_fallback(0); 3138 __ fstp_d(Address(rsp, 0)); 3139 __ movdbl(xmm0, Address(rsp, 0)); 3140 __ addq(rsp, 8); 3141 __ ret(0); 3142 } 3143 } 3144 3145 // AES intrinsic stubs 3146 enum {AESBlockSize = 16}; 3147 3148 address generate_key_shuffle_mask() { 3149 __ align(16); 3150 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3151 address start = __ pc(); 3152 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3153 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3154 return start; 3155 } 3156 3157 // Utility routine for loading a 128-bit key word in little endian format 3158 // can optionally specify that the shuffle mask is already in an xmmregister 3159 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3160 __ movdqu(xmmdst, Address(key, offset)); 3161 if (xmm_shuf_mask != NULL) { 3162 __ pshufb(xmmdst, xmm_shuf_mask); 3163 } else { 3164 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3165 } 3166 } 3167 3168 // Arguments: 3169 // 3170 // Inputs: 3171 // c_rarg0 - source byte array address 3172 // c_rarg1 - destination byte array address 3173 // c_rarg2 - K (key) in little endian int array 3174 // 3175 address generate_aescrypt_encryptBlock() { 3176 assert(UseAES, "need AES instructions and misaligned SSE support"); 3177 __ align(CodeEntryAlignment); 3178 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3179 Label L_doLast; 3180 address start = __ pc(); 3181 3182 const Register from = c_rarg0; // source array address 3183 const Register to = c_rarg1; // destination array address 3184 const Register key = c_rarg2; // key array address 3185 const Register keylen = rax; 3186 3187 const XMMRegister xmm_result = xmm0; 3188 const XMMRegister xmm_key_shuf_mask = xmm1; 3189 // On win64 xmm6-xmm15 must be preserved so don't use them. 3190 const XMMRegister xmm_temp1 = xmm2; 3191 const XMMRegister xmm_temp2 = xmm3; 3192 const XMMRegister xmm_temp3 = xmm4; 3193 const XMMRegister xmm_temp4 = xmm5; 3194 3195 __ enter(); // required for proper stackwalking of RuntimeStub frame 3196 3197 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3198 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3199 3200 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3201 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3202 3203 // For encryption, the java expanded key ordering is just what we need 3204 // we don't know if the key is aligned, hence not using load-execute form 3205 3206 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3207 __ pxor(xmm_result, xmm_temp1); 3208 3209 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3210 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3211 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3212 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3213 3214 __ aesenc(xmm_result, xmm_temp1); 3215 __ aesenc(xmm_result, xmm_temp2); 3216 __ aesenc(xmm_result, xmm_temp3); 3217 __ aesenc(xmm_result, xmm_temp4); 3218 3219 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3220 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3221 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3222 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3223 3224 __ aesenc(xmm_result, xmm_temp1); 3225 __ aesenc(xmm_result, xmm_temp2); 3226 __ aesenc(xmm_result, xmm_temp3); 3227 __ aesenc(xmm_result, xmm_temp4); 3228 3229 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3230 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3231 3232 __ cmpl(keylen, 44); 3233 __ jccb(Assembler::equal, L_doLast); 3234 3235 __ aesenc(xmm_result, xmm_temp1); 3236 __ aesenc(xmm_result, xmm_temp2); 3237 3238 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3239 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3240 3241 __ cmpl(keylen, 52); 3242 __ jccb(Assembler::equal, L_doLast); 3243 3244 __ aesenc(xmm_result, xmm_temp1); 3245 __ aesenc(xmm_result, xmm_temp2); 3246 3247 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3248 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3249 3250 __ BIND(L_doLast); 3251 __ aesenc(xmm_result, xmm_temp1); 3252 __ aesenclast(xmm_result, xmm_temp2); 3253 __ movdqu(Address(to, 0), xmm_result); // store the result 3254 __ xorptr(rax, rax); // return 0 3255 __ leave(); // required for proper stackwalking of RuntimeStub frame 3256 __ ret(0); 3257 3258 return start; 3259 } 3260 3261 3262 // Arguments: 3263 // 3264 // Inputs: 3265 // c_rarg0 - source byte array address 3266 // c_rarg1 - destination byte array address 3267 // c_rarg2 - K (key) in little endian int array 3268 // 3269 address generate_aescrypt_decryptBlock() { 3270 assert(UseAES, "need AES instructions and misaligned SSE support"); 3271 __ align(CodeEntryAlignment); 3272 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3273 Label L_doLast; 3274 address start = __ pc(); 3275 3276 const Register from = c_rarg0; // source array address 3277 const Register to = c_rarg1; // destination array address 3278 const Register key = c_rarg2; // key array address 3279 const Register keylen = rax; 3280 3281 const XMMRegister xmm_result = xmm0; 3282 const XMMRegister xmm_key_shuf_mask = xmm1; 3283 // On win64 xmm6-xmm15 must be preserved so don't use them. 3284 const XMMRegister xmm_temp1 = xmm2; 3285 const XMMRegister xmm_temp2 = xmm3; 3286 const XMMRegister xmm_temp3 = xmm4; 3287 const XMMRegister xmm_temp4 = xmm5; 3288 3289 __ enter(); // required for proper stackwalking of RuntimeStub frame 3290 3291 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3292 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3293 3294 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3295 __ movdqu(xmm_result, Address(from, 0)); 3296 3297 // for decryption java expanded key ordering is rotated one position from what we want 3298 // so we start from 0x10 here and hit 0x00 last 3299 // we don't know if the key is aligned, hence not using load-execute form 3300 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3301 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3302 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3303 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3304 3305 __ pxor (xmm_result, xmm_temp1); 3306 __ aesdec(xmm_result, xmm_temp2); 3307 __ aesdec(xmm_result, xmm_temp3); 3308 __ aesdec(xmm_result, xmm_temp4); 3309 3310 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3311 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3312 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3313 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3314 3315 __ aesdec(xmm_result, xmm_temp1); 3316 __ aesdec(xmm_result, xmm_temp2); 3317 __ aesdec(xmm_result, xmm_temp3); 3318 __ aesdec(xmm_result, xmm_temp4); 3319 3320 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3321 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3322 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3323 3324 __ cmpl(keylen, 44); 3325 __ jccb(Assembler::equal, L_doLast); 3326 3327 __ aesdec(xmm_result, xmm_temp1); 3328 __ aesdec(xmm_result, xmm_temp2); 3329 3330 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3331 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3332 3333 __ cmpl(keylen, 52); 3334 __ jccb(Assembler::equal, L_doLast); 3335 3336 __ aesdec(xmm_result, xmm_temp1); 3337 __ aesdec(xmm_result, xmm_temp2); 3338 3339 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3340 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3341 3342 __ BIND(L_doLast); 3343 __ aesdec(xmm_result, xmm_temp1); 3344 __ aesdec(xmm_result, xmm_temp2); 3345 3346 // for decryption the aesdeclast operation is always on key+0x00 3347 __ aesdeclast(xmm_result, xmm_temp3); 3348 __ movdqu(Address(to, 0), xmm_result); // store the result 3349 __ xorptr(rax, rax); // return 0 3350 __ leave(); // required for proper stackwalking of RuntimeStub frame 3351 __ ret(0); 3352 3353 return start; 3354 } 3355 3356 3357 // Arguments: 3358 // 3359 // Inputs: 3360 // c_rarg0 - source byte array address 3361 // c_rarg1 - destination byte array address 3362 // c_rarg2 - K (key) in little endian int array 3363 // c_rarg3 - r vector byte array address 3364 // c_rarg4 - input length 3365 // 3366 // Output: 3367 // rax - input length 3368 // 3369 address generate_cipherBlockChaining_encryptAESCrypt() { 3370 assert(UseAES, "need AES instructions and misaligned SSE support"); 3371 __ align(CodeEntryAlignment); 3372 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3373 address start = __ pc(); 3374 3375 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3376 const Register from = c_rarg0; // source array address 3377 const Register to = c_rarg1; // destination array address 3378 const Register key = c_rarg2; // key array address 3379 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3380 // and left with the results of the last encryption block 3381 #ifndef _WIN64 3382 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3383 #else 3384 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3385 const Register len_reg = r10; // pick the first volatile windows register 3386 #endif 3387 const Register pos = rax; 3388 3389 // xmm register assignments for the loops below 3390 const XMMRegister xmm_result = xmm0; 3391 const XMMRegister xmm_temp = xmm1; 3392 // keys 0-10 preloaded into xmm2-xmm12 3393 const int XMM_REG_NUM_KEY_FIRST = 2; 3394 const int XMM_REG_NUM_KEY_LAST = 15; 3395 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3396 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3397 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3398 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3399 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3400 3401 __ enter(); // required for proper stackwalking of RuntimeStub frame 3402 3403 #ifdef _WIN64 3404 // on win64, fill len_reg from stack position 3405 __ movl(len_reg, len_mem); 3406 // save the xmm registers which must be preserved 6-15 3407 __ subptr(rsp, -rsp_after_call_off * wordSize); 3408 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3409 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3410 } 3411 #else 3412 __ push(len_reg); // Save 3413 #endif 3414 3415 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3416 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3417 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3418 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3419 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3420 offset += 0x10; 3421 } 3422 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3423 3424 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3425 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3426 __ cmpl(rax, 44); 3427 __ jcc(Assembler::notEqual, L_key_192_256); 3428 3429 // 128 bit code follows here 3430 __ movptr(pos, 0); 3431 __ align(OptoLoopAlignment); 3432 3433 __ BIND(L_loopTop_128); 3434 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3435 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3436 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3437 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3438 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3439 } 3440 __ aesenclast(xmm_result, xmm_key10); 3441 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3442 // no need to store r to memory until we exit 3443 __ addptr(pos, AESBlockSize); 3444 __ subptr(len_reg, AESBlockSize); 3445 __ jcc(Assembler::notEqual, L_loopTop_128); 3446 3447 __ BIND(L_exit); 3448 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3449 3450 #ifdef _WIN64 3451 // restore xmm regs belonging to calling function 3452 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3453 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3454 } 3455 __ movl(rax, len_mem); 3456 #else 3457 __ pop(rax); // return length 3458 #endif 3459 __ leave(); // required for proper stackwalking of RuntimeStub frame 3460 __ ret(0); 3461 3462 __ BIND(L_key_192_256); 3463 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3464 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3465 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3466 __ cmpl(rax, 52); 3467 __ jcc(Assembler::notEqual, L_key_256); 3468 3469 // 192-bit code follows here (could be changed to use more xmm registers) 3470 __ movptr(pos, 0); 3471 __ align(OptoLoopAlignment); 3472 3473 __ BIND(L_loopTop_192); 3474 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3475 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3476 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3477 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3478 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3479 } 3480 __ aesenclast(xmm_result, xmm_key12); 3481 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3482 // no need to store r to memory until we exit 3483 __ addptr(pos, AESBlockSize); 3484 __ subptr(len_reg, AESBlockSize); 3485 __ jcc(Assembler::notEqual, L_loopTop_192); 3486 __ jmp(L_exit); 3487 3488 __ BIND(L_key_256); 3489 // 256-bit code follows here (could be changed to use more xmm registers) 3490 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3491 __ movptr(pos, 0); 3492 __ align(OptoLoopAlignment); 3493 3494 __ BIND(L_loopTop_256); 3495 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3496 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3497 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3498 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3499 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3500 } 3501 load_key(xmm_temp, key, 0xe0); 3502 __ aesenclast(xmm_result, xmm_temp); 3503 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3504 // no need to store r to memory until we exit 3505 __ addptr(pos, AESBlockSize); 3506 __ subptr(len_reg, AESBlockSize); 3507 __ jcc(Assembler::notEqual, L_loopTop_256); 3508 __ jmp(L_exit); 3509 3510 return start; 3511 } 3512 3513 // Safefetch stubs. 3514 void generate_safefetch(const char* name, int size, address* entry, 3515 address* fault_pc, address* continuation_pc) { 3516 // safefetch signatures: 3517 // int SafeFetch32(int* adr, int errValue); 3518 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3519 // 3520 // arguments: 3521 // c_rarg0 = adr 3522 // c_rarg1 = errValue 3523 // 3524 // result: 3525 // PPC_RET = *adr or errValue 3526 3527 StubCodeMark mark(this, "StubRoutines", name); 3528 3529 // Entry point, pc or function descriptor. 3530 *entry = __ pc(); 3531 3532 // Load *adr into c_rarg1, may fault. 3533 *fault_pc = __ pc(); 3534 switch (size) { 3535 case 4: 3536 // int32_t 3537 __ movl(c_rarg1, Address(c_rarg0, 0)); 3538 break; 3539 case 8: 3540 // int64_t 3541 __ movq(c_rarg1, Address(c_rarg0, 0)); 3542 break; 3543 default: 3544 ShouldNotReachHere(); 3545 } 3546 3547 // return errValue or *adr 3548 *continuation_pc = __ pc(); 3549 __ movq(rax, c_rarg1); 3550 __ ret(0); 3551 } 3552 3553 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3554 // to hide instruction latency 3555 // 3556 // Arguments: 3557 // 3558 // Inputs: 3559 // c_rarg0 - source byte array address 3560 // c_rarg1 - destination byte array address 3561 // c_rarg2 - K (key) in little endian int array 3562 // c_rarg3 - r vector byte array address 3563 // c_rarg4 - input length 3564 // 3565 // Output: 3566 // rax - input length 3567 // 3568 3569 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3570 assert(UseAES, "need AES instructions and misaligned SSE support"); 3571 __ align(CodeEntryAlignment); 3572 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3573 address start = __ pc(); 3574 3575 Label L_exit, L_key_192_256, L_key_256; 3576 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128; 3577 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 3578 const Register from = c_rarg0; // source array address 3579 const Register to = c_rarg1; // destination array address 3580 const Register key = c_rarg2; // key array address 3581 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3582 // and left with the results of the last encryption block 3583 #ifndef _WIN64 3584 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3585 #else 3586 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3587 const Register len_reg = r10; // pick the first volatile windows register 3588 #endif 3589 const Register pos = rax; 3590 3591 // keys 0-10 preloaded into xmm2-xmm12 3592 const int XMM_REG_NUM_KEY_FIRST = 5; 3593 const int XMM_REG_NUM_KEY_LAST = 15; 3594 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3595 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3596 3597 __ enter(); // required for proper stackwalking of RuntimeStub frame 3598 3599 #ifdef _WIN64 3600 // on win64, fill len_reg from stack position 3601 __ movl(len_reg, len_mem); 3602 // save the xmm registers which must be preserved 6-15 3603 __ subptr(rsp, -rsp_after_call_off * wordSize); 3604 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3605 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3606 } 3607 #else 3608 __ push(len_reg); // Save 3609 #endif 3610 3611 // the java expanded key ordering is rotated one position from what we want 3612 // so we start from 0x10 here and hit 0x00 last 3613 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3614 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3615 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3616 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3617 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3618 offset += 0x10; 3619 } 3620 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3621 3622 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3623 3624 // registers holding the four results in the parallelized loop 3625 const XMMRegister xmm_result0 = xmm0; 3626 const XMMRegister xmm_result1 = xmm2; 3627 const XMMRegister xmm_result2 = xmm3; 3628 const XMMRegister xmm_result3 = xmm4; 3629 3630 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3631 3632 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3633 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3634 __ cmpl(rax, 44); 3635 __ jcc(Assembler::notEqual, L_key_192_256); 3636 3637 3638 // 128-bit code follows here, parallelized 3639 __ movptr(pos, 0); 3640 __ align(OptoLoopAlignment); 3641 __ BIND(L_multiBlock_loopTop_128); 3642 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left 3643 __ jcc(Assembler::less, L_singleBlock_loopTop_128); 3644 3645 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers 3646 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3647 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3648 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize)); 3649 3650 #define DoFour(opc, src_reg) \ 3651 __ opc(xmm_result0, src_reg); \ 3652 __ opc(xmm_result1, src_reg); \ 3653 __ opc(xmm_result2, src_reg); \ 3654 __ opc(xmm_result3, src_reg); 3655 3656 DoFour(pxor, xmm_key_first); 3657 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3658 DoFour(aesdec, as_XMMRegister(rnum)); 3659 } 3660 DoFour(aesdeclast, xmm_key_last); 3661 // for each result, xor with the r vector of previous cipher block 3662 __ pxor(xmm_result0, xmm_prev_block_cipher); 3663 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize)); 3664 __ pxor(xmm_result1, xmm_prev_block_cipher); 3665 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3666 __ pxor(xmm_result2, xmm_prev_block_cipher); 3667 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3668 __ pxor(xmm_result3, xmm_prev_block_cipher); 3669 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks 3670 3671 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3672 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1); 3673 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2); 3674 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3); 3675 3676 __ addptr(pos, 4*AESBlockSize); 3677 __ subptr(len_reg, 4*AESBlockSize); 3678 __ jmp(L_multiBlock_loopTop_128); 3679 3680 // registers used in the non-parallelized loops 3681 // xmm register assignments for the loops below 3682 const XMMRegister xmm_result = xmm0; 3683 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3684 const XMMRegister xmm_key11 = xmm3; 3685 const XMMRegister xmm_key12 = xmm4; 3686 const XMMRegister xmm_temp = xmm4; 3687 3688 __ align(OptoLoopAlignment); 3689 __ BIND(L_singleBlock_loopTop_128); 3690 __ cmpptr(len_reg, 0); // any blocks left?? 3691 __ jcc(Assembler::equal, L_exit); 3692 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3693 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3694 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3695 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3696 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3697 } 3698 __ aesdeclast(xmm_result, xmm_key_last); 3699 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3700 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3701 // no need to store r to memory until we exit 3702 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3703 3704 __ addptr(pos, AESBlockSize); 3705 __ subptr(len_reg, AESBlockSize); 3706 __ jmp(L_singleBlock_loopTop_128); 3707 3708 3709 __ BIND(L_exit); 3710 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3711 #ifdef _WIN64 3712 // restore regs belonging to calling function 3713 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3714 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3715 } 3716 __ movl(rax, len_mem); 3717 #else 3718 __ pop(rax); // return length 3719 #endif 3720 __ leave(); // required for proper stackwalking of RuntimeStub frame 3721 __ ret(0); 3722 3723 3724 __ BIND(L_key_192_256); 3725 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3726 load_key(xmm_key11, key, 0xb0); 3727 __ cmpl(rax, 52); 3728 __ jcc(Assembler::notEqual, L_key_256); 3729 3730 // 192-bit code follows here (could be optimized to use parallelism) 3731 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0 3732 __ movptr(pos, 0); 3733 __ align(OptoLoopAlignment); 3734 3735 __ BIND(L_singleBlock_loopTop_192); 3736 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3737 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3738 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3739 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3740 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3741 } 3742 __ aesdec(xmm_result, xmm_key11); 3743 __ aesdec(xmm_result, xmm_key12); 3744 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3745 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3746 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3747 // no need to store r to memory until we exit 3748 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3749 __ addptr(pos, AESBlockSize); 3750 __ subptr(len_reg, AESBlockSize); 3751 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 3752 __ jmp(L_exit); 3753 3754 __ BIND(L_key_256); 3755 // 256-bit code follows here (could be optimized to use parallelism) 3756 __ movptr(pos, 0); 3757 __ align(OptoLoopAlignment); 3758 3759 __ BIND(L_singleBlock_loopTop_256); 3760 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3761 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3762 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3763 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3764 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3765 } 3766 __ aesdec(xmm_result, xmm_key11); 3767 load_key(xmm_temp, key, 0xc0); 3768 __ aesdec(xmm_result, xmm_temp); 3769 load_key(xmm_temp, key, 0xd0); 3770 __ aesdec(xmm_result, xmm_temp); 3771 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0 3772 __ aesdec(xmm_result, xmm_temp); 3773 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0 3774 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3775 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3776 // no need to store r to memory until we exit 3777 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3778 __ addptr(pos, AESBlockSize); 3779 __ subptr(len_reg, AESBlockSize); 3780 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 3781 __ jmp(L_exit); 3782 3783 return start; 3784 } 3785 3786 3787 // byte swap x86 long 3788 address generate_ghash_long_swap_mask() { 3789 __ align(CodeEntryAlignment); 3790 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 3791 address start = __ pc(); 3792 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 3793 __ emit_data64(0x0706050403020100, relocInfo::none ); 3794 return start; 3795 } 3796 3797 // byte swap x86 byte array 3798 address generate_ghash_byte_swap_mask() { 3799 __ align(CodeEntryAlignment); 3800 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 3801 address start = __ pc(); 3802 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 3803 __ emit_data64(0x0001020304050607, relocInfo::none ); 3804 return start; 3805 } 3806 3807 /* Single and multi-block ghash operations */ 3808 address generate_ghash_processBlocks() { 3809 __ align(CodeEntryAlignment); 3810 Label L_ghash_loop, L_exit; 3811 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 3812 address start = __ pc(); 3813 3814 const Register state = c_rarg0; 3815 const Register subkeyH = c_rarg1; 3816 const Register data = c_rarg2; 3817 const Register blocks = c_rarg3; 3818 3819 #ifdef _WIN64 3820 const int XMM_REG_LAST = 10; 3821 #endif 3822 3823 const XMMRegister xmm_temp0 = xmm0; 3824 const XMMRegister xmm_temp1 = xmm1; 3825 const XMMRegister xmm_temp2 = xmm2; 3826 const XMMRegister xmm_temp3 = xmm3; 3827 const XMMRegister xmm_temp4 = xmm4; 3828 const XMMRegister xmm_temp5 = xmm5; 3829 const XMMRegister xmm_temp6 = xmm6; 3830 const XMMRegister xmm_temp7 = xmm7; 3831 const XMMRegister xmm_temp8 = xmm8; 3832 const XMMRegister xmm_temp9 = xmm9; 3833 const XMMRegister xmm_temp10 = xmm10; 3834 3835 __ enter(); 3836 3837 #ifdef _WIN64 3838 // save the xmm registers which must be preserved 6-10 3839 __ subptr(rsp, -rsp_after_call_off * wordSize); 3840 for (int i = 6; i <= XMM_REG_LAST; i++) { 3841 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3842 } 3843 #endif 3844 3845 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 3846 3847 __ movdqu(xmm_temp0, Address(state, 0)); 3848 __ pshufb(xmm_temp0, xmm_temp10); 3849 3850 3851 __ BIND(L_ghash_loop); 3852 __ movdqu(xmm_temp2, Address(data, 0)); 3853 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 3854 3855 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 3856 __ pshufb(xmm_temp1, xmm_temp10); 3857 3858 __ pxor(xmm_temp0, xmm_temp2); 3859 3860 // 3861 // Multiply with the hash key 3862 // 3863 __ movdqu(xmm_temp3, xmm_temp0); 3864 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 3865 __ movdqu(xmm_temp4, xmm_temp0); 3866 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 3867 3868 __ movdqu(xmm_temp5, xmm_temp0); 3869 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 3870 __ movdqu(xmm_temp6, xmm_temp0); 3871 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 3872 3873 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 3874 3875 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 3876 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 3877 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 3878 __ pxor(xmm_temp3, xmm_temp5); 3879 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 3880 // of the carry-less multiplication of 3881 // xmm0 by xmm1. 3882 3883 // We shift the result of the multiplication by one bit position 3884 // to the left to cope for the fact that the bits are reversed. 3885 __ movdqu(xmm_temp7, xmm_temp3); 3886 __ movdqu(xmm_temp8, xmm_temp6); 3887 __ pslld(xmm_temp3, 1); 3888 __ pslld(xmm_temp6, 1); 3889 __ psrld(xmm_temp7, 31); 3890 __ psrld(xmm_temp8, 31); 3891 __ movdqu(xmm_temp9, xmm_temp7); 3892 __ pslldq(xmm_temp8, 4); 3893 __ pslldq(xmm_temp7, 4); 3894 __ psrldq(xmm_temp9, 12); 3895 __ por(xmm_temp3, xmm_temp7); 3896 __ por(xmm_temp6, xmm_temp8); 3897 __ por(xmm_temp6, xmm_temp9); 3898 3899 // 3900 // First phase of the reduction 3901 // 3902 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 3903 // independently. 3904 __ movdqu(xmm_temp7, xmm_temp3); 3905 __ movdqu(xmm_temp8, xmm_temp3); 3906 __ movdqu(xmm_temp9, xmm_temp3); 3907 __ pslld(xmm_temp7, 31); // packed right shift shifting << 31 3908 __ pslld(xmm_temp8, 30); // packed right shift shifting << 30 3909 __ pslld(xmm_temp9, 25); // packed right shift shifting << 25 3910 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 3911 __ pxor(xmm_temp7, xmm_temp9); 3912 __ movdqu(xmm_temp8, xmm_temp7); 3913 __ pslldq(xmm_temp7, 12); 3914 __ psrldq(xmm_temp8, 4); 3915 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 3916 3917 // 3918 // Second phase of the reduction 3919 // 3920 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 3921 // shift operations. 3922 __ movdqu(xmm_temp2, xmm_temp3); 3923 __ movdqu(xmm_temp4, xmm_temp3); 3924 __ movdqu(xmm_temp5, xmm_temp3); 3925 __ psrld(xmm_temp2, 1); // packed left shifting >> 1 3926 __ psrld(xmm_temp4, 2); // packed left shifting >> 2 3927 __ psrld(xmm_temp5, 7); // packed left shifting >> 7 3928 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 3929 __ pxor(xmm_temp2, xmm_temp5); 3930 __ pxor(xmm_temp2, xmm_temp8); 3931 __ pxor(xmm_temp3, xmm_temp2); 3932 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 3933 3934 __ decrement(blocks); 3935 __ jcc(Assembler::zero, L_exit); 3936 __ movdqu(xmm_temp0, xmm_temp6); 3937 __ addptr(data, 16); 3938 __ jmp(L_ghash_loop); 3939 3940 __ BIND(L_exit); 3941 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 3942 __ movdqu(Address(state, 0), xmm_temp6); // store the result 3943 3944 #ifdef _WIN64 3945 // restore xmm regs belonging to calling function 3946 for (int i = 6; i <= XMM_REG_LAST; i++) { 3947 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3948 } 3949 #endif 3950 __ leave(); 3951 __ ret(0); 3952 return start; 3953 } 3954 3955 /** 3956 * Arguments: 3957 * 3958 * Inputs: 3959 * c_rarg0 - int crc 3960 * c_rarg1 - byte* buf 3961 * c_rarg2 - int length 3962 * 3963 * Ouput: 3964 * rax - int crc result 3965 */ 3966 address generate_updateBytesCRC32() { 3967 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 3968 3969 __ align(CodeEntryAlignment); 3970 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 3971 3972 address start = __ pc(); 3973 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3974 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3975 // rscratch1: r10 3976 const Register crc = c_rarg0; // crc 3977 const Register buf = c_rarg1; // source java byte array address 3978 const Register len = c_rarg2; // length 3979 const Register table = c_rarg3; // crc_table address (reuse register) 3980 const Register tmp = r11; 3981 assert_different_registers(crc, buf, len, table, tmp, rax); 3982 3983 BLOCK_COMMENT("Entry:"); 3984 __ enter(); // required for proper stackwalking of RuntimeStub frame 3985 3986 __ kernel_crc32(crc, buf, len, table, tmp); 3987 3988 __ movl(rax, crc); 3989 __ leave(); // required for proper stackwalking of RuntimeStub frame 3990 __ ret(0); 3991 3992 return start; 3993 } 3994 3995 3996 /** 3997 * Arguments: 3998 * 3999 * Input: 4000 * c_rarg0 - x address 4001 * c_rarg1 - x length 4002 * c_rarg2 - y address 4003 * c_rarg3 - y lenth 4004 * not Win64 4005 * c_rarg4 - z address 4006 * c_rarg5 - z length 4007 * Win64 4008 * rsp+40 - z address 4009 * rsp+48 - z length 4010 */ 4011 address generate_multiplyToLen() { 4012 __ align(CodeEntryAlignment); 4013 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4014 4015 address start = __ pc(); 4016 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4017 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4018 const Register x = rdi; 4019 const Register xlen = rax; 4020 const Register y = rsi; 4021 const Register ylen = rcx; 4022 const Register z = r8; 4023 const Register zlen = r11; 4024 4025 // Next registers will be saved on stack in multiply_to_len(). 4026 const Register tmp1 = r12; 4027 const Register tmp2 = r13; 4028 const Register tmp3 = r14; 4029 const Register tmp4 = r15; 4030 const Register tmp5 = rbx; 4031 4032 BLOCK_COMMENT("Entry:"); 4033 __ enter(); // required for proper stackwalking of RuntimeStub frame 4034 4035 #ifndef _WIN64 4036 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4037 #endif 4038 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4039 // ylen => rcx, z => r8, zlen => r11 4040 // r9 and r10 may be used to save non-volatile registers 4041 #ifdef _WIN64 4042 // last 2 arguments (#4, #5) are on stack on Win64 4043 __ movptr(z, Address(rsp, 6 * wordSize)); 4044 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4045 #endif 4046 4047 __ movptr(xlen, rsi); 4048 __ movptr(y, rdx); 4049 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4050 4051 restore_arg_regs(); 4052 4053 __ leave(); // required for proper stackwalking of RuntimeStub frame 4054 __ ret(0); 4055 4056 return start; 4057 } 4058 4059 /** 4060 * Arguments: 4061 * 4062 // Input: 4063 // c_rarg0 - x address 4064 // c_rarg1 - x length 4065 // c_rarg2 - z address 4066 // c_rarg3 - z lenth 4067 * 4068 */ 4069 address generate_squareToLen() { 4070 4071 __ align(CodeEntryAlignment); 4072 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 4073 4074 address start = __ pc(); 4075 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4076 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 4077 const Register x = rdi; 4078 const Register len = rsi; 4079 const Register z = r8; 4080 const Register zlen = rcx; 4081 4082 const Register tmp1 = r12; 4083 const Register tmp2 = r13; 4084 const Register tmp3 = r14; 4085 const Register tmp4 = r15; 4086 const Register tmp5 = rbx; 4087 4088 BLOCK_COMMENT("Entry:"); 4089 __ enter(); // required for proper stackwalking of RuntimeStub frame 4090 4091 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 4092 // zlen => rcx 4093 // r9 and r10 may be used to save non-volatile registers 4094 __ movptr(r8, rdx); 4095 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4096 4097 restore_arg_regs(); 4098 4099 __ leave(); // required for proper stackwalking of RuntimeStub frame 4100 __ ret(0); 4101 4102 return start; 4103 } 4104 4105 /** 4106 * Arguments: 4107 * 4108 * Input: 4109 * c_rarg0 - out address 4110 * c_rarg1 - in address 4111 * c_rarg2 - offset 4112 * c_rarg3 - len 4113 * not Win64 4114 * c_rarg4 - k 4115 * Win64 4116 * rsp+40 - k 4117 */ 4118 address generate_mulAdd() { 4119 __ align(CodeEntryAlignment); 4120 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 4121 4122 address start = __ pc(); 4123 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4124 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4125 const Register out = rdi; 4126 const Register in = rsi; 4127 const Register offset = r11; 4128 const Register len = rcx; 4129 const Register k = r8; 4130 4131 // Next registers will be saved on stack in mul_add(). 4132 const Register tmp1 = r12; 4133 const Register tmp2 = r13; 4134 const Register tmp3 = r14; 4135 const Register tmp4 = r15; 4136 const Register tmp5 = rbx; 4137 4138 BLOCK_COMMENT("Entry:"); 4139 __ enter(); // required for proper stackwalking of RuntimeStub frame 4140 4141 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4142 // len => rcx, k => r8 4143 // r9 and r10 may be used to save non-volatile registers 4144 #ifdef _WIN64 4145 // last argument is on stack on Win64 4146 __ movl(k, Address(rsp, 6 * wordSize)); 4147 #endif 4148 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4149 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4150 4151 restore_arg_regs(); 4152 4153 __ leave(); // required for proper stackwalking of RuntimeStub frame 4154 __ ret(0); 4155 4156 return start; 4157 } 4158 4159 4160 #undef __ 4161 #define __ masm-> 4162 4163 // Continuation point for throwing of implicit exceptions that are 4164 // not handled in the current activation. Fabricates an exception 4165 // oop and initiates normal exception dispatching in this 4166 // frame. Since we need to preserve callee-saved values (currently 4167 // only for C2, but done for C1 as well) we need a callee-saved oop 4168 // map and therefore have to make these stubs into RuntimeStubs 4169 // rather than BufferBlobs. If the compiler needs all registers to 4170 // be preserved between the fault point and the exception handler 4171 // then it must assume responsibility for that in 4172 // AbstractCompiler::continuation_for_implicit_null_exception or 4173 // continuation_for_implicit_division_by_zero_exception. All other 4174 // implicit exceptions (e.g., NullPointerException or 4175 // AbstractMethodError on entry) are either at call sites or 4176 // otherwise assume that stack unwinding will be initiated, so 4177 // caller saved registers were assumed volatile in the compiler. 4178 address generate_throw_exception(const char* name, 4179 address runtime_entry, 4180 Register arg1 = noreg, 4181 Register arg2 = noreg) { 4182 // Information about frame layout at time of blocking runtime call. 4183 // Note that we only have to preserve callee-saved registers since 4184 // the compilers are responsible for supplying a continuation point 4185 // if they expect all registers to be preserved. 4186 enum layout { 4187 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 4188 rbp_off2, 4189 return_off, 4190 return_off2, 4191 framesize // inclusive of return address 4192 }; 4193 4194 int insts_size = 512; 4195 int locs_size = 64; 4196 4197 CodeBuffer code(name, insts_size, locs_size); 4198 OopMapSet* oop_maps = new OopMapSet(); 4199 MacroAssembler* masm = new MacroAssembler(&code); 4200 4201 address start = __ pc(); 4202 4203 // This is an inlined and slightly modified version of call_VM 4204 // which has the ability to fetch the return PC out of 4205 // thread-local storage and also sets up last_Java_sp slightly 4206 // differently than the real call_VM 4207 4208 __ enter(); // required for proper stackwalking of RuntimeStub frame 4209 4210 assert(is_even(framesize/2), "sp not 16-byte aligned"); 4211 4212 // return address and rbp are already in place 4213 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 4214 4215 int frame_complete = __ pc() - start; 4216 4217 // Set up last_Java_sp and last_Java_fp 4218 address the_pc = __ pc(); 4219 __ set_last_Java_frame(rsp, rbp, the_pc); 4220 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 4221 4222 // Call runtime 4223 if (arg1 != noreg) { 4224 assert(arg2 != c_rarg1, "clobbered"); 4225 __ movptr(c_rarg1, arg1); 4226 } 4227 if (arg2 != noreg) { 4228 __ movptr(c_rarg2, arg2); 4229 } 4230 __ movptr(c_rarg0, r15_thread); 4231 BLOCK_COMMENT("call runtime_entry"); 4232 __ call(RuntimeAddress(runtime_entry)); 4233 4234 // Generate oop map 4235 OopMap* map = new OopMap(framesize, 0); 4236 4237 oop_maps->add_gc_map(the_pc - start, map); 4238 4239 __ reset_last_Java_frame(true, true); 4240 4241 __ leave(); // required for proper stackwalking of RuntimeStub frame 4242 4243 // check for pending exceptions 4244 #ifdef ASSERT 4245 Label L; 4246 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 4247 (int32_t) NULL_WORD); 4248 __ jcc(Assembler::notEqual, L); 4249 __ should_not_reach_here(); 4250 __ bind(L); 4251 #endif // ASSERT 4252 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 4253 4254 4255 // codeBlob framesize is in words (not VMRegImpl::slot_size) 4256 RuntimeStub* stub = 4257 RuntimeStub::new_runtime_stub(name, 4258 &code, 4259 frame_complete, 4260 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 4261 oop_maps, false); 4262 return stub->entry_point(); 4263 } 4264 4265 void create_control_words() { 4266 // Round to nearest, 53-bit mode, exceptions masked 4267 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 4268 // Round to zero, 53-bit mode, exception mased 4269 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 4270 // Round to nearest, 24-bit mode, exceptions masked 4271 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 4272 // Round to nearest, 64-bit mode, exceptions masked 4273 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 4274 // Round to nearest, 64-bit mode, exceptions masked 4275 StubRoutines::_mxcsr_std = 0x1F80; 4276 // Note: the following two constants are 80-bit values 4277 // layout is critical for correct loading by FPU. 4278 // Bias for strict fp multiply/divide 4279 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 4280 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 4281 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 4282 // Un-Bias for strict fp multiply/divide 4283 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 4284 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 4285 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 4286 } 4287 4288 // Initialization 4289 void generate_initial() { 4290 // Generates all stubs and initializes the entry points 4291 4292 // This platform-specific settings are needed by generate_call_stub() 4293 create_control_words(); 4294 4295 // entry points that exist in all platforms Note: This is code 4296 // that could be shared among different platforms - however the 4297 // benefit seems to be smaller than the disadvantage of having a 4298 // much more complicated generator structure. See also comment in 4299 // stubRoutines.hpp. 4300 4301 StubRoutines::_forward_exception_entry = generate_forward_exception(); 4302 4303 StubRoutines::_call_stub_entry = 4304 generate_call_stub(StubRoutines::_call_stub_return_address); 4305 4306 // is referenced by megamorphic call 4307 StubRoutines::_catch_exception_entry = generate_catch_exception(); 4308 4309 // atomic calls 4310 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 4311 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr(); 4312 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 4313 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); 4314 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 4315 StubRoutines::_atomic_add_entry = generate_atomic_add(); 4316 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); 4317 StubRoutines::_fence_entry = generate_orderaccess_fence(); 4318 4319 StubRoutines::_handler_for_unsafe_access_entry = 4320 generate_handler_for_unsafe_access(); 4321 4322 // platform dependent 4323 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 4324 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 4325 4326 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 4327 4328 // Build this early so it's available for the interpreter. 4329 StubRoutines::_throw_StackOverflowError_entry = 4330 generate_throw_exception("StackOverflowError throw_exception", 4331 CAST_FROM_FN_PTR(address, 4332 SharedRuntime:: 4333 throw_StackOverflowError)); 4334 if (UseCRC32Intrinsics) { 4335 // set table address before stub generation which use it 4336 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 4337 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 4338 } 4339 } 4340 4341 void generate_all() { 4342 // Generates all stubs and initializes the entry points 4343 4344 // These entry points require SharedInfo::stack0 to be set up in 4345 // non-core builds and need to be relocatable, so they each 4346 // fabricate a RuntimeStub internally. 4347 StubRoutines::_throw_AbstractMethodError_entry = 4348 generate_throw_exception("AbstractMethodError throw_exception", 4349 CAST_FROM_FN_PTR(address, 4350 SharedRuntime:: 4351 throw_AbstractMethodError)); 4352 4353 StubRoutines::_throw_IncompatibleClassChangeError_entry = 4354 generate_throw_exception("IncompatibleClassChangeError throw_exception", 4355 CAST_FROM_FN_PTR(address, 4356 SharedRuntime:: 4357 throw_IncompatibleClassChangeError)); 4358 4359 StubRoutines::_throw_NullPointerException_at_call_entry = 4360 generate_throw_exception("NullPointerException at call throw_exception", 4361 CAST_FROM_FN_PTR(address, 4362 SharedRuntime:: 4363 throw_NullPointerException_at_call)); 4364 4365 // entry points that are platform specific 4366 if (UseShenandoahGC) { 4367 StubRoutines::x86::_shenandoah_wb = generate_shenandoah_wb(); 4368 } 4369 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 4370 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 4371 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 4372 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 4373 4374 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 4375 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 4376 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 4377 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 4378 4379 // support for verify_oop (must happen after universe_init) 4380 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 4381 4382 // arraycopy stubs used by compilers 4383 generate_arraycopy_stubs(); 4384 4385 generate_math_stubs(); 4386 4387 // don't bother generating these AES intrinsic stubs unless global flag is set 4388 if (UseAESIntrinsics) { 4389 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 4390 4391 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 4392 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 4393 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 4394 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 4395 } 4396 4397 // Generate GHASH intrinsics code 4398 if (UseGHASHIntrinsics) { 4399 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 4400 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 4401 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 4402 } 4403 4404 // Safefetch stubs. 4405 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 4406 &StubRoutines::_safefetch32_fault_pc, 4407 &StubRoutines::_safefetch32_continuation_pc); 4408 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 4409 &StubRoutines::_safefetchN_fault_pc, 4410 &StubRoutines::_safefetchN_continuation_pc); 4411 #ifdef COMPILER2 4412 if (UseMultiplyToLenIntrinsic) { 4413 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 4414 } 4415 if (UseSquareToLenIntrinsic) { 4416 StubRoutines::_squareToLen = generate_squareToLen(); 4417 } 4418 if (UseMulAddIntrinsic) { 4419 StubRoutines::_mulAdd = generate_mulAdd(); 4420 } 4421 4422 #ifndef _WINDOWS 4423 if (UseMontgomeryMultiplyIntrinsic) { 4424 StubRoutines::_montgomeryMultiply 4425 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 4426 } 4427 if (UseMontgomerySquareIntrinsic) { 4428 StubRoutines::_montgomerySquare 4429 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 4430 } 4431 #endif // WINDOWS 4432 #endif // COMPILER2 4433 } 4434 4435 public: 4436 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 4437 if (all) { 4438 generate_all(); 4439 } else { 4440 generate_initial(); 4441 } 4442 } 4443 }; // end class declaration 4444 4445 void StubGenerator_generate(CodeBuffer* code, bool all) { 4446 StubGenerator g(code, all); 4447 }