/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.
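
  // Seen from the C++ side, the stub produced by generate_call_stub() is
  // invoked through a function pointer matching the argument list above;
  // cf. the CallStub typedef in stubRoutines.hpp.  Roughly (illustrative
  // sketch, not the authoritative declaration):
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);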

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovql(k1, rbx);
    }
#ifdef _WIN64
    if (UseAVX > 2) {
      for (int i = 6; i <= 31; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    } else {
      for (int i = 6; i <= 15; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);           // get Method*
    __ movptr(c_rarg1, entry_point);  // get entry_point
    __ mov(r13, rsp);                 // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    int xmm_ub = 15;
    if (UseAVX > 2) {
      xmm_ub = 31;
    }
    // emit the restores for xmm regs
    for (int i = 6; i <= xmm_ub; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
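
  // The result-store dispatch above amounts to the following C sketch
  // (illustrative only); note that T_OBJECT shares the T_LONG path because
  // an oop is returned as a full 64-bit value in rax:
  //
  //   switch (result_type) {
  //     case T_OBJECT:                                     // fall through
  //     case T_LONG:   *(jlong*)result   = rax;    break;
  //     case T_FLOAT:  *(jfloat*)result  = xmm0;   break;
  //     case T_DOUBLE: *(jdouble*)result = xmm0;   break;
  //     default:       *(jint*)result    = (jint)rax;  break;  // T_INT et al.
  //   }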

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, the exception pc must be on the stack!!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
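
  // The cmpxchg stubs rely on the same hardware protocol: LOCK CMPXCHG takes
  // the expected value in rax and leaves the memory's old value in rax,
  // which is exactly the return convention we need.  A C-style sketch of the
  // semantics (illustrative only):
  //
  //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                                 // these three steps
  //     if (old == compare_value) *dest = exchange_value; // happen atomically
  //     return old;                                       // under LOCK CMPXCHG
  //   }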
  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if (os::is_MP()) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if (os::is_MP()) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
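
  // On x86-64 only StoreLoad reordering is observable, so this one stub
  // covers OrderAccess::fence(); membar(Assembler::StoreLoad) expands to a
  // serializing instruction, conceptually (illustrative sketch, assuming the
  // usual locked-add idiom):
  //
  //   void fence() {
  //     __asm__ volatile("lock; addl $0,0(%%rsp)" ::: "cc", "memory");
  //   }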
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
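
  // The f2i/f2l (and d2i/d2l) fixup stubs exist because cvttss2si/cvttsd2si
  // produce the "integer indefinite" value (min_jint/min_jlong) for NaN and
  // out-of-range inputs, while Java requires NaN -> 0 and saturation at the
  // type bounds.  Compiled code calls the fixup only on that sentinel; the
  // correction is roughly (illustrative only):
  //
  //   jint f2i_fixup(jfloat x) {
  //     if (x != x)     return 0;        // NaN
  //     return (x > 0) ? max_jint        // too large, saturate up
  //                    : min_jint;       // too small; sentinel already correct
  //   }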
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }
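
  // generate_fp_mask() emits the 64-bit mask twice so the result can serve
  // as a 16-byte memory operand.  Masks of this shape (e.g. float_sign_mask,
  // float_sign_flip) presumably give branch-free abs() and neg(), along the
  // lines of (illustrative only):
  //
  //   abs: andps xmm0, [float_sign_mask]   // clear the sign bits
  //   neg: xorps xmm0, [float_sign_flip]   // flip the sign bits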
  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0); // hole for return address-to-be
    __ pusha(); // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // FIXME: this probably needs alignment logic

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax); // stuff next address
    __ popa();
    __ ret(0);               // jump to next address

    return start;
  }
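
  // The stub above is reached from the platform signal handler rather than
  // by an ordinary call: on a fault in an Unsafe access the handler records
  // the faulting pc and resumes at this stub, which in turn "returns" past
  // the faulting instruction.  Sketch of the hand-off (see the os_*_x86
  // signal handling code for the authoritative version):
  //
  //   thread->set_saved_exception_pc(pc);
  //   stub = StubRoutines::handler_for_unsafe_access();  // resume here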
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., not zero.
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
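
  // The overlap test above reduces to the following predicate (illustrative
  // only): a conjoint (backward) copy is needed exactly when the destination
  // starts strictly inside the source range,
  //
  //   bool must_copy_backward(char* from, char* to, size_t byte_count) {
  //     return from < to && to < from + byte_count;
  //   }
  //
  // and every other case may branch to the disjoint (forward) copy.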

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where the
  // latter two are non-volatile.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array write pre barrier
  //
  //     addr     - starting address
  //     count    - element count
  //
  //     Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha(); // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
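
  // For the card-table cases above, the emitted loop matches the usual
  // card-marking arithmetic; in C, roughly (illustrative only, with
  // card_shift == 9 for 512-byte cards and 0 meaning dirty):
  //
  //   jbyte* card_for(void* p) {
  //     return byte_map_base + ((uintptr_t)p >> card_shift);
  //   }
  //   void post_barrier(void* start, size_t count) {
  //     jbyte* cur  = card_for(start);
  //     jbyte* last = card_for((char*)start + count * heapOopSize - 1);
  //     while (cur <= last) *cur++ = 0;   // dirty each covered card
  //   }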

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
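
  // copy_bytes_forward() runs its main loop on a negative qword index that
  // climbs toward zero, so one register serves as both the loop counter and
  // the offset from the end pointers.  For an n-qword copy the shape is
  // roughly (illustrative only):
  //
  //   // end_from/end_to point at the last qword of each array
  //   for (intptr_t i = 1 - n; i <= 0; i++) {
  //     end_to[i] = end_from[i];
  //   }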

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array's address
  //   dest         - destination array's address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32), Assembler::AVX_512bit);
        __ evmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
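
  // The byte count is decomposed as 8 * qword_count plus the low three bits,
  // so the tail of the stub tests bit 2, bit 1 and bit 0 of byte_count once
  // each; in C (illustrative only):
  //
  //   copy_qwords(from, to, byte_count >> 3);
  //   if (byte_count & 4) copy_dword(...);   // trailing dword
  //   if (byte_count & 2) copy_word(...);    // trailing word
  //   if (byte_count & 1) copy_byte(...);    // trailing byte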

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
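
  // Note the symmetry with generate_disjoint_byte_copy(): the conjoint stub
  // peels the 1/2/4-byte remainders off the high end first and then copies
  // whole qwords downward, giving memmove-style semantics; in effect
  // (illustrative only):
  //
  //   void conjoint_byte_copy(char* from, char* to, size_t count) {
  //     memmove(to, from, count);   // backward copy when the ranges overlap
  //   }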

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
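    // Because the regions may overlap with 'to' above 'from', the trailing
    // units at the highest addresses are copied first and the qword loop
    // then walks downward.  E.g. with to == from + 2 elements, copying
    // low-to-high would clobber source elements before they were read;
    // copying high-to-low never does.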

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
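  // For orientation only, an informal sketch of what the stub above
  // generates (barrier calls shown loosely; the bulk copy really goes
  // through copy_bytes_forward):
  //
  //   int disjoint_int_oop_copy(jint* from, jint* to, size_t count) {
  //     if (is_oop) pre_barrier(to, count);
  //     memcpy(to, from, (count / 2) * 8);    // qword copies, low to high
  //     if (count & 1) { /* copy one trailing dword */ }
  //     if (is_oop) post_barrier(saved_to, count);
  //     return 0;
  //   }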

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
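  // Note: with compressed oops an array element is 4 bytes, so oop arrays
  // are serviced by the int variant above; the long variant below handles
  // 8-byte elements (jlong, or oops when UseCompressedOops is off), one
  // qword per element, which is why no trailing dword/word code appears.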
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

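  // In outline, the dynamic type check emitted below does something like
  // the following (informal sketch; the details live in the MacroAssembler
  // check_klass_subtype_* helpers, and the fast path also covers the
  // sub == super case):
  //
  //   bool is_subtype(Klass* sub, Klass* super, int super_check_offset) {
  //     // fast path: a single load from the cached super-check slot
  //     if (*(Klass**)((address)sub + super_check_offset) == super) return true;
  //     // slow path: scan the secondary supers array
  //     return secondary_supers_contain(sub, super);
  //   }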
  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from       = rdi;   // source array address
    const Register to         = rsi;   // destination array address
    const Register length     = rdx;   // elements count
    const Register ckoff      = rcx;   // super_check_offset
    const Register ckval      = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from   = from;  // source array end address
    const Register end_to     = r13;   // destination array end address
    const Register count      = rdx;   // -(count_remaining)
    const Register r14_length = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;   // actual oop copied
    const Register r11_klass  = r11;   // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length);        // save a copy of the length
    assert(length == count, "");          // else fix next line:
    __ negptr(count);                     // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);                  // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to the last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
    __ increment(count);                          // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop);            // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);     // K = (original - remaining) oops
    __ movptr(rax, r14_length);       // save the value
    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

  __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);

    // Common exit point (success or failure).
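    // Worked example of the failure encoding above: if 5 oops were
    // requested and the type check fails on the 3rd element, K = 2
    // elements were copied, so rax = -1 ^ 2 = ~2 = -3.  The caller
    // recovers K as ~rax and can finish the job from element K onward.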
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register size = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
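  // The dispatch above is equivalent to this informal C sketch:
  //
  //   void unsafe_copy(char* from, char* to, size_t size) {
  //     uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
  //     if      ((bits & 7) == 0) long_copy (from, to, size >> 3);
  //     else if ((bits & 3) == 0) int_copy  (from, to, size >> 2);
  //     else if ((bits & 1) == 0) short_copy(from, to, size >> 1);
  //     else                      byte_copy (from, to, size);
  //   }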

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);  // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);  // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0;  // source array oop
    const Register src_pos = c_rarg1;  // source position
    const Register dst     = c_rarg2;  // destination array oop
    const Register dst_pos = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);       // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);       // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
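    // Worked example of the layout helper encoding (values illustrative,
    // assuming a 16-byte array header): for int[] the helper is
    //   (0x3 << 30) | (16 << 16) | (T_INT << 8) | 2  ==  0xC0100A02
    // i.e. tag = typeArray, header_size = 16, element_type = T_INT (10),
    // log2_element_size = 2.  Array tags set bit 31, so a negative layout
    // helper always denotes an array.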
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
    __ addptr(src, r10_offset);                         // src array offset
    __ addptr(dst, r10_offset);                         // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // The following registers must be set before the jump to the
    // corresponding stub.
    const Register from  = c_rarg0;  // source array address
    const Register to    = c_rarg1;  // destination array address
    const Register count = c_rarg2;  // elements count

    // 'from', 'to' and 'count' must be set in this order,
    // since they overlap with 'src', 'src_pos' and 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);      // length (reloaded)
      Register sco_temp = c_rarg3; // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
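      // Note: checkcast_copy_entry expects its arguments already in
      // registers (the Win64 stack load for ckval sits before that entry
      // point), so r8 is loaded directly here on both calling conventions.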
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();     // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_jlong_arraycopy);
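
    // Each conjoint stub above receives the matching disjoint entry as its
    // nooverlap_target: when array_overlap_test finds no dangerous overlap,
    // the conjoint stub tail-jumps to the disjoint (forward) copy instead.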
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  void generate_math_stubs() {
    {
      StubCodeMark mark(this, "StubRoutines", "log");
      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "log10");
      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog10();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "sin");
      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('s');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "cos");
      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('c');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
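    // Each block above (and below) follows the same SSE<->x87 glue pattern:
    // the argument arrives in xmm0 per the ABI, is spilled to the stack so
    // the x87 unit can fld it, the x87 instruction computes the result, and
    // fstp/movdbl move it back into xmm0 for the return value.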
    {
      StubCodeMark mark(this, "StubRoutines", "tan");
      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('t');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "exp");
      StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ exp_with_fallback(0);
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "pow");
      StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm1);
      __ fld_d(Address(rsp, 0));
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ pow_with_fallback(0);
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // the shuffle mask may optionally be passed in an xmm register.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0;  // source array address
    const Register to     = c_rarg1;  // destination array address
    const Register key    = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
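    // keylen (in ints) determines the AES round count: an expanded key of
    // 4*(rounds+1) ints gives 44/52/60 for AES-128/192/256 (10/12/14
    // rounds), which is what the keylen comparisons below test against.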
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input

    // For encryption, the Java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using the load-execute form.

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0;  // source array address
    const Register to     = c_rarg1;  // destination array address
    const Register key    = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // For decryption, the Java expanded key ordering is rotated one position
    // from what we want, so we start from 0x10 here and hit 0x00 last.
    // We don't know if the key is aligned, hence not using the load-execute form.
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

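  // CBC encryption chains each block through the previous ciphertext:
  // informally, c[i] = AES_encrypt(p[i] ^ c[i-1], K), with c[-1] taken
  // from the r vector.  A sketch of the per-block loop generated below:
  //
  //   for (int i = 0; i < len; i += 16) {
  //     xmm_result ^= load16(from + i);        // p[i] ^ c[i-1]
  //     xmm_result  = aes_rounds(xmm_result);  // key-length dependent
  //     store16(to + i, xmm_result);           // c[i]
  //   }
  //   store16(rvec, xmm_result);               // save c[last] for chaining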
3281 const Register key = c_rarg2; // key array address
3282 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3283 // and left with the results of the last encryption block
3284 #ifndef _WIN64
3285 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3286 #else
3287 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3288 const Register len_reg = r10; // pick the first volatile windows register
3289 #endif
3290 const Register pos = rax;
3291
3292 // xmm register assignments for the loops below
3293 const XMMRegister xmm_result = xmm0;
3294 const XMMRegister xmm_temp = xmm1;
3295 // keys 0-10 preloaded into xmm2-xmm12
3296 const int XMM_REG_NUM_KEY_FIRST = 2;
3297 const int XMM_REG_NUM_KEY_LAST = 15;
3298 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3299 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
3300 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
3301 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
3302 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
3303
3304 __ enter(); // required for proper stackwalking of RuntimeStub frame
3305
3306 #ifdef _WIN64
3307 // on win64, fill len_reg from stack position
3308 __ movl(len_reg, len_mem);
3309 // save the xmm registers which must be preserved 6-15
3310 __ subptr(rsp, -rsp_after_call_off * wordSize);
3311 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3312 __ movdqu(xmm_save(i), as_XMMRegister(i));
3313 }
3314 #else
3315 __ push(len_reg); // Save
3316 #endif
3317
3318 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
3319 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3320 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
3321 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
3322 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3323 offset += 0x10;
3324 }
3325 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec
3326
3327 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
3328 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3329 __ cmpl(rax, 44);
3330 __ jcc(Assembler::notEqual, L_key_192_256);
3331
3332 // 128 bit code follows here
3333 __ movptr(pos, 0);
3334 __ align(OptoLoopAlignment);
3335
3336 __ BIND(L_loopTop_128);
3337 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3338 __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
3339 __ pxor  (xmm_result, xmm_key0); // do the aes rounds
3340 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
3341 __ aesenc(xmm_result, as_XMMRegister(rnum));
3342 }
3343 __ aesenclast(xmm_result, xmm_key10);
3344 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3345 // no need to store r to memory until we exit
3346 __ addptr(pos, AESBlockSize);
3347 __ subptr(len_reg, AESBlockSize);
3348 __ jcc(Assembler::notEqual, L_loopTop_128);
3349
3350 __ BIND(L_exit);
3351 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
3352
3353 #ifdef _WIN64
3354 // restore xmm regs belonging to calling function
3355 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3356 __ movdqu(as_XMMRegister(i), xmm_save(i));
3357 }
3358 __ movl(rax, len_mem);
3359 #else
3360 __ pop(rax); // return length
3361 #endif
3362 __ leave(); // required for proper stackwalking of RuntimeStub frame
3363 __ ret(0);
3364
3365 __ BIND(L_key_192_256);
3366 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3367 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3368 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3369 __ cmpl(rax, 52);
3370 __ jcc(Assembler::notEqual, L_key_256);
3371
3372 // 192-bit code follows here (could be changed to use more xmm registers)
3373 __ movptr(pos, 0);
3374 __ align(OptoLoopAlignment);
3375
3376 __ BIND(L_loopTop_192);
3377 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3378 __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
3379 __ pxor  (xmm_result, xmm_key0); // do the aes rounds
3380 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3381 __ aesenc(xmm_result, as_XMMRegister(rnum));
3382 }
3383 __ aesenclast(xmm_result, xmm_key12);
3384 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3385 // no need to store r to memory until we exit
3386 __ addptr(pos, AESBlockSize);
3387 __ subptr(len_reg, AESBlockSize);
3388 __ jcc(Assembler::notEqual, L_loopTop_192);
3389 __ jmp(L_exit);
3390
3391 __ BIND(L_key_256);
3392 // 256-bit code follows here (could be changed to use more xmm registers)
3393 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3394 __ movptr(pos, 0);
3395 __ align(OptoLoopAlignment);
3396
3397 __ BIND(L_loopTop_256);
3398 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3399 __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
3400 __ pxor  (xmm_result, xmm_key0); // do the aes rounds
3401 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3402 __ aesenc(xmm_result, as_XMMRegister(rnum));
3403 }
3404 load_key(xmm_temp, key, 0xe0);
3405 __ aesenclast(xmm_result, xmm_temp);
3406 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3407 // no need to store r to memory until we exit
3408 __ addptr(pos, AESBlockSize);
3409 __ subptr(len_reg, AESBlockSize);
3410 __ jcc(Assembler::notEqual, L_loopTop_256);
3411 __ jmp(L_exit);
3412
3413 return start;
3414 }
3415
3416 // Safefetch stubs.
3417 void generate_safefetch(const char* name, int size, address* entry,
3418 address* fault_pc, address* continuation_pc) {
3419 // safefetch signatures:
3420 // int SafeFetch32(int* adr, int errValue);
3421 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3422 //
3423 // arguments:
3424 // c_rarg0 = adr
3425 // c_rarg1 = errValue
3426 //
3427 // result:
3428 // rax = *adr or errValue
3429
3430 StubCodeMark mark(this, "StubRoutines", name);
3431
3432 // Entry point, pc or function descriptor.
3433 *entry = __ pc();
3434
3435 // Load *adr into c_rarg1, may fault.
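// (How the potentially-faulting load below is survived, for reference: the
//  VM's signal handler recognizes a fault whose pc equals the recorded
//  *fault_pc and simply resumes execution at *continuation_pc, where
//  c_rarg1 still holds errValue. A hypothetical use, under that contract:
//
//    int v = SafeFetch32((int*) maybe_unmapped_addr, -1);
//    if (v == -1) {
//      // address was unreadable (or genuinely contained -1)
//    }
//
//  so VM code can probe arbitrary addresses without validating them first.)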
3436 *fault_pc = __ pc();
3437 switch (size) {
3438 case 4:
3439 // int32_t
3440 __ movl(c_rarg1, Address(c_rarg0, 0));
3441 break;
3442 case 8:
3443 // int64_t
3444 __ movq(c_rarg1, Address(c_rarg0, 0));
3445 break;
3446 default:
3447 ShouldNotReachHere();
3448 }
3449
3450 // return errValue or *adr
3451 *continuation_pc = __ pc();
3452 __ movq(rax, c_rarg1);
3453 __ ret(0);
3454 }
3455
3456 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
3457 // to hide instruction latency
3458 //
3459 // Arguments:
3460 //
3461 // Inputs:
3462 // c_rarg0 - source byte array address
3463 // c_rarg1 - destination byte array address
3464 // c_rarg2 - K (key) in little endian int array
3465 // c_rarg3 - r vector byte array address
3466 // c_rarg4 - input length
3467 //
3468 // Output:
3469 // rax - input length
3470 //
3471
3472 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3473 assert(UseAES, "need AES instructions and misaligned SSE support");
3474 __ align(CodeEntryAlignment);
3475 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3476 address start = __ pc();
3477
3478 Label L_exit, L_key_192_256, L_key_256;
3479 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
3480 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
3481 const Register from = c_rarg0; // source array address
3482 const Register to = c_rarg1; // destination array address
3483 const Register key = c_rarg2; // key array address
3484 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3485 // and left with the results of the last encryption block
3486 #ifndef _WIN64
3487 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3488 #else
3489 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3490 const Register len_reg = r10; // pick the first volatile windows register
3491 #endif
3492 const Register pos = rax;
3493
3494 // round keys 0x10-0xa0 preloaded into xmm5-xmm14; last key (0x00) goes in xmm15
3495 const int XMM_REG_NUM_KEY_FIRST = 5;
3496 const int XMM_REG_NUM_KEY_LAST = 15;
3497 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3498 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
3499
3500 __ enter(); // required for proper stackwalking of RuntimeStub frame
3501
3502 #ifdef _WIN64
3503 // on win64, fill len_reg from stack position
3504 __ movl(len_reg, len_mem);
3505 // save the xmm registers which must be preserved 6-15
3506 __ subptr(rsp, -rsp_after_call_off * wordSize);
3507 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3508 __ movdqu(xmm_save(i), as_XMMRegister(i));
3509 }
3510 #else
3511 __ push(len_reg); // Save
3512 #endif
3513
3514 // the java expanded key ordering is rotated one position from what we want
3515 // so we start from 0x10 here and hit 0x00 last
3516 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3517 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3518 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
3519 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
3520 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3521 offset += 0x10;
3522 }
3523 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
3524
3525 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block
3526
3527 // registers holding the four results in the parallelized loop
3528 const XMMRegister xmm_result0 = xmm0;
3529 const XMMRegister xmm_result1 = xmm2;
3530 const XMMRegister xmm_result2 = xmm3;
3531 const XMMRegister xmm_result3 = xmm4;
3532
3533 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec
3534
3535 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
3536 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3537 __ cmpl(rax, 44);
3538 __ jcc(Assembler::notEqual, L_key_192_256);
3539
3540
3541 // 128-bit code follows here, parallelized
3542 __ movptr(pos, 0);
3543 __ align(OptoLoopAlignment);
3544 __ BIND(L_multiBlock_loopTop_128);
3545 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left
3546 __ jcc(Assembler::less, L_singleBlock_loopTop_128);
3547
3548 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmm_result registers
3549 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
3550 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
3551 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
3552
3553 #define DoFour(opc, src_reg) \
3554 __ opc(xmm_result0, src_reg); \
3555 __ opc(xmm_result1, src_reg); \
3556 __ opc(xmm_result2, src_reg); \
3557 __ opc(xmm_result3, src_reg);
3558
3559 DoFour(pxor, xmm_key_first);
3560 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3561 DoFour(aesdec, as_XMMRegister(rnum));
3562 }
3563 DoFour(aesdeclast, xmm_key_last);
3564 // for each result, xor with the r vector of previous cipher block
3565 __ pxor(xmm_result0, xmm_prev_block_cipher);
3566 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
3567 __ pxor(xmm_result1, xmm_prev_block_cipher);
3568 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
3569 __ pxor(xmm_result2, xmm_prev_block_cipher);
3570 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
3571 __ pxor(xmm_result3, xmm_prev_block_cipher);
3572 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks
3573
3574 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
3575 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
3576 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
3577 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
3578
3579 __ addptr(pos, 4*AESBlockSize);
3580 __ subptr(len_reg, 4*AESBlockSize);
3581 __ jmp(L_multiBlock_loopTop_128);
3582
3583 // registers used in the non-parallelized loops
3584 // xmm register assignments for the loops below
3585 const XMMRegister xmm_result = xmm0;
3586 const XMMRegister xmm_prev_block_cipher_save = xmm2;
3587 const XMMRegister xmm_key11 = xmm3;
3588 const XMMRegister xmm_key12 = xmm4;
3589 const XMMRegister xmm_temp = xmm4;
3590
3591 __ align(OptoLoopAlignment);
3592 __ BIND(L_singleBlock_loopTop_128);
3593 __ cmpptr(len_reg, 0); // any blocks left??
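// (Note on the loop structure, for reference: CBC decryption parallelizes
//  because plaintext block i depends only on ciphertext,
//  P[i] = D_K(C[i]) ^ C[i-1] with C[-1] = IV, so the four aesdec streams in
//  the multi-block loop above are independent and only the trailing pxor
//  consumes the previous ciphertext block. A scalar C sketch of one 4-block
//  iteration, assuming a hypothetical one-block helper aes_raw_decrypt():
//
//    for (int i = 0; i < 4; i++) {
//      uint8_t t[16];
//      aes_raw_decrypt(key, c + 16*i, t);                  // independent work
//      const uint8_t* prev = (i == 0) ? r : c + 16*(i-1);  // chaining input
//      for (int j = 0; j < 16; j++) p[16*i + j] = t[j] ^ prev[j];
//    }
//
//  CBC encryption, by contrast, is inherently serial, which is why
//  generate_cipherBlockChaining_encryptAESCrypt has no multi-block loop.)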
3594 __ jcc(Assembler::equal, L_exit);
3595 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3596 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3597 __ pxor  (xmm_result, xmm_key_first); // do the aes dec rounds
3598 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3599 __ aesdec(xmm_result, as_XMMRegister(rnum));
3600 }
3601 __ aesdeclast(xmm_result, xmm_key_last);
3602 __ pxor  (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3603 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3604 // no need to store r to memory until we exit
3605 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3606
3607 __ addptr(pos, AESBlockSize);
3608 __ subptr(len_reg, AESBlockSize);
3609 __ jmp(L_singleBlock_loopTop_128);
3610
3611
3612 __ BIND(L_exit);
3613 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3614 #ifdef _WIN64
3615 // restore regs belonging to calling function
3616 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3617 __ movdqu(as_XMMRegister(i), xmm_save(i));
3618 }
3619 __ movl(rax, len_mem);
3620 #else
3621 __ pop(rax); // return length
3622 #endif
3623 __ leave(); // required for proper stackwalking of RuntimeStub frame
3624 __ ret(0);
3625
3626
3627 __ BIND(L_key_192_256);
3628 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3629 load_key(xmm_key11, key, 0xb0);
3630 __ cmpl(rax, 52);
3631 __ jcc(Assembler::notEqual, L_key_256);
3632
3633 // 192-bit code follows here (could be optimized to use parallelism)
3634 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0
3635 __ movptr(pos, 0);
3636 __ align(OptoLoopAlignment);
3637
3638 __ BIND(L_singleBlock_loopTop_192);
3639 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3640 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3641 __ pxor  (xmm_result, xmm_key_first); // do the aes dec rounds
3642 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3643 __ aesdec(xmm_result, as_XMMRegister(rnum));
3644 }
3645 __ aesdec(xmm_result, xmm_key11);
3646 __ aesdec(xmm_result, xmm_key12);
3647 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3648 __ pxor  (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3649 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3650 // no need to store r to memory until we exit
3651 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3652 __ addptr(pos, AESBlockSize);
3653 __ subptr(len_reg, AESBlockSize);
3654 __ jcc(Assembler::notEqual, L_singleBlock_loopTop_192);
3655 __ jmp(L_exit);
3656
3657 __ BIND(L_key_256);
3658 // 256-bit code follows here (could be optimized to use parallelism)
3659 __ movptr(pos, 0);
3660 __ align(OptoLoopAlignment);
3661
3662 __ BIND(L_singleBlock_loopTop_256);
3663 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3664 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3665 __ pxor  (xmm_result, xmm_key_first); // do the aes dec rounds
3666 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3667 __ aesdec(xmm_result, as_XMMRegister(rnum));
3668 }
3669 __ aesdec(xmm_result, xmm_key11);
3670 load_key(xmm_temp, key, 0xc0);
3671 __ aesdec(xmm_result, xmm_temp);
3672 load_key(xmm_temp, key, 0xd0);
3673 __ aesdec(xmm_result, xmm_temp);
3674 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0
3675 __ aesdec(xmm_result, xmm_temp);
3676 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0
3677 __ pxor  (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3678 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3679 // no need to store r to memory until we exit
3680 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3681 __ addptr(pos, AESBlockSize);
3682 __ subptr(len_reg, AESBlockSize);
3683 __ jcc(Assembler::notEqual, L_singleBlock_loopTop_256);
3684 __ jmp(L_exit);
3685
3686 return start;
3687 }
3688
3689 /**
3690 * Arguments:
3691 *
3692 * Inputs:
3693 * c_rarg0 - int crc
3694 * c_rarg1 - byte* buf
3695 * c_rarg2 - int length
3696 *
3697 * Output:
3698 * rax - int crc result
3699 */
3700 address generate_updateBytesCRC32() {
3701 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
3702
3703 __ align(CodeEntryAlignment);
3704 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
3705
3706 address start = __ pc();
3707 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3708 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3709 // rscratch1: r10
3710 const Register crc = c_rarg0; // crc
3711 const Register buf = c_rarg1; // source java byte array address
3712 const Register len = c_rarg2; // length
3713 const Register table = c_rarg3; // crc_table address (reuse register)
3714 const Register tmp = r11;
3715 assert_different_registers(crc, buf, len, table, tmp, rax);
3716
3717 BLOCK_COMMENT("Entry:");
3718 __ enter(); // required for proper stackwalking of RuntimeStub frame
3719
3720 __ kernel_crc32(crc, buf, len, table, tmp);
3721
3722 __ movl(rax, crc);
3723 __ leave(); // required for proper stackwalking of RuntimeStub frame
3724 __ ret(0);
3725
3726 return start;
3727 }
3728
3729
3730 /**
3731 * Arguments:
3732 *
3733 * Input:
3734 * c_rarg0 - x address
3735 * c_rarg1 - x length
3736 * c_rarg2 - y address
3737 * c_rarg3 - y length
3738 * not Win64
3739 * c_rarg4 - z address
3740 * c_rarg5 - z length
3741 * Win64
3742 * rsp+40 - z address
3743 * rsp+48 - z length
3744 */
3745 address generate_multiplyToLen() {
3746 __ align(CodeEntryAlignment);
3747 StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3748
3749 address start = __ pc();
3750 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3751 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3752 const Register x = rdi;
3753 const Register xlen = rax;
3754 const Register y = rsi;
3755 const Register ylen = rcx;
3756 const Register z = r8;
3757 const Register zlen = r11;
3758
3759 // Next registers will be saved on stack in multiply_to_len().
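// (Aside, for reference: multiply_to_len() computes z = x * y over 32-bit
//  limbs, mirroring the Java fallback java.math.BigInteger::multiplyToLen.
//  A C-level sketch of the schoolbook algorithm that the assembler version
//  unrolls into 64-bit limbs — assumes z[0..xlen+ylen-1] is zeroed and all
//  arrays store the most significant limb first:
//
//    for (int i = xlen - 1; i >= 0; i--) {
//      uint64_t carry = 0;
//      for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
//        uint64_t product = (uint64_t)x[i] * y[j] + z[k] + carry;
//        z[k]  = (uint32_t)product;    // low 32 bits stay in place
//        carry = product >> 32;        // high 32 bits ripple upward
//      }
//      z[i] = (uint32_t)carry;
//    }
//  )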
3760 const Register tmp1 = r12;
3761 const Register tmp2 = r13;
3762 const Register tmp3 = r14;
3763 const Register tmp4 = r15;
3764 const Register tmp5 = rbx;
3765
3766 BLOCK_COMMENT("Entry:");
3767 __ enter(); // required for proper stackwalking of RuntimeStub frame
3768
3769 #ifndef _WIN64
3770 __ movptr(zlen, r9); // Save r9 in r11 - zlen
3771 #endif
3772 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
3773 // ylen => rcx, z => r8, zlen => r11
3774 // r9 and r10 may be used to save non-volatile registers
3775 #ifdef _WIN64
3776 // last 2 arguments (#4, #5) are on stack on Win64
3777 __ movptr(z, Address(rsp, 6 * wordSize));
3778 __ movptr(zlen, Address(rsp, 7 * wordSize));
3779 #endif
3780
3781 __ movptr(xlen, rsi);
3782 __ movptr(y, rdx);
3783 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
3784
3785 restore_arg_regs();
3786
3787 __ leave(); // required for proper stackwalking of RuntimeStub frame
3788 __ ret(0);
3789
3790 return start;
3791 }
3792
3793 #undef __
3794 #define __ masm->
3795
3796 // Continuation point for throwing of implicit exceptions that are
3797 // not handled in the current activation. Fabricates an exception
3798 // oop and initiates normal exception dispatching in this
3799 // frame. Since we need to preserve callee-saved values (currently
3800 // only for C2, but done for C1 as well) we need a callee-saved oop
3801 // map and therefore have to make these stubs into RuntimeStubs
3802 // rather than BufferBlobs. If the compiler needs all registers to
3803 // be preserved between the fault point and the exception handler
3804 // then it must assume responsibility for that in
3805 // AbstractCompiler::continuation_for_implicit_null_exception or
3806 // continuation_for_implicit_division_by_zero_exception. All other
3807 // implicit exceptions (e.g., NullPointerException or
3808 // AbstractMethodError on entry) are either at call sites or
3809 // otherwise assume that stack unwinding will be initiated, so
3810 // caller saved registers were assumed volatile in the compiler.
3811 address generate_throw_exception(const char* name,
3812 address runtime_entry,
3813 Register arg1 = noreg,
3814 Register arg2 = noreg) {
3815 // Information about frame layout at time of blocking runtime call.
3816 // Note that we only have to preserve callee-saved registers since
3817 // the compilers are responsible for supplying a continuation point
3818 // if they expect all registers to be preserved.
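// A worked example of the slot arithmetic in the enum below, for reference
// (assuming frame::arg_reg_save_area_bytes is 0 on Linux and 32 on Win64,
// the four-register home area):
//
//   Linux: rbp_off = 0/4 = 0,  framesize = 4 slots  = 2 words = 16 bytes
//   Win64: rbp_off = 32/4 = 8, framesize = 12 slots = 6 words = 48 bytes
//
// Slots are 32-bit (BytesPerInt); in both cases framesize/2 is even, which
// is exactly what the is_even() alignment assert in the generator checks.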
3819 enum layout {
3820 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
3821 rbp_off2,
3822 return_off,
3823 return_off2,
3824 framesize // inclusive of return address
3825 };
3826
3827 int insts_size = 512;
3828 int locs_size = 64;
3829
3830 CodeBuffer code(name, insts_size, locs_size);
3831 OopMapSet* oop_maps = new OopMapSet();
3832 MacroAssembler* masm = new MacroAssembler(&code);
3833
3834 address start = __ pc();
3835
3836 // This is an inlined and slightly modified version of call_VM
3837 // which has the ability to fetch the return PC out of
3838 // thread-local storage and also sets up last_Java_sp slightly
3839 // differently than the real call_VM
3840
3841 __ enter(); // required for proper stackwalking of RuntimeStub frame
3842
3843 assert(is_even(framesize/2), "sp not 16-byte aligned");
3844
3845 // return address and rbp are already in place
3846 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
3847
3848 int frame_complete = __ pc() - start;
3849
3850 // Set up last_Java_sp and last_Java_fp
3851 address the_pc = __ pc();
3852 __ set_last_Java_frame(rsp, rbp, the_pc);
3853 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3854
3855 // Call runtime
3856 if (arg1 != noreg) {
3857 assert(arg2 != c_rarg1, "clobbered");
3858 __ movptr(c_rarg1, arg1);
3859 }
3860 if (arg2 != noreg) {
3861 __ movptr(c_rarg2, arg2);
3862 }
3863 __ movptr(c_rarg0, r15_thread);
3864 BLOCK_COMMENT("call runtime_entry");
3865 __ call(RuntimeAddress(runtime_entry));
3866
3867 // Generate oop map
3868 OopMap* map = new OopMap(framesize, 0);
3869
3870 oop_maps->add_gc_map(the_pc - start, map);
3871
3872 __ reset_last_Java_frame(true, true);
3873
3874 __ leave(); // required for proper stackwalking of RuntimeStub frame
3875
3876 // check for pending exceptions
3877 #ifdef ASSERT
3878 Label L;
3879 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
3880 (int32_t) NULL_WORD);
3881 __ jcc(Assembler::notEqual, L);
3882 __ should_not_reach_here();
3883 __ bind(L);
3884 #endif // ASSERT
3885 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3886
3887
3888 // codeBlob framesize is in words (not VMRegImpl::slot_size)
3889 RuntimeStub* stub =
3890 RuntimeStub::new_runtime_stub(name,
3891 &code,
3892 frame_complete,
3893 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3894 oop_maps, false);
3895 return stub->entry_point();
3896 }
3897
3898 void create_control_words() {
3899 // Round to nearest, 53-bit mode, exceptions masked
3900 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
3901 // Round to zero, 53-bit mode, exceptions masked
3902 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
3903 // Round to nearest, 24-bit mode, exceptions masked
3904 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
3905 // Round to nearest, 64-bit mode, exceptions masked
3906 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
3907 // Round to nearest, all exceptions masked (MXCSR has no precision control)
3908 StubRoutines::_mxcsr_std = 0x1F80;
3909 // Note: the following two constants are 80-bit values
3910 // layout is critical for correct loading by FPU.
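// (For reference: in the x87 80-bit extended format the exponent field is
//  15 bits with a bias of 16383, and the integer bit of the significand is
//  explicit. Each constant below is stored as two 32-bit significand words
//  plus a 16-bit sign/exponent word:
//
//    2^-15360: exponent = 16383 - 15360 = 1023  = 0x03ff, significand = 1.0
//    2^+15360: exponent = 16383 + 15360 = 31743 = 0x7bff, significand = 1.0
//
//  hence the 0x80000000 high significand word — the explicit integer bit —
//  in both entries.)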
3911 // Bias for strict fp multiply/divide
3912 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
3913 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
3914 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
3915 // Un-Bias for strict fp multiply/divide
3916 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
3917 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
3918 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
3919 }
3920
3921 // Initialization
3922 void generate_initial() {
3923 // Generates the initial stubs and initializes the entry points
3924
3925 // These platform-specific settings are needed by generate_call_stub()
3926 create_control_words();
3927
3928 // entry points that exist on all platforms. Note: This is code
3929 // that could be shared among different platforms - however the
3930 // benefit seems to be smaller than the disadvantage of having a
3931 // much more complicated generator structure. See also comment in
3932 // stubRoutines.hpp.
3933
3934 StubRoutines::_forward_exception_entry = generate_forward_exception();
3935
3936 StubRoutines::_call_stub_entry =
3937 generate_call_stub(StubRoutines::_call_stub_return_address);
3938
3939 // is referenced by megamorphic call
3940 StubRoutines::_catch_exception_entry = generate_catch_exception();
3941
3942 // atomic calls
3943 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3944 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
3945 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3946 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
3947 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3948 StubRoutines::_atomic_add_entry = generate_atomic_add();
3949 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
3950 StubRoutines::_fence_entry = generate_orderaccess_fence();
3951
3952 StubRoutines::_handler_for_unsafe_access_entry =
3953 generate_handler_for_unsafe_access();
3954
3955 // platform dependent
3956 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
3957 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
3958
3959 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
3960
3961 // Build this early so it's available for the interpreter.
3962 StubRoutines::_throw_StackOverflowError_entry =
3963 generate_throw_exception("StackOverflowError throw_exception",
3964 CAST_FROM_FN_PTR(address,
3965 SharedRuntime::
3966 throw_StackOverflowError));
3967 if (UseCRC32Intrinsics) {
3968 // set table address before generating stubs that use it
3969 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
3970 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
3971 }
3972 }
3973
3974 void generate_all() {
3975 // Generates all stubs and initializes the entry points
3976
3977 // These entry points require SharedInfo::stack0 to be set up in
3978 // non-core builds and need to be relocatable, so they each
3979 // fabricate a RuntimeStub internally.
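// (Aside: CAST_FROM_FN_PTR is HotSpot's portable cast from a C++ function
//  pointer to a raw code address; assuming the usual definition in
//  utilities/globalDefinitions.hpp it is roughly
//
//    #define CAST_FROM_FN_PTR(new_type, func_ptr) \
//      ((new_type)((address_word)(func_ptr)))
//
//  so each generate_throw_exception() call below captures the VM runtime
//  entry that the fabricated RuntimeStub must call into.)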
3980 StubRoutines::_throw_AbstractMethodError_entry =
3981 generate_throw_exception("AbstractMethodError throw_exception",
3982 CAST_FROM_FN_PTR(address,
3983 SharedRuntime::
3984 throw_AbstractMethodError));
3985
3986 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3987 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3988 CAST_FROM_FN_PTR(address,
3989 SharedRuntime::
3990 throw_IncompatibleClassChangeError));
3991
3992 StubRoutines::_throw_NullPointerException_at_call_entry =
3993 generate_throw_exception("NullPointerException at call throw_exception",
3994 CAST_FROM_FN_PTR(address,
3995 SharedRuntime::
3996 throw_NullPointerException_at_call));
3997
3998 // entry points that are platform specific
3999 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4000 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4001 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4002 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4003
4004 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
4005 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
4006 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
4007 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
4008
4009 // support for verify_oop (must happen after universe_init)
4010 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
4011
4012 // arraycopy stubs used by compilers
4013 generate_arraycopy_stubs();
4014
4015 generate_math_stubs();
4016
4017 // don't bother generating these AES intrinsic stubs unless global flag is set
4018 if (UseAESIntrinsics) {
4019 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
4020
4021 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
4022 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
4023 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
4024 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
4025 }
4026
4027 // Safefetch stubs.
4028 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
4029 &StubRoutines::_safefetch32_fault_pc,
4030 &StubRoutines::_safefetch32_continuation_pc);
4031 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
4032 &StubRoutines::_safefetchN_fault_pc,
4033 &StubRoutines::_safefetchN_continuation_pc);
4034 #ifdef COMPILER2
4035 if (UseMultiplyToLenIntrinsic) {
4036 StubRoutines::_multiplyToLen = generate_multiplyToLen();
4037 }
4038 #endif
4039 }
4040
4041 public:
4042 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
4043 if (all) {
4044 generate_all();
4045 } else {
4046 generate_initial();
4047 }
4048 }
4049 }; // end class declaration
4050
4051 void StubGenerator_generate(CodeBuffer* code, bool all) {
4052 StubGenerator g(code, all);
4053 }
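// Usage, for reference — a simplified sketch of the two call sites, which
// live in runtime/stubRoutines.cpp (names assumed from that file):
//
//   void StubRoutines::initialize1() {           // called early in VM startup
//     _code1 = BufferBlob::create("StubRoutines (1)", code_size1);
//     CodeBuffer buffer(_code1);
//     StubGenerator_generate(&buffer, false);    // runs generate_initial()
//   }
//
//   void StubRoutines::initialize2() {           // called after universe init
//     _code2 = BufferBlob::create("StubRoutines (2)", code_size2);
//     CodeBuffer buffer(_code2);
//     StubGenerator_generate(&buffer, true);     // runs generate_all()
//   }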