/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK         = 0xFFC0;  // Mask out any pending exceptions
const int FPU_CNTRL_WRD_MASK = 0xFFFF;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine: return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif //PRODUCT

  void inc_copy_counter_np(BasicType t) {
#ifndef PRODUCT
    switch (t) {
    case T_BYTE:   inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);  return;
    case T_SHORT:  inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
    case T_INT:    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);   return;
    case T_LONG:   inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);  return;
    case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr);    return;
    }
    ShouldNotReachHere();
#endif //PRODUCT
  }

  //------------------------------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C
  //
  //    [ return_from_Java     ] <--- rsp
  //    [ argument word n      ]
  //      ...
  // -N [ argument word 1      ]
  // -7 [ Possible padding for stack alignment ]
  // -6 [ Possible padding for stack alignment ]
  // -5 [ Possible padding for stack alignment ]
  // -4 [ mxcsr save           ] <--- rsp_after_call
  // -3 [ saved rbx,           ]
  // -2 [ saved rsi            ]
  // -1 [ saved rdi            ]
  //  0 [ saved rbp,           ] <--- rbp,
  //  1 [ return address       ]
  //  2 [ ptr. to call wrapper ]
  //  3 [ result               ]
  //  4 [ result_type          ]
  //  5 [ method               ]
  //  6 [ entry_point          ]
  //  7 [ parameters           ]
  //  8 [ parameter_size       ]
  //  9 [ thread               ]


  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // stub code parameters / addresses
    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
    bool sse_save = false;
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
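    // Note: rsp_after_call sits 4 words below rbp, skipping exactly the
    // mxcsr and saved rbx/rsi/rdi slots of the frame layout pictured above,
    // so the epilogue can discard the locals with a single lea.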
    const int     locals_count_in_bytes  (4*wordSize);
    const Address mxcsr_save    (rbp, -4 * wordSize);
    const Address saved_rbx     (rbp, -3 * wordSize);
    const Address saved_rsi     (rbp, -2 * wordSize);
    const Address saved_rdi     (rbp, -1 * wordSize);
    const Address result        (rbp,  3 * wordSize);
    const Address result_type   (rbp,  4 * wordSize);
    const Address method        (rbp,  5 * wordSize);
    const Address entry_point   (rbp,  6 * wordSize);
    const Address parameters    (rbp,  7 * wordSize);
    const Address parameter_size(rbp,  8 * wordSize);
    const Address thread        (rbp,  9 * wordSize); // same as in generate_catch_exception()!
    sse_save = UseSSE > 0;

    // stub code
    __ enter();
    __ movptr(rcx, parameter_size);                   // parameter counter
    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
    __ addptr(rcx, locals_count_in_bytes);            // reserve space for register saves
    __ subptr(rsp, rcx);
    __ andptr(rsp, -(StackAlignmentInBytes));         // Align stack

    // save rdi, rsi, & rbx, according to C calling conventions
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ movptr(saved_rbx, rbx);
    // save and initialize %mxcsr
    if (sse_save) {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }

    // make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));

#ifdef ASSERT
    // make sure we have no pending exceptions
    { Label L;
      __ movptr(rcx, thread);
      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(rcx, parameter_size);  // parameter counter
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, parameters_done);

    // parameter passing loop

    Label loop;
    // Copy Java parameters in reverse order (receiver last)
    // Note that the argument order is inverted in the process
    // source is rdx[rcx: N-1..0]
    // dest   is rsp[rbx: 0..N-1]

    __ movptr(rdx, parameters);  // parameter pointer
    __ xorptr(rbx, rbx);

    __ BIND(loop);

    // get parameter
    __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
    __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
                      Interpreter::expr_offset_in_bytes(0)), rax);  // store parameter
    __ increment(rbx);
    __ decrement(rcx);
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);       // get Method*
    __ movptr(rax, entry_point);  // get entry_point
    __ mov(rsi, rsp);             // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(rax);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

#ifdef COMPILER2
    {
      Label L_skip;
      if (UseSSE >= 2) {
        __ verify_FPU(0, "call_stub_return");
      } else {
        for (int i = 1; i < 8; i++) {
          __ ffree(i);
        }

        // UseSSE <= 1 so double result should be left on TOS
        __ movl(rsi, result_type);
        __ cmpl(rsi, T_DOUBLE);
        __ jcc(Assembler::equal, L_skip);
        if (UseSSE == 0) {
          // UseSSE == 0 so float result should be left on TOS
          __ cmpl(rsi, T_FLOAT);
          __ jcc(Assembler::equal, L_skip);
        }
        __ ffree(0);
      }
      __ BIND(L_skip);
    }
#endif // COMPILER2

    // store result depending on type
    // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(rdi, result);
    Label is_long, is_float, is_double, exit;
    __ movl(rsi, result_type);
    __ cmpl(rsi, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rsi, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(rsi, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(rdi, 0), rax);
    __ BIND(exit);

    // check that FPU stack is empty
    __ verify_FPU(0, "generate_call_stub");

    // pop parameters
    __ lea(rsp, rsp_after_call);

    // restore %mxcsr
    if (sse_save) {
      __ ldmxcsr(mxcsr_save);
    }

    // restore rdi, rsi and rbx,
    __ movptr(rbx, saved_rbx);
    __ movptr(rsi, saved_rsi);
    __ movptr(rdi, saved_rdi);
    __ addptr(rsp, 4*wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movl(Address(rdi, 0 * wordSize), rax);
    __ movl(Address(rdi, 1 * wordSize), rdx);
    __ jmp(exit);

    __ BIND(is_float);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 1) {
      __ movflt(Address(rdi, 0), xmm0);
    } else {
      __ fstp_s(Address(rdi, 0));
    }
    __ jmp(exit);

    __ BIND(is_double);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 2) {
      __ movdbl(Address(rdi, 0), xmm0);
    } else {
      __ fstp_d(Address(rdi, 0));
    }
    __ jmp(exit);

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case of an exception
  //       crossing an activation frame boundary, that is not the case if the callee
  //       is compiled code => need to setup the rsp.
  //
  // rax,: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (rbp,  9 * wordSize); // same as in generate_call_stub()!
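    // This stub executes inside the frame built by generate_call_stub(), so
    // the two Address constants above must track that frame layout exactly:
    // we re-enter the call stub's epilogue via _call_stub_return_address.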
    address start = __ pc();

    // get thread directly
    __ movptr(rcx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(rbx);
      __ cmpptr(rbx, rcx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(rax);
    __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
    __ lea(Address(rcx, Thread::exception_file_offset()),
           ExternalAddress((address)__FILE__));
    __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();
    const Register thread = rcx;

    // other registers used in this stub
    const Register exception_oop = rax;
    const Register handler_addr  = rbx;
    const Register exception_pc  = rdx;

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(thread);
      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx,
    __ get_thread(thread);
    __ movptr(exception_pc, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
    __ mov(handler_addr, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ get_thread(thread);
    __ pop(exception_pc);
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testptr(exception_oop, exception_oop);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Verify that there is really a valid exception in RAX.
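    // (verify_oop only emits checking code when VerifyOops is enabled at
    //  code-generation time; otherwise it is a no-op, so the normal path
    //  pays nothing here.)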
    __ verify_oop(exception_oop);

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ jmp(handler_addr);

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // xchg exists as far back as 8086, lock needed for MP only
  // Stack layout immediately after call:
  //
  // 0 [ret addr ] <--- rsp
  // 1 [  ex     ]
  // 2 [  dest   ]
  //
  // Result:   *dest <- ex, return (old *dest)
  //
  // Note: win32 does not currently use this code

  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ push(rdx);
    Address exchange(rsp, 2 * wordSize);
    Address dest_addr(rsp, 3 * wordSize);
    __ movl(rax, exchange);
    __ movptr(rdx, dest_addr);
    __ xchgl(rax, Address(rdx, 0));
    __ pop(rdx);
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.


  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls && UseSSE > 0) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code.");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // Support for void verify_fpu_cntrl_wrd()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // FP control word to our expected state.
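  //
  // (Both -Xcheck:jni stubs follow the same pattern: snapshot the register,
  //  mask off the bits we do not own, compare against the VM's canonical
  //  value from StubRoutines, and warn + restore on a mismatch.)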

  address generate_verify_fpu_cntrl_wrd() {
    StubCodeMark mark(this, "StubRoutines", "verify_spcw");
    address start = __ pc();

    const Address fpu_cntrl_wrd_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ fnstcw(fpu_cntrl_wrd_save);
      __ movl(rax, fpu_cntrl_wrd_save);
      __ andl(rax, FPU_CNTRL_WRD_MASK);
      ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
      __ cmp32(rax, fpu_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("Floating point control word changed by native JNI code.");

      __ fldcw(fpu_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  //---------------------------------------------------------------------------
  // Wrapper for slow-case handling of double-to-integer conversion.
  // The d2i or f2i fast case failed either because the value is NaN or
  // because of under/overflow.
  // Input:  FPU TOS: float value
  // Output: rax, (rdx): integer (long) result

  address generate_d2i_wrapper(BasicType t, address fcn) {
    StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
    address start = __ pc();

    // Capture info about frame layout
    enum layout { FPUState_off         = 0,
                  rbp_off              = FPUStateSizeInWords,
                  rdi_off,
                  rsi_off,
                  rcx_off,
                  rbx_off,
                  saved_argument_off,
                  saved_argument_off2, // 2nd half of double
                  framesize
    };

    assert(FPUStateSizeInWords == 27, "update stack layout");

    // Save outgoing argument to stack across push_FPU_state()
    __ subptr(rsp, wordSize * 2);
    __ fstp_d(Address(rsp, 0));

    // Save CPU & FPU state
    __ push(rbx);
    __ push(rcx);
    __ push(rsi);
    __ push(rdi);
    __ push(rbp);
    __ push_FPU_state();

    // push_FPU_state() resets the FP top of stack
    // Load original double into FP top of stack
    __ fld_d(Address(rsp, saved_argument_off * wordSize));
    // Store double into stack as outgoing argument
    __ subptr(rsp, wordSize*2);
    __ fst_d(Address(rsp, 0));

    // Prepare FPU for doing math in C-land
    __ empty_FPU_stack();
    // Call the C code to massage the double.  Result in EAX
    if (t == T_INT)
      { BLOCK_COMMENT("SharedRuntime::d2i"); }
    else if (t == T_LONG)
      { BLOCK_COMMENT("SharedRuntime::d2l"); }
    __ call_VM_leaf(fcn, 2);

    // Restore CPU & FPU state
    __ pop_FPU_state();
    __ pop(rbp);
    __ pop(rdi);
    __ pop(rsi);
    __ pop(rcx);
    __ pop(rbx);
    __ addptr(rsp, wordSize * 2);

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
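  //
  // Control arrives here via the OS signal handler with the faulting pc
  // saved in the thread; handle_unsafe_access() (defined above) computes the
  // pc of the next instruction, and the stub below "returns" to it through
  // the stack slot it reserved with push(0).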
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Non-destructive plausibility checks for oops

  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    // Incoming arguments on stack after saving rax,:
    //
    // [tos    ]: saved rdx
    // [tos + 1]: saved EFLAGS
    // [tos + 2]: return address
    // [tos + 3]: char* error message
    // [tos + 4]: oop object to verify
    // [tos + 5]: saved rax, - saved by caller and bashed

    Label exit, error;
    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
    __ push(rdx);                                // save rdx
    // make sure object is 'reasonable'
    __ movptr(rax, Address(rsp, 4 * wordSize));  // get object
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit);               // if obj is NULL it is ok

    // Check if the oop is in the right area of memory
    const int oop_mask = Universe::verify_oop_mask();
    const int oop_bits = Universe::verify_oop_bits();
    __ mov(rdx, rax);
    __ andptr(rdx, oop_mask);
    __ cmpptr(rdx, oop_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', which is not zero.
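    // (On this 32-bit port the klass word is a plain pointer, with no
    //  compressed-klass encoding, so loading it directly and null-checking
    //  it is a sufficient sanity probe.)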
    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);              // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // restore rdx
    __ popf();                                   // restore EFLAGS
    __ ret(3 * wordSize);                        // pop arguments

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // get saved rdx back
    __ popf();                                   // get saved EFLAGS off stack -- will be ignored
    __ pusha();                                  // push registers (eip = return address & msg are already pushed)
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
    __ popa();
    __ ret(3 * wordSize);                        // pop arguments
    return start;
  }

  //
  //  Generate pre-barrier for array stores
  //
  //  Input:
  //     start   -  starting address
  //     count   -  element count
  void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
    assert_different_registers(start, count);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!uninitialized_target) {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
                          start, count);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
      case BarrierSet::Epsilon:
        break;
      default:
        ShouldNotReachHere();

    }
  }


  //
  // Generate a post-barrier for an array store
  //
  //     start    -  starting address
  //     count    -  element count
  //
  //  The two input registers are overwritten.
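  //
  //  For card-table collectors the barrier dirties every card spanned by
  //  [start, start + count*wordSize - 1]. With the usual card_shift of 9
  //  (512-byte cards), a 100-element oop array on this 32-bit VM covers
  //  400 bytes, i.e. one or two cards, so the loop below normally runs
  //  only an iteration or two.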
  //
  void gen_write_ref_array_post_barrier(Register start, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    assert_different_registers(start, count);
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
                          start, count);
          __ popa();
        }
        break;

      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;  // elements count; end == start+count-1
          assert_different_registers(start, end);

          __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start); // end --> count
          __ BIND(L_loop);
          intptr_t disp = (intptr_t) ct->byte_map_base;
          Address cardtable(start, count, Address::times_1, disp);
          __ movb(cardtable, 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      case BarrierSet::ModRef:
      case BarrierSet::Epsilon:
        break;
      default:
        ShouldNotReachHere();

    }
  }


  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count, negative
  //
  void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
    assert(UseSSE >= 2, "supported cpu only");
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores) {
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from,  0));
        __ vmovdqu(Address(from, to_from, Address::times_1,  0), xmm0);
        __ vmovdqu(xmm1, Address(from, 32));
        __ vmovdqu(Address(from, to_from, Address::times_1, 32), xmm1);
      } else {
        __ movdqu(xmm0, Address(from,  0));
        __ movdqu(Address(from, to_from, Address::times_1,  0), xmm0);
        __ movdqu(xmm1, Address(from, 16));
        __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
        __ movdqu(xmm2, Address(from, 32));
        __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
        __ movdqu(xmm3, Address(from, 48));
        __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
      }
    } else {
      __ movq(xmm0, Address(from,  0));
      __ movq(Address(from, to_from, Address::times_1,  0), xmm0);
      __ movq(xmm1, Address(from,  8));
      __ movq(Address(from, to_from, Address::times_1,  8), xmm1);
      __ movq(xmm2, Address(from, 16));
      __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
      __ movq(xmm3, Address(from, 24));
      __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
      __ movq(xmm4, Address(from, 32));
      __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
      __ movq(xmm5, Address(from, 40));
      __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
      __ movq(xmm6, Address(from, 48));
      __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
      __ movq(xmm7, Address(from, 56));
      __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
    }

    __ addl(from, 64);
    __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores && (UseAVX >= 2)) {
      // clean upper bits of YMM registers
      __ vpxor(xmm0, xmm0);
      __ vpxor(xmm1, xmm1);
    }
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
    __ BIND(L_copy_8_bytes);
    __ movq(xmm0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), xmm0);
    __ addl(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
    __ BIND(L_exit);
  }

  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count, negative
  //
  void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
    assert(VM_Version::supports_mmx(), "supported cpu only");
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes_loop);
    __ movq(mmx0, Address(from,  0));
    __ movq(mmx1, Address(from,  8));
    __ movq(mmx2, Address(from, 16));
    __ movq(Address(from, to_from, Address::times_1,  0), mmx0);
    __ movq(mmx3, Address(from, 24));
    __ movq(Address(from, to_from, Address::times_1,  8), mmx1);
    __ movq(mmx4, Address(from, 32));
    __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
    __ movq(mmx5, Address(from, 40));
    __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
    __ movq(mmx6, Address(from, 48));
    __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
    __ movq(mmx7, Address(from, 56));
    __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
    __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
    __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
    __ addptr(from, 64);
    __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
    __ BIND(L_copy_8_bytes);
    __ movq(mmx0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), mmx0);
    __ addptr(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
    __ BIND(L_exit);
    __ emms();
  }

  address generate_disjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;

    int shift = Address::times_ptr - sf;

    const Register from     = rsi;  // source array address
    const Register to       = rdi;  // destination array address
    const Register count    = rcx;  // elements count
    const Register to_from  = to;   // (to - from)
    const Register saved_to = rdx;  // saved destination array address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(from , Address(rsp, 12+ 4));
    __ movptr(to   , Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+12));

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from conjoint arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
      __ mov(saved_to, to);          // save 'to'
    }

    __ subptr(to, from);                      // to --> to_from
    __ cmpl(count, 2<<shift);                 // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
      // align source address at 4 bytes address boundary
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(from, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
        __ increment(from);
        __ decrement(count);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(from, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      __ addptr(from, 2);
      __ subl(count, 1<<(shift-1));
      __ BIND(L_skip_align2);
    }
    if (!VM_Version::supports_mmx()) {
      __ mov(rax, count);      // save 'count'
      __ shrl(count, shift);   // bytes count
      __ addptr(to_from, from);// restore 'to'
      __ rep_mov();
      __ subptr(to_from, from);// restore 'to_from'
      __ mov(count, rax);      // restore 'count'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      if (!UseUnalignedLoadStores) {
        // align to 8 bytes, we know we are 4 byte aligned to start
        __ testptr(from, 4);
        __ jccb(Assembler::zero, L_copy_64_bytes);
        __ movl(rax, Address(from, 0));
        __ movl(Address(from, to_from, Address::times_1, 0), rax);
        __ addptr(from, 4);
        __ subl(count, 1<<shift);
      }
      __ BIND(L_copy_64_bytes);
      __ mov(rax, count);
      __ shrl(rax, shift+1);  // 8-byte chunk count
      //
      // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
      //
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, rax);
      } else {
        mmx_copy_forward(from, to_from, rax);
      }
    }
    // copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(from, 0));
    __ movl(Address(from, to_from, Address::times_1, 0), rax);
    if (t == T_BYTE || t == T_SHORT) {
      __ addptr(from, 4);
      __ BIND(L_copy_2_bytes);
      // copy trailing word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      if (t == T_BYTE) {
        __ addptr(from, 2);
        __ BIND(L_copy_byte);
        // copy trailing byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
        __ BIND(L_exit);
      } else {
        __ BIND(L_copy_byte);
      }
    } else {
      __ BIND(L_copy_2_bytes);
    }

    if (t == T_OBJECT) {
      __ movl(count, Address(rsp, 12+12)); // reread 'count'
      __ mov(to, saved_to);                // restore 'to'
      gen_write_ref_array_post_barrier(to, count);
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = rdi;  // destination array address
    const Register value = rdx;  // fill value
    const Register count = rsi;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(to , Address(rsp, 12+ 4));
    __ movl(value, Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+12));

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_conjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address nooverlap_target,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;

    int shift = Address::times_ptr - sf;

    const Register src   = rax;  // source array address
    const Register dst   = rdx;  // destination array address
    const Register from  = rsi;  // source array address
    const Register to    = rdi;  // destination array address
    const Register count = rcx;  // elements count
    const Register end   = rax;  // array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(src  , Address(rsp, 12+ 4));   // from
    __ movptr(dst  , Address(rsp, 12+ 8));   // to
    __ movl2ptr(count, Address(rsp, 12+12)); // count

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    // nooverlap_target expects arguments in rsi and rdi.
    __ mov(from, src);
    __ mov(to  , dst);

    // arrays overlap test: dispatch to disjoint stub if necessary.
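    // Disjoint copy is safe when dst <= src (a forward copy never overwrites
    // unread source bytes) or when dst >= src + count*elem_size (the regions
    // do not overlap at all); only src < dst < src+size needs the backward
    // copy loop below.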
    RuntimeAddress nooverlap(nooverlap_target);
    __ cmpptr(dst, src);
    __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ cmpptr(dst, end);
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
    }

    // copy from high to low
    __ cmpl(count, 2<<shift);                 // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (t == T_BYTE || t == T_SHORT) {
      // Align the end of destination array at 4 bytes address boundary
      __ lea(end, Address(dst, count, sf, 0));
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(end, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ decrement(count);
        __ movb(rdx, Address(from, count, sf, 0));
        __ movb(Address(to, count, sf, 0), rdx);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(end, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ subptr(count, 1<<(shift-1));
      __ movw(rdx, Address(from, count, sf, 0));
      __ movw(Address(to, count, sf, 0), rdx);
      __ BIND(L_skip_align2);
      __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
      __ jcc(Assembler::below, L_copy_4_bytes);
    }

    if (!VM_Version::supports_mmx()) {
      __ std();
      __ mov(rax, count); // Save 'count'
      __ mov(rdx, to);    // Save 'to'
      __ lea(rsi, Address(from, count, sf, -4));
      __ lea(rdi, Address(to  , count, sf, -4));
      __ shrptr(count, shift); // bytes count
      __ rep_mov();
      __ cld();
      __ mov(count, rax); // restore 'count'
      __ andl(count, (1<<shift)-1);        // mask the number of rest elements
      __ movptr(from, Address(rsp, 12+4)); // reread 'from'
      __ mov(to, rdx);    // restore 'to'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
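      // Peeling one 4-byte element here when (end & 4) != 0 makes the end
      // address 8-byte aligned, so the backward qword loop below always
      // moves naturally aligned 8-byte chunks.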
      __ testptr(end, 4);
      __ jccb(Assembler::zero, L_copy_8_bytes);
      __ subl(count, 1<<shift);
      __ movl(rdx, Address(from, count, sf, 0));
      __ movl(Address(to, count, sf, 0), rdx);
      __ jmpb(L_copy_8_bytes);

      __ align(OptoLoopAlignment);
      // Move 8 bytes
      __ BIND(L_copy_8_bytes_loop);
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), mmx0);
      }
      __ BIND(L_copy_8_bytes);
      __ subl(count, 2<<shift);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
      __ addl(count, 2<<shift);
      if (!UseXMMForArrayCopy) {
        __ emms();
      }
    }
    __ BIND(L_copy_4_bytes);
    // copy prefix dword
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rdx, Address(from, count, sf, -4));
    __ movl(Address(to, count, sf, -4), rdx);

    if (t == T_BYTE || t == T_SHORT) {
      __ subl(count, (1<<shift));
      __ BIND(L_copy_2_bytes);
      // copy prefix word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rdx, Address(from, count, sf, -2));
      __ movw(Address(to, count, sf, -2), rdx);
      if (t == T_BYTE) {
        __ subl(count, 1<<(shift-1));
        __ BIND(L_copy_byte);
        // copy prefix byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rdx, Address(from, 0));
        __ movb(Address(to, 0), rdx);
        __ BIND(L_exit);
      } else {
        __ BIND(L_copy_byte);
      }
    } else {
      __ BIND(L_copy_2_bytes);
    }
    if (t == T_OBJECT) {
      __ movl2ptr(count, Address(rsp, 12+12)); // reread count
      gen_write_ref_array_post_barrier(to, count);
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_disjoint_long_copy(address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from    = rax;  // source array address
    const Register to      = rdx;  // destination array address
    const Register count   = rcx;  // elements count
    const Register to_from = rdx;  // (to - from)

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));     // from
    __ movptr(to   , Address(rsp, 8+4));     // to
    __ movl2ptr(count, Address(rsp, 8+8));   // count

    *entry = __ pc(); // Entry point from conjoint arraycopy stub.
    BLOCK_COMMENT("Entry:");

    __ subptr(to, from); // to --> to_from
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, count);
      } else {
        mmx_copy_forward(from, to_from, count);
      }
    } else {
      __ jmpb(L_copy_8_bytes);
      __ align(OptoLoopAlignment);
      __ BIND(L_copy_8_bytes_loop);
      __ fild_d(Address(from, 0));
      __ fistp_d(Address(from, to_from, Address::times_1));
      __ addptr(from, 8);
      __ BIND(L_copy_8_bytes);
      __ decrement(count);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }

  address generate_conjoint_long_copy(address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from     = rax;  // source array address
    const Register to       = rdx;  // destination array address
    const Register count    = rcx;  // elements count
    const Register end_from = rax;  // source array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));     // from
    __ movptr(to   , Address(rsp, 8+4));     // to
    __ movl2ptr(count, Address(rsp, 8+8));   // count

    *entry = __ pc(); // Entry point from generic arraycopy stub.
    BLOCK_COMMENT("Entry:");

    // arrays overlap test
    __ cmpptr(to, from);
    RuntimeAddress nooverlap(nooverlap_target);
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ lea(end_from, Address(from, count, Address::times_8, 0));
    __ cmpptr(to, end_from);
    __ movptr(from, Address(rsp, 8));  // from
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    __ jmpb(L_copy_8_bytes);

    __ align(OptoLoopAlignment);
    __ BIND(L_copy_8_bytes_loop);
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), mmx0);
      }
    } else {
      __ fild_d(Address(from, count, Address::times_8));
      __ fistp_d(Address(to, count, Address::times_8));
    }
    __ BIND(L_copy_8_bytes);
    __ decrement(count);
    __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);

    if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
      __ emms();
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  // Helper for generating a dynamic type check.
  // The sub_klass must be one of {rbx, rdx, rsi}.
  // The temp is killed.
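  //
  // The check first tries the cached "display" entry found at
  // super_check_offset in the sub-klass; only when that offset equals the
  // secondary_super_cache slot do we fall into the linear secondary-supers
  // scan in check_klass_subtype_slow_path.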
  void generate_type_check(Register sub_klass,
                           Address& super_check_offset_addr,
                           Address& super_klass_addr,
                           Register temp,
                           Label* L_success, Label* L_failure) {
    BLOCK_COMMENT("type_check:");

    Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr)                      \
    if (label_ptr != NULL)  __ jcc(assembler_con, *(label_ptr)); \
    else                    __ jcc(assembler_con, L_fallthrough) /*omit semi*/

    // The following is a strange variation of the fast path which requires
    // one less register, because needed values are on the argument stack.
    // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
    //                                  L_success, L_failure, NULL);
    assert_different_registers(sub_klass, temp);

    int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

    // if the pointers are equal, we are done (e.g., String[] elements)
    __ cmpptr(sub_klass, super_klass_addr);
    LOCAL_JCC(Assembler::equal, L_success);

    // check the supertype display:
    __ movl2ptr(temp, super_check_offset_addr);
    Address super_check_addr(sub_klass, temp, Address::times_1, 0);
    __ movptr(temp, super_check_addr); // load displayed supertype
    __ cmpptr(temp, super_klass_addr); // test the super type
    LOCAL_JCC(Assembler::equal, L_success);

    // if it was a primary super, we can just fail immediately
    __ cmpl(super_check_offset_addr, sc_offset);
    LOCAL_JCC(Assembler::notEqual, L_failure);

    // The repne_scan instruction uses fixed registers, which will get spilled.
    // We happen to know this works best when super_klass is in rax.
    Register super_klass = temp;
    __ movptr(super_klass, super_klass_addr);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
                                     L_success, L_failure);

    __ bind(L_fallthrough);

    if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
    if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }

#undef LOCAL_JCC
  }

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //     4(rsp)   - source array address
  //     8(rsp)   - destination array address
  //    12(rsp)   - element count, can be zero
  //    16(rsp)   - size_t ckoff (super_check_offset)
  //    20(rsp)   - oop ckval (super_klass)
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // register use:
    //  rax, rdx, rcx -- loop control (end_from, end_to, count)
    //  rdi, rsi      -- element access (oop, klass)
    //  rbx,          -- temp
    const Register from       = rax;  // source array address
    const Register to         = rdx;  // destination array address
    const Register length     = rcx;  // elements count
    const Register elem       = rdi;  // each oop copied
    const Register elem_klass = rsi;  // each elem._klass (sub_klass)
    const Register temp       = rbx;  // lone remaining temp

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address   from_arg(rsp, 16+ 4);   // from
    Address     to_arg(rsp, 16+ 8);   // to
    Address length_arg(rsp, 16+12);   // elements count
    Address  ckoff_arg(rsp, 16+16);   // super_check_offset
    Address  ckval_arg(rsp, 16+20);   // super_klass

    // Load up:
    __ movptr(from,       from_arg);
    __ movptr(to,           to_arg);
    __ movl2ptr(length, length_arg);

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, Address::times_ptr, 0);
    Address   end_to_addr(to,   length, Address::times_ptr, 0);

    Register end_from = from;   // re-use
    Register end_to   = to;     // re-use
    Register count    = length; // re-use

    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, Address::times_ptr, 0);
    Address   to_element_addr(end_to,   count, Address::times_ptr, 0);
    Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());

    // Copy from low to high addresses, indexed from the end of each array.
    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    __ lea(end_from, end_from_addr);
    __ lea(end_to,   end_to_addr);
    assert(length == count, "");  // else fix next line:
    __ negptr(count);             // negate and test the length
    __ jccb(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);          // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*count, to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ movptr(to_element_addr, elem); // store the oop
    __ increment(count);              // increment the count toward zero
    __ jccb(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ movptr(elem, from_element_addr); // load the oop
    __ testptr(elem, elem);
    __ jccb(Assembler::zero, L_store_element);

    // (Could do a trick here:  Remember last successful non-null
    // element stored and make a quick oop equality check on it.)

    __ movptr(elem_klass, elem_klass_addr); // query the object klass
    generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
                        &L_store_element, NULL);
    // (On fall-through, we have failed the element type check.)
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
    // Emit GC store barriers for the oops we have copied (length_arg + count),
    // and report their number to the caller.
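    // Failure-encoding example: with 10 elements total and a type-check
    // failure after 3 were stored, count == -7 here, so transfers become
    // 10 + (-7) = 3 and rax = ~3 = -4; the caller recovers K as ~rax == 3.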
    assert_different_registers(to, count, rax);
    Label L_post_barrier;
    __ addl(count, length_arg); // transfers = (length - remaining)
    __ movl2ptr(rax, count);    // save the value
    __ notptr(rax);             // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success
    __ movl2ptr(count, length_arg);

    __ BIND(L_post_barrier);
    __ movptr(to, to_arg); // reload
    gen_write_ref_array_post_barrier(to, count);

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //     4(rsp)   - source array address
  //     8(rsp)   - destination array address
  //    12(rsp)   - byte count, can be zero
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1  -  need to call System.arraycopy
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry,
                               address short_copy_entry,
                               address int_copy_entry,
                               address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register from  = rax;  // source array address
    const Register to    = rdx;  // destination array address
    const Register count = rcx;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    Address  from_arg(rsp, 12+ 4);  // from
    Address    to_arg(rsp, 12+ 8);  // to
    Address count_arg(rsp, 12+12);  // byte count

    // Load up:
    __ movptr(from ,  from_arg);
    __ movptr(to   ,    to_arg);
    __ movl2ptr(count, count_arg);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    const Register bits = rsi;
    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, count);

    __ testl(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testl(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testl(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(count, LogBytesPerShort); // size => short_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(count, LogBytesPerInt);   // size => int_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(count, LogBytesPerLong);  // size => qword_count
    __ movl(count_arg, count);          // update 'count'
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }


  // Perform range checks on the proposed arraycopy.
  // Smashes src_pos and dst_pos.  (Uses them up for temps.)
  void arraycopy_range_checks(Register src,
                              Register src_pos,
                              Register dst,
                              Register dst_pos,
                              Address& length,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");
    const Register src_end = src_pos;   // source array end position
    const Register dst_end = dst_pos;   // destination array end position
    __ addl(src_end, length); // src_pos + length
    __ addl(dst_end, length); // dst_pos + length

    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }


  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //     4(rsp)    -  src oop
  //     8(rsp)    -  src_pos
  //    12(rsp)    -  dst oop
  //    16(rsp)    -  dst_pos
  //    20(rsp)    -  element count
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_failed_0, L_objArray;

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    // Input values
    Address SRC     (rsp, 12+ 4);
    Address SRC_POS (rsp, 12+ 8);
    Address DST     (rsp, 12+12);
    Address DST_POS (rsp, 12+16);
    Address LENGTH  (rsp, 12+20);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.


  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //     4(rsp)    -  src oop
  //     8(rsp)    -  src_pos
  //    12(rsp)    -  dst oop
  //    16(rsp)    -  dst_pos
  //    20(rsp)    -  element count
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_failed_0, L_objArray;

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    // Input values
    Address SRC     (rsp, 12+ 4);
    Address SRC_POS (rsp, 12+ 8);
    Address DST     (rsp, 12+12);
    Address DST_POS (rsp, 12+16);
    Address LENGTH  (rsp, 12+20);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    const Register src     = rax;       // source array oop
    const Register src_pos = rsi;
    const Register dst     = rdx;       // destination array oop
    const Register dst_pos = rdi;
    const Register length  = rcx;       // transfer count

    //  if (src == NULL) return -1;
    __ movptr(src, SRC);      // src oop
    __ testptr(src, src);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ movl2ptr(src_pos, SRC_POS);  // src_pos
    __ testl(src_pos, src_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ movptr(dst, DST);      // dst oop
    __ testptr(dst, dst);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ movl2ptr(dst_pos, DST_POS);  // dst_pos
    __ testl(dst_pos, dst_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (length < 0) return -1;
    __ movl2ptr(length, LENGTH);   // length
    __ testl(length, length);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (src->klass() == NULL) return -1;
    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
    const Register rcx_src_klass = rcx;    // array klass
    __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));

#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L1, L2;
      __ testptr(rcx_src_klass, rcx_src_klass);
      __ jccb(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
      __ jccb(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("assert done");
    }
#endif //ASSERT

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //
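    // Decoding the fields (illustrative sketch, mirroring the shifts and
    // masks applied further below):
    //
    //   int header_size = (lh >> Klass::_lh_header_size_shift)
    //                     & Klass::_lh_header_size_mask;          // in bytes
    //   int log2_esize  =  lh & Klass::_lh_log2_element_size_mask;
    //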

    int lh_offset = in_bytes(Klass::layout_helper_offset());
    Address src_klass_lh_addr(rcx_src_klass, lh_offset);

    // Handle objArrays completely differently...
    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(src_klass_lh_addr, objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ cmpptr(rcx_src_klass, dst_klass_addr);
    __ jccb(Assembler::notEqual, L_failed_0);

    const Register rcx_lh = rcx;  // layout helper
    assert(rcx_lh == rcx_src_klass, "known alias");
    __ movl(rcx_lh, src_klass_lh_addr);

    //  if (!src->is_Array()) return -1;
    __ cmpl(rcx_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    { Label L;
      __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L); // signed cmp
      __ stop("must be a primitive array");
      __ bind(L);
    }
#endif

    assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh);
    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //
    const Register rsi_offset = rsi; // array offset
    const Register src_array  = src; // src array offset
    const Register dst_array  = dst; // dst array offset
    const Register rdi_elsize = rdi; // log2 element size

    __ mov(rsi_offset, rcx_lh);
    __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
    __ andptr(rsi_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src_array, rsi_offset);  // src array offset
    __ addptr(dst_array, rsi_offset);  // dst array offset
    __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize

    // next registers should be set before the jump to corresponding stub
    const Register from       = src;  // source array address
    const Register to         = dst;  // destination array address
    const Register count      = rcx;  // elements count
    // some of them should be duplicated on stack
#define FROM   Address(rsp, 12+ 4)
#define TO     Address(rsp, 12+ 8)   // Not used now
#define COUNT  Address(rsp, 12+12)   // Only for oop arraycopy

    BLOCK_COMMENT("scale indexes to element size");
    __ movl2ptr(rsi, SRC_POS);  // src_pos
    __ shlptr(rsi);             // src_pos << rcx (log2 elsize)
    assert(src_array == from, "");
    __ addptr(from, rsi);       // from = src_array + SRC_POS << log2 elsize
    __ movl2ptr(rdi, DST_POS);  // dst_pos
    __ shlptr(rdi);             // dst_pos << rcx (log2 elsize)
    assert(dst_array == to, "");
    __ addptr(to,  rdi);        // to = dst_array + DST_POS << log2 elsize
    __ movptr(FROM, from);      // src_addr
    __ mov(rdi_elsize, rcx_lh); // log2 elsize
    __ movl2ptr(count, LENGTH); // elements count
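    // Dispatch note (illustrative): rdi_elsize holds log2 of the element
    // size, so 0 -> jbyte, LogBytesPerShort (1) -> jshort,
    // LogBytesPerInt (2) -> jint, and LogBytesPerLong (3) -> jlong copy loops.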

    BLOCK_COMMENT("choose copy loop based on element size");
    __ cmpl(rdi_elsize, 0);

    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
    __ cmpl(rdi_elsize, LogBytesPerShort);
    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy));
    __ cmpl(rdi_elsize, LogBytesPerInt);
    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy));
#ifdef ASSERT
    __ cmpl(rdi_elsize, LogBytesPerLong);
    __ jccb(Assembler::notEqual, L_failed);
#endif
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(entry_jlong_arraycopy));

    __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // ObjArrayKlass
    __ BIND(L_objArray);
    // live at this point:  rcx_src_klass, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
    __ jccb(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass);
    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);

    __ BIND(L_plain_copy);
    __ movl2ptr(count, LENGTH);     // elements count
    __ movl2ptr(src_pos, SRC_POS);  // reload src_pos
    __ lea(from, Address(src, src_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ movl2ptr(dst_pos, DST_POS);  // reload dst_pos
    __ lea(to,   Address(dst, dst_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movptr(FROM,  from);   // src_addr
    __ movptr(TO,    to);     // dst_addr
    __ movl(COUNT, count);    // count
    __ jump(RuntimeAddress(entry_oop_arraycopy));

    __ BIND(L_checkcast_copy);
    // live at this point:  rcx_src_klass, dst[_pos], src[_pos]
    {
      // Handy offsets:
      int  ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      int sco_offset = in_bytes(Klass::super_check_offset_offset());

      Register rsi_dst_klass = rsi;
      Register rdi_temp      = rdi;
      assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos");
      assert(rdi_temp      == dst_pos, "expected alias w/ dst_pos");
      Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);

      // Before looking at dst.length, make sure dst is also an objArray.
      __ movptr(rsi_dst_klass, dst_klass_addr);
      __ cmpl(dst_klass_lh_addr, objArray_lh);
      __ jccb(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      __ movl2ptr(src_pos, SRC_POS);        // reload rsi
      arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
      // (Now src_pos and dst_pos are killed, but not src and dst.)

      // We'll need this temp (don't forget to pop it after the type check).
      __ push(rbx);
      Register rbx_src_klass = rbx;

      __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
      __ movptr(rsi_dst_klass, dst_klass_addr);
      Address super_check_offset_addr(rsi_dst_klass, sco_offset);
      Label L_fail_array_check;
      generate_type_check(rbx_src_klass,
                          super_check_offset_addr, dst_klass_addr,
                          rdi_temp, NULL, &L_fail_array_check);
      // (On fall-through, we have passed the array type check.)
      __ pop(rbx);
      __ jmp(L_plain_copy);

      __ BIND(L_fail_array_check);
      // Reshuffle arguments so we can call checkcast_arraycopy:

      // match initial saves for checkcast_arraycopy
      // push(rsi);    // already done; see above
      // push(rdi);    // already done; see above
      // push(rbx);    // already done; see above

      // Marshal outgoing arguments now, freeing registers.
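      // Note (illustrative): the push(rbx) above moved rsp down one word, so
      // the incoming 12+x offsets used elsewhere in this stub become 16+x
      // from here on.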
      Address   from_arg(rsp, 16+ 4);   // from
      Address     to_arg(rsp, 16+ 8);   // to
      Address length_arg(rsp, 16+12);   // elements count
      Address  ckoff_arg(rsp, 16+16);   // super_check_offset
      Address  ckval_arg(rsp, 16+20);   // super_klass

      Address SRC_POS_arg(rsp, 16+ 8);
      Address DST_POS_arg(rsp, 16+16);
      Address  LENGTH_arg(rsp, 16+20);
      // push rbx, changed the incoming offsets (why not just use rbp,??)
      // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");

      __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
      __ movl2ptr(length, LENGTH_arg);    // reload elements count
      __ movl2ptr(src_pos, SRC_POS_arg);  // reload src_pos
      __ movl2ptr(dst_pos, DST_POS_arg);  // reload dst_pos

      __ movptr(ckval_arg, rbx);          // destination element type
      __ movl(rbx, Address(rbx, sco_offset));
      __ movl(ckoff_arg, rbx);            // corresponding class check offset

      __ movl(length_arg, length);        // outgoing length argument

      __ lea(from, Address(src, src_pos, Address::times_ptr,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movptr(from_arg, from);

      __ lea(to, Address(dst, dst_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movptr(to_arg, to);
      __ jump(RuntimeAddress(entry_checkcast_arraycopy));
    }

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
        generate_disjoint_copy(T_BYTE,  true, Address::times_1, &entry,
                               "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jbyte_arraycopy =
        generate_conjoint_copy(T_BYTE,  true, Address::times_1,  entry,
                               NULL, "arrayof_jbyte_arraycopy");
    StubRoutines::_jbyte_disjoint_arraycopy =
        generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
                               "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy =
        generate_conjoint_copy(T_BYTE, false, Address::times_1, entry,
                               &entry_jbyte_arraycopy, "jbyte_arraycopy");

    StubRoutines::_arrayof_jshort_disjoint_arraycopy =
        generate_disjoint_copy(T_SHORT,  true, Address::times_2, &entry,
                               "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_arraycopy =
        generate_conjoint_copy(T_SHORT,  true, Address::times_2,  entry,
                               NULL, "arrayof_jshort_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy =
        generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
                               "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy =
        generate_conjoint_copy(T_SHORT, false, Address::times_2, entry,
                               &entry_jshort_arraycopy, "jshort_arraycopy");

    // Next arrays are always aligned on 4 bytes at least.
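    // Because of that, the jint/oop/jlong element copies below already handle
    // any array-aligned start, so the arrayof_ variants further down can
    // simply alias them (see the assignments near the end of this method).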
    StubRoutines::_jint_disjoint_arraycopy =
        generate_disjoint_copy(T_INT, true, Address::times_4, &entry,
                               "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy =
        generate_conjoint_copy(T_INT, true, Address::times_4,  entry,
                               &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_oop_disjoint_arraycopy =
        generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
                               "oop_disjoint_arraycopy");
    StubRoutines::_oop_arraycopy =
        generate_conjoint_copy(T_OBJECT, true, Address::times_ptr,  entry,
                               &entry_oop_arraycopy, "oop_arraycopy");

    StubRoutines::_oop_disjoint_arraycopy_uninit =
        generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
                               "oop_disjoint_arraycopy_uninit",
                               /*dest_uninitialized*/true);
    StubRoutines::_oop_arraycopy_uninit =
        generate_conjoint_copy(T_OBJECT, true, Address::times_ptr,  entry,
                               NULL, "oop_arraycopy_uninit",
                               /*dest_uninitialized*/true);

    StubRoutines::_jlong_disjoint_arraycopy =
        generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy =
        generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
                                    "jlong_arraycopy");

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    StubRoutines::_arrayof_jint_disjoint_arraycopy       = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_disjoint_arraycopy        = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = StubRoutines::_jlong_disjoint_arraycopy;

    StubRoutines::_arrayof_jint_arraycopy       = StubRoutines::_jint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy        = StubRoutines::_oop_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
    StubRoutines::_arrayof_jlong_arraycopy      = StubRoutines::_jlong_arraycopy;

    StubRoutines::_checkcast_arraycopy =
        generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit =
        generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy =
        generate_unsafe_copy("unsafe_arraycopy",
                             entry_jbyte_arraycopy,
                             entry_jshort_arraycopy,
                             entry_jint_arraycopy,
                             entry_jlong_arraycopy);

    StubRoutines::_generic_arraycopy =
        generate_generic_copy("generic_arraycopy",
                              entry_jbyte_arraycopy,
                              entry_jshort_arraycopy,
                              entry_jint_arraycopy,
                              entry_oop_arraycopy,
                              entry_jlong_arraycopy,
                              entry_checkcast_arraycopy);
  }
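  // Calling-convention note for the math stubs below (illustrative): with the
  // 32-bit C ABI the caller's double argument sits at 4(rsp), just above the
  // return address, and the result is returned on the x87 stack in ST(0).
  // For pow the second argument (at 12(rsp)) is loaded first so that x ends
  // up in ST(0) and y in ST(1).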

  void generate_math_stubs() {
    {
      StubCodeMark mark(this, "StubRoutines", "log");
      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ flog();
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "log10");
      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ flog10();
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "sin");
      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('s');
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "cos");
      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('c');
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "tan");
      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('t');
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "exp");
      StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ exp_with_fallback(0);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "pow");
      StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();

      __ fld_d(Address(rsp, 12));
      __ fld_d(Address(rsp, 4));
      __ pow_with_fallback(0);
      __ ret(0);
    }
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data(0x00010203, relocInfo::none, 0 );
    __ emit_data(0x04050607, relocInfo::none, 0 );
    __ emit_data(0x08090a0b, relocInfo::none, 0 );
    __ emit_data(0x0c0d0e0f, relocInfo::none, 0 );
    return start;
  }
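  // Note (illustrative): used as a pshufb control, the mask above reads as
  // byte indices 3,2,1,0 | 7,6,5,4 | 11,10,9,8 | 15,14,13,12 (emit_data is
  // little-endian), i.e. it byte-swaps each 32-bit word, converting the
  // big-endian ints of the Java expanded key into the byte order the AES
  // instructions expect.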

  // Utility routine for loading a 128-bit key word in little endian format;
  // can optionally specify that the shuffle mask is already in an xmm register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // aesenc using specified key+offset;
  // can optionally specify that the shuffle mask is already in an xmm register
  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    load_key(xmmtmp, key, offset, xmm_shuf_mask);
    __ aesenc(xmmdst, xmmtmp);
  }

  // aesdec using specified key+offset;
  // can optionally specify that the shuffle mask is already in an xmm register
  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    load_key(xmmtmp, key, offset, xmm_shuf_mask);
    __ aesdec(xmmdst, xmmtmp);
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = rdx;      // source array address
    const Register to          = rdx;      // destination array address
    const Register key         = rcx;      // key array address
    const Register keylen      = rax;
    const Address  from_param(rbp, 8+0);
    const Address  to_param  (rbp, 8+4);
    const Address  key_param (rbp, 8+8);

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from, from_param);
    __ movptr(key, key_param);

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
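    // (Illustrative note: those lengths correspond to 10, 12, and 14 AES
    // rounds; the cmpl(keylen, 44) / cmpl(keylen, 52) checks below fall
    // through to add two more rounds per larger key size.)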

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
    __ movptr(to, to_param);

    // For encryption, the java expanded key ordering is just what we need

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax);                    // return 0
    __ leave();                             // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = rdx;      // source array address
    const Register to          = rdx;      // destination array address
    const Register key         = rcx;      // key array address
    const Register keylen      = rax;
    const Address  from_param(rbp, 8+0);
    const Address  to_param  (rbp, 8+4);
    const Address  key_param (rbp, 8+8);

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from, from_param);
    __ movptr(key, key_param);

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));
    __ movptr(to, to_param);

    // for decryption java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax);                    // return 0
    __ leave();                             // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void handleSOERegisters(bool saving) {
    const int saveFrameSizeInBytes = 4 * wordSize;
    const Address saved_rbx (rbp, -3 * wordSize);
    const Address saved_rsi (rbp, -2 * wordSize);
    const Address saved_rdi (rbp, -1 * wordSize);

    if (saving) {
      __ subptr(rsp, saveFrameSizeInBytes);
      __ movptr(saved_rsi, rsi);
      __ movptr(saved_rdi, rdi);
      __ movptr(saved_rbx, rbx);
    } else {
      // restoring
      __ movptr(rsi, saved_rsi);
      __ movptr(rdi, saved_rdi);
      __ movptr(rbx, saved_rbx);
    }
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
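  // CBC encryption in one line (illustrative): c[i] = AES_encrypt(p[i] ^ c[i-1], K)
  // with c[-1] = IV (the incoming r vector); the loops below keep the running
  // ciphertext block in xmm_result so chaining needs no extra loads.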
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from        = rsi;      // source array address
    const Register to          = rdx;      // destination array address
    const Register key         = rcx;      // key array address
    const Register rvec        = rdi;      // r byte array initialized from initvector array address
                                           // and left with the results of the last encryption block
    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
    const Register pos         = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // first 6 keys preloaded into xmm2-xmm7
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 7;
    const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    handleSOERegisters(true /*saving*/);

    // load registers from incoming parameters
    const Address  from_param(rbp, 8+0);
    const Address  to_param  (rbp, 8+4);
    const Address  key_param (rbp, 8+8);
    const Address  rvec_param(rbp, 8+12);
    const Address  len_param (rbp, 8+16);
    __ movptr(from   , from_param);
    __ movptr(to     , to_param);
    __ movptr(key    , key_param);
    __ movptr(rvec   , rvec_param);
    __ movptr(len_reg, len_param);

    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 2 thru 7 with keys 0-5
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }

    __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rax, 44);
    __ jcc(Assembler::notEqual, L_key_192_256);

    // 128 bit code follows here
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_loopTop_128);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector

    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) {
      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0xa0);
    __ aesenclast(xmm_result, xmm_temp);

    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_128);

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_result);  // final value of r stored in rvec of CipherBlockChaining object

    handleSOERegisters(false /*restoring*/);
    __ movptr(rax, len_param);  // return length
    __ leave();                 // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be changed to use more xmm registers)
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector

    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) {
      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0xc0);
    __ aesenclast(xmm_result, xmm_temp);

    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector

    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) {
      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);

    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }


  // CBC AES Decryption.
  // In 32-bit stub, because of lack of registers we do not try to parallelize 4 blocks at a time.
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
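  // CBC decryption in one line (illustrative): p[i] = AES_decrypt(c[i], K) ^ c[i-1]
  // with c[-1] = IV. Note the xor uses the *previous ciphertext* block, which
  // is why the loops below keep a pointer to it (prev_block_cipher_ptr)
  // instead of re-encrypting anything.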

  address generate_cipherBlockChaining_decryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256;
    Label L_singleBlock_loopTop_128;
    Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
    const Register from        = rsi;      // source array address
    const Register to          = rdx;      // destination array address
    const Register key         = rcx;      // key array address
    const Register rvec        = rdi;      // r byte array initialized from initvector array address
                                           // and left with the results of the last encryption block
    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
    const Register pos         = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // first 6 keys preloaded into xmm2-xmm7
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 7;
    const int FIRST_NON_REG_KEY_offset = 0x70;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    handleSOERegisters(true /*saving*/);

    // load registers from incoming parameters
    const Address  from_param(rbp, 8+0);
    const Address  to_param  (rbp, 8+4);
    const Address  key_param (rbp, 8+8);
    const Address  rvec_param(rbp, 8+12);
    const Address  len_param (rbp, 8+16);
    __ movptr(from   , from_param);
    __ movptr(to     , to_param);
    __ movptr(key    , key_param);
    __ movptr(rvec   , rvec_param);
    __ movptr(len_reg, len_param);

    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 2 thru 7 with the first 6 keys (offsets 0x10 - 0x60)
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }

    // inside here, use the rvec register to point to previous block cipher
    // with which we xor at the end of each newly decrypted block
    const Register prev_block_cipher_ptr = rvec;

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rax, 44);
    __ jcc(Assembler::notEqual, L_key_192_256);


    // 128-bit code follows here
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_singleBlock_loopTop_128);
    __ cmpptr(len_reg, 0); // any blocks left??
    __ jcc(Assembler::equal, L_exit);
    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesdec(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) {   // 128-bit runs up to key offset a0
      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0x00);                                    // final key is stored in java expanded array at offset 0
    __ aesdeclast(xmm_result, xmm_temp);
    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));  // set up new ptr
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jmp(L_singleBlock_loopTop_128);


    __ BIND(L_exit);
    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
    __ movptr(rvec, rvec_param);              // restore this since used in loop
    __ movdqu(Address(rvec, 0), xmm_temp);    // final value of r stored in rvec of CipherBlockChaining object
    handleSOERegisters(false /*restoring*/);
    __ movptr(rax, len_param);  // return length
    __ leave();                 // required for proper stackwalking of RuntimeStub frame
    __ ret(0);


    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be optimized to use parallelism)
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_singleBlock_loopTop_192);
    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesdec(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) {   // 192-bit runs up to key offset c0
      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0x00);                                    // final key is stored in java expanded array at offset 0
    __ aesdeclast(xmm_result, xmm_temp);
    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));  // set up new ptr
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be optimized to use parallelism)
    __ movl(pos, 0);
    __ align(OptoLoopAlignment);
    __ BIND(L_singleBlock_loopTop_256);
    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
      __ aesdec(xmm_result, as_XMMRegister(rnum));
    }
    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) {   // 256-bit runs up to key offset e0
      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
    }
    load_key(xmm_temp, key, 0x00);                                    // final key is stored in java expanded array at offset 0
    __ aesdeclast(xmm_result, xmm_temp);
    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));  // set up new ptr
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   rsp(4)   - int crc
   *   rsp(8)   - byte* buf
   *   rsp(12)  - int length
   *
   * Output:
   *       rax  - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();

    const Register crc   = rdx;  // crc
    const Register buf   = rsi;  // source java byte array address
    const Register len   = rcx;  // length
    const Register table = rdi;  // crc_table address (reuse register)
    const Register tmp   = rbx;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address crc_arg(rbp, 8 + 0);
    Address buf_arg(rbp, 8 + 4);
    Address len_arg(rbp, 8 + 8);

    // Load up:
    __ movl(crc,   crc_arg);
    __ movptr(buf, buf_arg);
    __ movl(len,   len_arg);

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
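    //
    // Protocol sketch (illustrative): errValue is pre-loaded into rax before
    // the potentially faulting load. If the load at *fault_pc traps, the VM's
    // signal handler resumes execution at *continuation_pc, so the caller
    // simply sees errValue in rax; otherwise rax holds the fetched value.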

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    __ movl(rax, Address(rsp, 0x8));
    __ movl(rcx, Address(rsp, 0x4));
    // Load *adr into eax, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(rax, Address(rcx, 0));
        break;
      case 8:
        // int64_t
        Unimplemented();
        break;
      default:
        ShouldNotReachHere();
    }

    // Return errValue or *adr.
    *continuation_pc = __ pc();
    __ ret(0);
  }

 public:
  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  enum layout {
    thread_off,    // last_java_sp
    arg1_off,
    arg2_off,
    rbp_off,       // callee saved register
    ret_pc,
    framesize
  };
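
  // Resulting frame shape (illustrative), as set up by generate_throw_exception
  // below, with the stack growing downward:
  //
  //   [ ret_pc    ]
  //   [ saved rbp ]  <-- rbp
  //   [ arg2      ]
  //   [ arg1      ]
  //   [ thread    ]  <-- rsp (last_java_sp)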

 private:

#undef  __
#define __ masm->

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame.
  //
  // Previously the compiler (c2) allowed for callee save registers on Java calls.
  // This is no longer true after adapter frames were removed but could possibly
  // be brought back in the future if the interpreter code was reworked and it
  // was deemed worthwhile. The comment below was left to describe what must
  // happen here if callee saves were resurrected. As it stands now this stub
  // could actually be a vanilla BufferBlob and have no oopMap at all.
  // Since it doesn't make much difference we've chosen to leave it the
  // way it was in the callee save days and keep the comment.

  // If we need to preserve callee-saved values we need a callee-saved oop map and
  // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name, address runtime_entry,
                                   Register arg1 = noreg, Register arg2 = noreg) {

    int insts_size = 256;
    int locs_size  = 32;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    Register java_thread = rbx;
    __ get_thread(java_thread);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // pc and rbp, already pushed
    __ subptr(rsp, (framesize-2) * wordSize); // prolog

    // Frame is now completed as far as size and linkage.

    int frame_complete = __ pc() - start;

    // push java thread (becomes first argument of C function)
    __ movptr(Address(rsp, thread_off * wordSize), java_thread);
    if (arg1 != noreg) {
      __ movptr(Address(rsp, arg1_off * wordSize), arg1);
    }
    if (arg2 != noreg) {
      assert(arg1 != noreg, "missing reg arg");
      __ movptr(Address(rsp, arg2_off * wordSize), arg2);
    }

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(java_thread, rsp, rbp, NULL);

    // Call runtime
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));
    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);
    oop_maps->add_gc_map(__ pc() - start, map);

    // restore the thread (cannot use the pushed argument since arguments
    // may be overwritten by C code generated by an optimizing compiler);
    // however can use the register value directly if it is callee saved.
    __ get_thread(java_thread);

    __ reset_last_Java_frame(java_thread, true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif /* ASSERT */
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
    return stub->entry_point();
  }


  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values
    //       layout is critical for correct loading by FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
  }

  //---------------------------------------------------------------------------
  // Initialization

  void generate_initial() {
    // Generates all stubs and initializes the entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
    //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry      = generate_forward_exception();

    StubRoutines::_call_stub_entry              =
      generate_call_stub(StubRoutines::_call_stub_return_address);
    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry        = generate_catch_exception();

    // These are currently used by Solaris/Intel
    StubRoutines::_atomic_xchg_entry            = generate_atomic_xchg();

    StubRoutines::_handler_for_unsafe_access_entry =
      generate_handler_for_unsafe_access();

    // platform dependent
    create_control_words();

    StubRoutines::x86::_verify_mxcsr_entry         = generate_verify_mxcsr();
    StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
    StubRoutines::_d2i_wrapper                     = generate_d2i_wrapper(T_INT,
                                                       CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    StubRoutines::_d2l_wrapper                     = generate_d2i_wrapper(T_LONG,
                                                       CAST_FROM_FN_PTR(address, SharedRuntime::d2l));

    // Build this early so it's available for the interpreter
    StubRoutines::_throw_StackOverflowError_entry  = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));

    if (UseCRC32Intrinsics) {
      // set table address before stub generation which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }
  }


  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    // and need to be relocatable, so they each fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    generate_math_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // might be needed by the others

      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
    StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
    StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
  }


 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration


void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}