/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK         = 0xFFC0;  // Mask out any pending exceptions
const int FPU_CNTRL_WRD_MASK = 0xFFFF;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine: return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif //PRODUCT

  void inc_copy_counter_np(BasicType t) {
#ifndef PRODUCT
    switch (t) {
    case T_BYTE:   inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);  return;
    case T_SHORT:  inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
    case T_INT:    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);   return;
    case T_LONG:   inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);  return;
    case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr);    return;
    }
    ShouldNotReachHere();
#endif //PRODUCT
  }

  //------------------------------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C
  //
  //     [ return_from_Java      ] <--- rsp
  //     [ argument word n       ]
  //       ...
  // -N  [ argument word 1       ]
  // -7  [ Possible padding for stack alignment ]
  // -6  [ Possible padding for stack alignment ]
  // -5  [ Possible padding for stack alignment ]
  // -4  [ mxcsr save            ] <--- rsp_after_call
  // -3  [ saved rbx             ]
  // -2  [ saved rsi             ]
  // -1  [ saved rdi             ]
  //  0  [ saved rbp             ] <--- rbp
  //  1  [ return address        ]
  //  2  [ ptr. to call wrapper  ]
  //  3  [ result                ]
  //  4  [ result_type           ]
  //  5  [ method                ]
  //  6  [ entry_point           ]
  //  7  [ parameters            ]
  //  8  [ parameter_size        ]
  //  9  [ thread                ]
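  //
  // For context: this stub is reached from C++ via JavaCalls::call_helper(),
  // roughly as follows (a sketch only; see the CallStub typedef in
  // stubRoutines.hpp for the authoritative signature):
  //
  //   StubRoutines::call_stub()(
  //       (address)&link,              //  2: ptr. to call wrapper
  //       result_val_address,          //  3: result
  //       result_type,                 //  4: result_type
  //       method(),                    //  5: method
  //       entry_point,                 //  6: entry_point
  //       args->parameters(),          //  7: parameters
  //       args->size_of_parameters(),  //  8: parameter_size
  //       thread);                     //  9: thread
  //
  // which, together with the return address pushed by the call itself,
  // produces exactly the incoming frame drawn above.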

  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // stub code parameters / addresses
    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
    bool  sse_save = false;
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
    const int     locals_count_in_bytes  (4*wordSize);
    const Address mxcsr_save    (rbp, -4 * wordSize);
    const Address saved_rbx     (rbp, -3 * wordSize);
    const Address saved_rsi     (rbp, -2 * wordSize);
    const Address saved_rdi     (rbp, -1 * wordSize);
    const Address result        (rbp,  3 * wordSize);
    const Address result_type   (rbp,  4 * wordSize);
    const Address method        (rbp,  5 * wordSize);
    const Address entry_point   (rbp,  6 * wordSize);
    const Address parameters    (rbp,  7 * wordSize);
    const Address parameter_size(rbp,  8 * wordSize);
    const Address thread        (rbp,  9 * wordSize); // same as in generate_catch_exception()!
    sse_save = UseSSE > 0;

    // stub code
    __ enter();
    __ movptr(rcx, parameter_size);                   // parameter counter
    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
    __ addptr(rcx, locals_count_in_bytes);            // reserve space for register saves
    __ subptr(rsp, rcx);
    __ andptr(rsp, -(StackAlignmentInBytes));         // Align stack

    // save rdi, rsi & rbx according to C calling conventions
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ movptr(saved_rbx, rbx);
    // save and initialize %mxcsr
    if (sse_save) {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }

    // make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));

#ifdef ASSERT
    // make sure we have no pending exceptions
    { Label L;
      __ movptr(rcx, thread);
      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(rcx, parameter_size);  // parameter counter
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, parameters_done);

    // parameter passing loop

    Label loop;
    // Copy Java parameters in reverse order (receiver last)
    // Note that the argument order is inverted in the process
    // source is rdx[rcx: N-1..0]
    // dest   is rsp[rbx: 0..N-1]

    __ movptr(rdx, parameters);          // parameter pointer
    __ xorptr(rbx, rbx);

    __ BIND(loop);

    // get parameter
    __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
    __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
                      Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
    __ increment(rbx);
    __ decrement(rcx);
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);           // get methodOop
    __ movptr(rax, entry_point);      // get entry_point
    __ mov(rsi, rsp);                 // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(rax);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

#ifdef COMPILER2
    {
      Label L_skip;
      if (UseSSE >= 2) {
        __ verify_FPU(0, "call_stub_return");
      } else {
        for (int i = 1; i < 8; i++) {
          __ ffree(i);
        }

        // UseSSE <= 1 so double result should be left on TOS
        __ movl(rsi, result_type);
        __ cmpl(rsi, T_DOUBLE);
        __ jcc(Assembler::equal, L_skip);
        if (UseSSE == 0) {
          // UseSSE == 0 so float result should be left on TOS
          __ cmpl(rsi, T_FLOAT);
          __ jcc(Assembler::equal, L_skip);
        }
        __ ffree(0);
      }
      __ BIND(L_skip);
    }
#endif // COMPILER2

    // store result depending on type
    // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(rdi, result);
    Label is_long, is_float, is_double, exit;
    __ movl(rsi, result_type);
    __ cmpl(rsi, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rsi, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(rsi, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(rdi, 0), rax);
    __ BIND(exit);

    // check that FPU stack is empty
    __ verify_FPU(0, "generate_call_stub");

    // pop parameters
    __ lea(rsp, rsp_after_call);

    // restore %mxcsr
    if (sse_save) {
      __ ldmxcsr(mxcsr_save);
    }

    // restore rdi, rsi and rbx
    __ movptr(rbx, saved_rbx);
    __ movptr(rsi, saved_rsi);
    __ movptr(rdi, saved_rdi);
    __ addptr(rsp, 4*wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movl(Address(rdi, 0 * wordSize), rax);
    __ movl(Address(rdi, 1 * wordSize), rdx);
    __ jmp(exit);

    __ BIND(is_float);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 1) {
      __ movflt(Address(rdi, 0), xmm0);
    } else {
      __ fstp_s(Address(rdi, 0));
    }
    __ jmp(exit);

    __ BIND(is_double);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 2) {
      __ movdbl(Address(rdi, 0), xmm0);
    } else {
      __ fstp_d(Address(rdi, 0));
    }
    __ jmp(exit);

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case of an exception
  //       crossing an activation frame boundary, that is not the case if the callee
  //       is compiled code => need to set up the rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (rbp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movptr(rcx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(rbx);
      __ cmpptr(rbx, rcx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(rax);
    __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
    __ lea(Address(rcx, Thread::exception_file_offset()),
           ExternalAddress((address)__FILE__));
    __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();
    const Register thread = rcx;

    // other registers used in this stub
    const Register exception_oop = rax;
    const Register handler_addr  = rbx;
    const Register exception_pc  = rdx;

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(thread);
      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ get_thread(thread);
    __ movptr(exception_pc, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
    __ mov(handler_addr, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ get_thread(thread);
    __ pop(exception_pc);
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testptr(exception_oop, exception_oop);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Verify that there is really a valid exception in RAX.
    __ verify_oop(exception_oop);

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ jmp(handler_addr);

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // xchg exists as far back as 8086, lock needed for MP only
  // Stack layout immediately after call:
  //
  // 0 [ret addr ] <--- rsp
  // 1 [  ex     ]
  // 2 [  dest   ]
  //
  // Result:   *dest <- ex, return (old *dest)
  //
  // Note: win32 does not currently use this code

  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ push(rdx);
    Address exchange(rsp, 2 * wordSize);
    Address dest_addr(rsp, 3 * wordSize);
    __ movl(rax, exchange);
    __ movptr(rdx, dest_addr);
    __ xchgl(rax, Address(rdx, 0));
    __ pop(rdx);
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.


  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls && UseSSE > 0) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code.");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // Support for void verify_fpu_cntrl_wrd()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // FP control word to our expected state.
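  //
  // (Like verify_mxcsr above, the checking body is generated only when
  // CheckJNICalls is enabled; the stub compares the live control word
  // against the saved standard value, warns, and reloads the standard
  // value so execution can continue with a sane FPU state.)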

  address generate_verify_fpu_cntrl_wrd() {
    StubCodeMark mark(this, "StubRoutines", "verify_spcw");
    address start = __ pc();

    const Address fpu_cntrl_wrd_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ fnstcw(fpu_cntrl_wrd_save);
      __ movl(rax, fpu_cntrl_wrd_save);
      __ andl(rax, FPU_CNTRL_WRD_MASK);
      ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
      __ cmp32(rax, fpu_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("Floating point control word changed by native JNI code.");

      __ fldcw(fpu_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  //---------------------------------------------------------------------------
  // Wrapper for slow-case handling of double-to-integer conversion
  // d2i or f2i fast case failed either because it is NaN or because
  // of under/overflow.
  // Input:  FPU TOS: float value
  // Output: rax (rdx): integer (long) result

  address generate_d2i_wrapper(BasicType t, address fcn) {
    StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
    address start = __ pc();

    // Capture info about frame layout
    enum layout { FPUState_off         = 0,
                  rbp_off              = FPUStateSizeInWords,
                  rdi_off,
                  rsi_off,
                  rcx_off,
                  rbx_off,
                  saved_argument_off,
                  saved_argument_off2, // 2nd half of double
                  framesize
    };

    assert(FPUStateSizeInWords == 27, "update stack layout");

    // Save outgoing argument to stack across push_FPU_state()
    __ subptr(rsp, wordSize * 2);
    __ fstp_d(Address(rsp, 0));

    // Save CPU & FPU state
    __ push(rbx);
    __ push(rcx);
    __ push(rsi);
    __ push(rdi);
    __ push(rbp);
    __ push_FPU_state();

    // push_FPU_state() resets the FP top of stack
    // Load original double into FP top of stack
    __ fld_d(Address(rsp, saved_argument_off * wordSize));
    // Store double into stack as outgoing argument
    __ subptr(rsp, wordSize*2);
    __ fst_d(Address(rsp, 0));

    // Prepare FPU for doing math in C-land
    __ empty_FPU_stack();
    // Call the C code to massage the double.  Result in EAX
    if (t == T_INT)
      { BLOCK_COMMENT("SharedRuntime::d2i"); }
    else if (t == T_LONG)
      { BLOCK_COMMENT("SharedRuntime::d2l"); }
    __ call_VM_leaf(fcn, 2);

    // Restore CPU & FPU state
    __ pop_FPU_state();
    __ pop(rbp);
    __ pop(rdi);
    __ pop(rsi);
    __ pop(rcx);
    __ pop(rbx);
    __ addptr(rsp, wordSize * 2);

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
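  //
  // The mechanics, in brief: the OS signal handler recognizes a fault in an
  // unsafe-access instruction, records the faulting pc in the thread as
  // saved_exception_pc, and resumes execution at this stub. The stub then
  // reserves a return-address slot, calls handle_unsafe_access() (defined at
  // the top of this file) to mark the pending unsafe-access error and compute
  // the address of the next instruction, stores that address into the slot,
  // and returns to it, skipping the faulting access.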
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Non-destructive plausibility checks for oops

  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    // Incoming arguments on stack after saving rax:
    //
    // [tos    ]: saved rdx
    // [tos + 1]: saved EFLAGS
    // [tos + 2]: return address
    // [tos + 3]: char* error message
    // [tos + 4]: oop object to verify
    // [tos + 5]: saved rax - saved by caller and bashed

    Label exit, error;
    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
    __ push(rdx);                                // save rdx
    // make sure object is 'reasonable'
    __ movptr(rax, Address(rsp, 4 * wordSize));  // get object
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit);               // if obj is NULL it is ok

    // Check if the oop is in the right area of memory
    const int oop_mask = Universe::verify_oop_mask();
    const int oop_bits = Universe::verify_oop_bits();
    __ mov(rdx, rax);
    __ andptr(rdx, oop_mask);
    __ cmpptr(rdx, oop_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable'
    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);              // if klass is NULL it is broken

    // Check if the klass is in the right area of memory
    const int klass_mask = Universe::verify_klass_mask();
    const int klass_bits = Universe::verify_klass_bits();
    __ mov(rdx, rax);
    __ andptr(rdx, klass_mask);
    __ cmpptr(rdx, klass_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass' klass is 'reasonable'
    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);              // if klass' klass is NULL it is broken

    __ mov(rdx, rax);
    __ andptr(rdx, klass_mask);
    __ cmpptr(rdx, klass_bits);
    __ jcc(Assembler::notZero, error);           // if klass not in right area
                                                 // of memory it is broken too.

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax back
    __ pop(rdx);                                 // restore rdx
    __ popf();                                   // restore EFLAGS
    __ ret(3 * wordSize);                        // pop arguments

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax back
    __ pop(rdx);                                 // get saved rdx back
    __ popf();                                   // get saved EFLAGS off stack -- will be ignored
    __ pusha();                                  // push registers (eip = return address & msg are already pushed)
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
    __ popa();
    __ ret(3 * wordSize);                        // pop arguments
    return start;
  }

  //
  // Generate pre-barrier for array stores
  //
  // Input:
  //   start   -  starting address
  //   count   -  element count
  void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
    assert_different_registers(start, count);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!uninitialized_target) {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
                          start, count);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default      :
        ShouldNotReachHere();

    }
  }


  //
  // Generate a post-barrier for an array store
  //
  // start   -  starting address
  // count   -  element count
  //
  // The two input registers are overwritten.
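  //
  // For the card table cases this emits the classic dirty-card loop; in
  // C-like pseudo code (byte_map_base is biased so it can be indexed by
  // (address >> card_shift) directly, and 0 is the dirty value here):
  //
  //   for (addr = start; addr < start + count*oopSize; addr += card_size)
  //     ct->byte_map_base[addr >> card_shift] = 0;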
  //
  void gen_write_ref_array_post_barrier(Register start, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    assert_different_registers(start, count);
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
                          start, count);
          __ popa();
        }
        break;

      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;  // elements count; end == start+count-1
          assert_different_registers(start, end);

          __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start); // end --> count
          __ BIND(L_loop);
          intptr_t disp = (intptr_t) ct->byte_map_base;
          Address cardtable(start, count, Address::times_1, disp);
          __ movb(cardtable, 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default      :
        ShouldNotReachHere();

    }
  }


  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count (positive)
  //
  void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
    assert( UseSSE >= 2, "supported cpu only" );
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores) {
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from,  0));
        __ vmovdqu(Address(from, to_from, Address::times_1,  0), xmm0);
        __ vmovdqu(xmm1, Address(from, 32));
        __ vmovdqu(Address(from, to_from, Address::times_1, 32), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, 0));
        __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
        __ movdqu(xmm1, Address(from, 16));
        __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
        __ movdqu(xmm2, Address(from, 32));
        __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
        __ movdqu(xmm3, Address(from, 48));
        __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
      }
    } else {
      __ movq(xmm0, Address(from, 0));
      __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
      __ movq(xmm1, Address(from, 8));
      __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
      __ movq(xmm2, Address(from, 16));
      __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
      __ movq(xmm3, Address(from, 24));
      __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
      __ movq(xmm4, Address(from, 32));
      __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
      __ movq(xmm5, Address(from, 40));
      __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
      __ movq(xmm6, Address(from, 48));
      __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
      __ movq(xmm7, Address(from, 56));
      __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
    }

    __ addl(from, 64);
    __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores && (UseAVX >= 2)) {
      // clean upper bits of YMM registers
      __ vzeroupper();
    }
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
    __ BIND(L_copy_8_bytes);
    __ movq(xmm0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), xmm0);
    __ addl(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
    __ BIND(L_exit);
  }
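
  // Note on addressing in the copy helpers above and below: only the
  // difference (to - from) is kept in a register, so each store of the form
  //   Address(from, to_from, Address::times_1, off)
  // computes from + (to - from) + off == to + off. Advancing 'from' by one
  // chunk therefore advances the load and store addresses together.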

  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count (positive)
  //
  void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
    assert( VM_Version::supports_mmx(), "supported cpu only" );
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes_loop);
    __ movq(mmx0, Address(from, 0));
    __ movq(mmx1, Address(from, 8));
    __ movq(mmx2, Address(from, 16));
    __ movq(Address(from, to_from, Address::times_1, 0), mmx0);
    __ movq(mmx3, Address(from, 24));
    __ movq(Address(from, to_from, Address::times_1, 8), mmx1);
    __ movq(mmx4, Address(from, 32));
    __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
    __ movq(mmx5, Address(from, 40));
    __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
    __ movq(mmx6, Address(from, 48));
    __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
    __ movq(mmx7, Address(from, 56));
    __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
    __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
    __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
    __ addptr(from, 64);
    __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
    __ BIND(L_copy_8_bytes);
    __ movq(mmx0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), mmx0);
    __ addptr(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
    __ BIND(L_exit);
    __ emms();
  }

  address generate_disjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;

    int shift = Address::times_ptr - sf;

    const Register from     = rsi;  // source array address
    const Register to       = rdi;  // destination array address
    const Register count    = rcx;  // elements count
    const Register to_from  = to;   // (to - from)
    const Register saved_to = rdx;  // saved destination array address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(from , Address(rsp, 12+ 4));
    __ movptr(to   , Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+ 12));

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from conjoint arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
      __ mov(saved_to, to);          // save 'to'
    }

    __ subptr(to, from);      // to --> to_from
    __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
      // align source address at 4 bytes address boundary
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(from, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
        __ increment(from);
        __ decrement(count);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(from, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      __ addptr(from, 2);
      __ subl(count, 1<<(shift-1));
      __ BIND(L_skip_align2);
    }
    if (!VM_Version::supports_mmx()) {
      __ mov(rax, count);      // save 'count'
      __ shrl(count, shift);   // bytes count
      __ addptr(to_from, from);// restore 'to'
      __ rep_mov();
      __ subptr(to_from, from);// restore 'to_from'
      __ mov(count, rax);      // restore 'count'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      if (!UseUnalignedLoadStores) {
        // align to 8 bytes, we know we are 4 byte aligned to start
        __ testptr(from, 4);
        __ jccb(Assembler::zero, L_copy_64_bytes);
        __ movl(rax, Address(from, 0));
        __ movl(Address(from, to_from, Address::times_1, 0), rax);
        __ addptr(from, 4);
        __ subl(count, 1<<shift);
      }
      __ BIND(L_copy_64_bytes);
      __ mov(rax, count);
      __ shrl(rax, shift+1);  // 8 bytes chunk count
      //
      // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
      //
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, rax);
      } else {
        mmx_copy_forward(from, to_from, rax);
      }
    }
    // copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(from, 0));
    __ movl(Address(from, to_from, Address::times_1, 0), rax);
    if (t == T_BYTE || t == T_SHORT) {
      __ addptr(from, 4);
      __ BIND(L_copy_2_bytes);
      // copy trailing word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      if (t == T_BYTE) {
        __ addptr(from, 2);
        __ BIND(L_copy_byte);
        // copy trailing byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
        __ BIND(L_exit);
      } else {
        __ BIND(L_copy_byte);
      }
    } else {
      __ BIND(L_copy_2_bytes);
    }

    if (t == T_OBJECT) {
      __ movl(count, Address(rsp, 12+12)); // reread 'count'
      __ mov(to, saved_to);                // restore 'to'
      gen_write_ref_array_post_barrier(to, count);
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }
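
  // For orientation: later in this file (beyond this excerpt) the generators
  // above and below are instantiated once per element type and recorded in
  // StubRoutines, along the lines of (a sketch, not the exact code; see
  // generate_arraycopy_stubs() for the actual calls):
  //
  //   StubRoutines::_jbyte_disjoint_arraycopy =
  //       generate_disjoint_copy(T_BYTE, false, Address::times_1,
  //                              &entry, "jbyte_disjoint_arraycopy");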

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to       = rdi;  // destination array address
    const Register value    = rdx;  // value
    const Register count    = rsi;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(to   , Address(rsp, 12+ 4));
    __ movl(value, Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+ 12));

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_conjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address nooverlap_target,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;

    int shift = Address::times_ptr - sf;

    const Register src   = rax;  // source array address
    const Register dst   = rdx;  // destination array address
    const Register from  = rsi;  // source array address
    const Register to    = rdi;  // destination array address
    const Register count = rcx;  // elements count
    const Register end   = rax;  // array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(src  , Address(rsp, 12+ 4));   // from
    __ movptr(dst  , Address(rsp, 12+ 8));   // to
    __ movl2ptr(count, Address(rsp, 12+12)); // count

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    // nooverlap_target expects arguments in rsi and rdi.
    __ mov(from, src);
    __ mov(to  , dst);

    // arrays overlap test: dispatch to disjoint stub if necessary.
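    // (A forward copy is safe only when the destination does not overlap the
    // source from above, i.e. when dst <= src or dst >= src + count*elem_size;
    // the two unsigned jump_cc tests below check exactly that and tail-call
    // the disjoint stub. Otherwise the regions overlap and this stub copies
    // from high addresses down to low ones.)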
    RuntimeAddress nooverlap(nooverlap_target);
    __ cmpptr(dst, src);
    __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ cmpptr(dst, end);
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
    }

    // copy from high to low
    __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (t == T_BYTE || t == T_SHORT) {
      // Align the end of destination array at 4 bytes address boundary
      __ lea(end, Address(dst, count, sf, 0));
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(end, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ decrement(count);
        __ movb(rdx, Address(from, count, sf, 0));
        __ movb(Address(to, count, sf, 0), rdx);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(end, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ subptr(count, 1<<(shift-1));
      __ movw(rdx, Address(from, count, sf, 0));
      __ movw(Address(to, count, sf, 0), rdx);
      __ BIND(L_skip_align2);
      __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
      __ jcc(Assembler::below, L_copy_4_bytes);
    }

    if (!VM_Version::supports_mmx()) {
      __ std();
      __ mov(rax, count); // Save 'count'
      __ mov(rdx, to);    // Save 'to'
      __ lea(rsi, Address(from, count, sf, -4));
      __ lea(rdi, Address(to  , count, sf, -4));
      __ shrptr(count, shift); // bytes count
      __ rep_mov();
      __ cld();
      __ mov(count, rax); // restore 'count'
      __ andl(count, (1<<shift)-1);        // mask the number of rest elements
      __ movptr(from, Address(rsp, 12+4)); // reread 'from'
      __ mov(to, rdx);    // restore 'to'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
      __ testptr(end, 4);
      __ jccb(Assembler::zero, L_copy_8_bytes);
      __ subl(count, 1<<shift);
      __ movl(rdx, Address(from, count, sf, 0));
      __ movl(Address(to, count, sf, 0), rdx);
      __ jmpb(L_copy_8_bytes);

      __ align(OptoLoopAlignment);
      // Move 8 bytes
      __ BIND(L_copy_8_bytes_loop);
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), mmx0);
      }
      __ BIND(L_copy_8_bytes);
      __ subl(count, 2<<shift);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
      __ addl(count, 2<<shift);
      if (!UseXMMForArrayCopy) {
        __ emms();
      }
    }
    __ BIND(L_copy_4_bytes);
    // copy prefix dword
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rdx, Address(from, count, sf, -4));
    __ movl(Address(to, count, sf, -4), rdx);

    if (t == T_BYTE || t == T_SHORT) {
      __ subl(count, (1<<shift));
      __ BIND(L_copy_2_bytes);
      // copy prefix word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rdx, Address(from, count, sf, -2));
      __ movw(Address(to, count, sf, -2), rdx);
      if (t == T_BYTE) {
        __ subl(count, 1<<(shift-1));
        __ BIND(L_copy_byte);
        // copy prefix byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rdx, Address(from, 0));
        __ movb(Address(to, 0), rdx);
        __ BIND(L_exit);
      } else {
        __ BIND(L_copy_byte);
      }
    } else {
      __ BIND(L_copy_2_bytes);
    }
    if (t == T_OBJECT) {
      __ movl2ptr(count, Address(rsp, 12+12)); // reread count
      gen_write_ref_array_post_barrier(to, count);
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_disjoint_long_copy(address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from    = rax;  // source array address
    const Register to      = rdx;  // destination array address
    const Register count   = rcx;  // elements count
    const Register to_from = rdx;  // (to - from)

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from conjoint arraycopy stub.
    BLOCK_COMMENT("Entry:");

    __ subptr(to, from); // to --> to_from
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, count);
      } else {
        mmx_copy_forward(from, to_from, count);
      }
    } else {
      __ jmpb(L_copy_8_bytes);
      __ align(OptoLoopAlignment);
      __ BIND(L_copy_8_bytes_loop);
      __ fild_d(Address(from, 0));
      __ fistp_d(Address(from, to_from, Address::times_1));
      __ addptr(from, 8);
      __ BIND(L_copy_8_bytes);
      __ decrement(count);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }

  address generate_conjoint_long_copy(address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from     = rax;  // source array address
    const Register to       = rdx;  // destination array address
    const Register count    = rcx;  // elements count
    const Register end_from = rax;  // source array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from generic arraycopy stub.
    BLOCK_COMMENT("Entry:");

    // arrays overlap test
    __ cmpptr(to, from);
    RuntimeAddress nooverlap(nooverlap_target);
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ lea(end_from, Address(from, count, Address::times_8, 0));
    __ cmpptr(to, end_from);
    __ movptr(from, Address(rsp, 8));  // from
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    __ jmpb(L_copy_8_bytes);

    __ align(OptoLoopAlignment);
    __ BIND(L_copy_8_bytes_loop);
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), mmx0);
      }
    } else {
      __ fild_d(Address(from, count, Address::times_8));
      __ fistp_d(Address(to, count, Address::times_8));
    }
    __ BIND(L_copy_8_bytes);
    __ decrement(count);
    __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);

    if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
      __ emms();
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  // Helper for generating a dynamic type check.
  // The sub_klass must be one of {rbx, rdx, rsi}.
  // The temp is killed.
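  //
  // Background, briefly (the authoritative description is with
  // MacroAssembler::check_klass_subtype_slow_path): a klass S subtypes T if
  // T appears in S's supertype display at T's super_check_offset (the
  // primary-super case, one load and compare), or, failing that, if T is
  // found by scanning S's secondary-supers list; a secondary hit is cached
  // in secondary_super_cache so repeated checks against T stay fast.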
  void generate_type_check(Register sub_klass,
                           Address& super_check_offset_addr,
                           Address& super_klass_addr,
                           Register temp,
                           Label* L_success, Label* L_failure) {
    BLOCK_COMMENT("type_check:");

    Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr)                             \
    if (label_ptr != NULL)  __ jcc(assembler_con, *(label_ptr));        \
    else                    __ jcc(assembler_con, L_fallthrough) /*omit semi*/

    // The following is a strange variation of the fast path which requires
    // one less register, because needed values are on the argument stack.
    // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
    //                                  L_success, L_failure, NULL);
    assert_different_registers(sub_klass, temp);

    int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

    // if the pointers are equal, we are done (e.g., String[] elements)
    __ cmpptr(sub_klass, super_klass_addr);
    LOCAL_JCC(Assembler::equal, L_success);

    // check the supertype display:
    __ movl2ptr(temp, super_check_offset_addr);
    Address super_check_addr(sub_klass, temp, Address::times_1, 0);
    __ movptr(temp, super_check_addr); // load displayed supertype
    __ cmpptr(temp, super_klass_addr); // test the super type
    LOCAL_JCC(Assembler::equal, L_success);

    // if it was a primary super, we can just fail immediately
    __ cmpl(super_check_offset_addr, sc_offset);
    LOCAL_JCC(Assembler::notEqual, L_failure);

    // The repne_scan instruction uses fixed registers, which will get spilled.
    // We happen to know this works best when super_klass is in rax.
    Register super_klass = temp;
    __ movptr(super_klass, super_klass_addr);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
                                     L_success, L_failure);

    __ bind(L_fallthrough);

    if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
    if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }

#undef LOCAL_JCC
  }

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //     4(rsp)   - source array address
  //     8(rsp)   - destination array address
  //    12(rsp)   - element count, can be zero
  //    16(rsp)   - size_t ckoff (super_check_offset)
  //    20(rsp)   - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // register use:
    //  rax, rdx, rcx -- loop control (end_from, end_to, count)
    //  rdi, rsi      -- element access (oop, klass)
    //  rbx           -- temp
    const Register from       = rax;  // source array address
    const Register to         = rdx;  // destination array address
    const Register length     = rcx;  // elements count
    const Register elem       = rdi;  // each oop copied
    const Register elem_klass = rsi;  // each elem._klass (sub_klass)
    const Register temp       = rbx;  // lone remaining temp

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address   from_arg(rsp, 16+ 4);   // from
    Address     to_arg(rsp, 16+ 8);   // to
    Address length_arg(rsp, 16+12);   // elements count
    Address  ckoff_arg(rsp, 16+16);   // super_check_offset
    Address  ckval_arg(rsp, 16+20);   // super_klass

    // Load up:
    __ movptr(from,     from_arg);
    __ movptr(to,         to_arg);
    __ movl2ptr(length, length_arg);

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, Address::times_ptr, 0);
    Address   end_to_addr(to,   length, Address::times_ptr, 0);

    Register end_from = from; // re-use
    Register end_to   = to;   // re-use
    Register count    = length; // re-use

    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, Address::times_ptr, 0);
    Address   to_element_addr(end_to,   count, Address::times_ptr, 0);
    Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());

    // Copy from low to high addresses, indexed from the end of each array.
    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    assert(length == count, "");      // else fix next line:
    __ negptr(count);                 // negate and test the length
    __ jccb(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);              // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*count, to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ movptr(to_element_addr, elem); // store the oop
    __ increment(count);              // increment the count toward zero
    __ jccb(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ movptr(elem, from_element_addr); // load the oop
    __ testptr(elem, elem);
    __ jccb(Assembler::zero, L_store_element);

    // (Could do a trick here: Remember last successful non-null
    // element stored and make a quick oop equality check on it.)

    __ movptr(elem_klass, elem_klass_addr); // query the object klass
    generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
                        &L_store_element, NULL);
    // (On fall-through, we have failed the element type check.)
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
    // Emit GC store barriers for the oops we have copied (length_arg + count),
    // and report their number to the caller.
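    // (rax == -1^K is just the bitwise complement ~K, so a caller can
    // recover the number of elements actually copied as ~rax.)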
    __ addl(count, length_arg);       // transfers = (length - remaining)
    __ movl2ptr(rax, count);          // save the value
    __ notptr(rax);                   // report (-1^K) to caller
    __ movptr(to, to_arg);            // reload
    assert_different_registers(to, count, rax);
    gen_write_ref_array_post_barrier(to, count);
    __ jmpb(L_done);

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ movl2ptr(count, length_arg);
    __ movptr(to, to_arg);            // reload
    gen_write_ref_array_post_barrier(to, count);
    __ xorptr(rax, rax);              // return 0 on success

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //     4(rsp)   - source array address
  //     8(rsp)   - destination array address
  //    12(rsp)   - byte count, can be zero
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1  -  need to call System.arraycopy
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry,
                               address short_copy_entry,
                               address int_copy_entry,
                               address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register from  = rax;  // source array address
    const Register to    = rdx;  // destination array address
    const Register count = rcx;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    Address  from_arg(rsp, 12+ 4);  // from
    Address    to_arg(rsp, 12+ 8);  // to
    Address count_arg(rsp, 12+12);  // byte count

    // Load up:
    __ movptr(from ,  from_arg);
    __ movptr(to   ,    to_arg);
    __ movl2ptr(count, count_arg);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    const Register bits = rsi;
    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, count);

    __ testl(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testl(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testl(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(count, LogBytesPerShort); // size => short_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(count, LogBytesPerInt);   // size => int_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(count, LogBytesPerLong);  // size => qword_count
    __ movl(count_arg, count);          // update 'count'
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
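
  // (Note for the checks below: src_pos, dst_pos and length have already been
  // verified non-negative by the caller, so 'pos + length' cannot wrap a
  // 32-bit unsigned value; a single unsigned compare against the array
  // length, using jcc(above), therefore also rejects any signed overflow of
  // the addition.)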

  // Perform range checks on the proposed arraycopy.
  // Smashes src_pos and dst_pos.  (Uses them up for temps.)
  void arraycopy_range_checks(Register src,
                              Register src_pos,
                              Register dst,
                              Register dst_pos,
                              Address& length,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");
    const Register src_end = src_pos;   // source array end position
    const Register dst_end = dst_pos;   // destination array end position
    __ addl(src_end, length); // src_pos + length
    __ addl(dst_end, length); // dst_pos + length

    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }


  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //     4(rsp)    -  src oop
  //     8(rsp)    -  src_pos
  //    12(rsp)    -  dst oop
  //    16(rsp)    -  dst_pos
  //    20(rsp)    -  element count
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_failed_0, L_objArray;

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    // Input values
    Address SRC     (rsp, 12+ 4);
    Address SRC_POS (rsp, 12+ 8);
    Address DST     (rsp, 12+12);
    Address DST_POS (rsp, 12+16);
    Address LENGTH  (rsp, 12+20);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
1738 // 1739 1740 const Register src = rax; // source array oop 1741 const Register src_pos = rsi; 1742 const Register dst = rdx; // destination array oop 1743 const Register dst_pos = rdi; 1744 const Register length = rcx; // transfer count 1745 1746 // if (src == NULL) return -1; 1747 __ movptr(src, SRC); // src oop 1748 __ testptr(src, src); 1749 __ jccb(Assembler::zero, L_failed_0); 1750 1751 // if (src_pos < 0) return -1; 1752 __ movl2ptr(src_pos, SRC_POS); // src_pos 1753 __ testl(src_pos, src_pos); 1754 __ jccb(Assembler::negative, L_failed_0); 1755 1756 // if (dst == NULL) return -1; 1757 __ movptr(dst, DST); // dst oop 1758 __ testptr(dst, dst); 1759 __ jccb(Assembler::zero, L_failed_0); 1760 1761 // if (dst_pos < 0) return -1; 1762 __ movl2ptr(dst_pos, DST_POS); // dst_pos 1763 __ testl(dst_pos, dst_pos); 1764 __ jccb(Assembler::negative, L_failed_0); 1765 1766 // if (length < 0) return -1; 1767 __ movl2ptr(length, LENGTH); // length 1768 __ testl(length, length); 1769 __ jccb(Assembler::negative, L_failed_0); 1770 1771 // if (src->klass() == NULL) return -1; 1772 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); 1773 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); 1774 const Register rcx_src_klass = rcx; // array klass 1775 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); 1776 1777 #ifdef ASSERT 1778 // assert(src->klass() != NULL); 1779 BLOCK_COMMENT("assert klasses not null"); 1780 { Label L1, L2; 1781 __ testptr(rcx_src_klass, rcx_src_klass); 1782 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL 1783 __ bind(L1); 1784 __ stop("broken null klass"); 1785 __ bind(L2); 1786 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); 1787 __ jccb(Assembler::equal, L1); // this would be broken also 1788 BLOCK_COMMENT("assert done"); 1789 } 1790 #endif //ASSERT 1791 1792 // Load layout helper (32-bits) 1793 // 1794 // |array_tag| | header_size | element_type | |log2_element_size| 1795 // 32 30 24 16 8 2 0 1796 // 1797 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 1798 // 1799 1800 int lh_offset = in_bytes(Klass::layout_helper_offset()); 1801 Address src_klass_lh_addr(rcx_src_klass, lh_offset); 1802 1803 // Handle objArrays completely differently... 1804 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 1805 __ cmpl(src_klass_lh_addr, objArray_lh); 1806 __ jcc(Assembler::equal, L_objArray); 1807 1808 // if (src->klass() != dst->klass()) return -1; 1809 __ cmpptr(rcx_src_klass, dst_klass_addr); 1810 __ jccb(Assembler::notEqual, L_failed_0); 1811 1812 const Register rcx_lh = rcx; // layout helper 1813 assert(rcx_lh == rcx_src_klass, "known alias"); 1814 __ movl(rcx_lh, src_klass_lh_addr); 1815 1816 // if (!src->is_Array()) return -1; 1817 __ cmpl(rcx_lh, Klass::_lh_neutral_value); 1818 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp 1819 1820 // At this point, it is known to be a typeArray (array_tag 0x3). 
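// A host-side sanity sketch of the layout-helper encoding relied on here
// (an illustrative addition, assuming Klass's static layout-helper
// accessors match the bit diagram above): a byte array must decode as a
// typeArray with log2 element size 0.
#ifdef ASSERT
    { jint lh_sketch = Klass::array_layout_helper(T_BYTE);
      assert(Klass::layout_helper_is_typeArray(lh_sketch), "byte[] must be a typeArray");
      assert(Klass::layout_helper_log2_element_size(lh_sketch) == 0, "log2(sizeof(jbyte)) must be 0");
    }
#endif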
1821 #ifdef ASSERT 1822 { Label L; 1823 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 1824 __ jcc(Assembler::greaterEqual, L); // signed cmp 1825 __ stop("must be a primitive array"); 1826 __ bind(L); 1827 } 1828 #endif 1829 1830 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh); 1831 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1832 1833 // typeArrayKlass 1834 // 1835 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 1836 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 1837 // 1838 const Register rsi_offset = rsi; // array offset 1839 const Register src_array = src; // src array offset 1840 const Register dst_array = dst; // dst array offset 1841 const Register rdi_elsize = rdi; // log2 element size 1842 1843 __ mov(rsi_offset, rcx_lh); 1844 __ shrptr(rsi_offset, Klass::_lh_header_size_shift); 1845 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset 1846 __ addptr(src_array, rsi_offset); // src array offset 1847 __ addptr(dst_array, rsi_offset); // dst array offset 1848 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize 1849 1850 // next registers should be set before the jump to corresponding stub 1851 const Register from = src; // source array address 1852 const Register to = dst; // destination array address 1853 const Register count = rcx; // elements count 1854 // some of them should be duplicated on stack 1855 #define FROM Address(rsp, 12+ 4) 1856 #define TO Address(rsp, 12+ 8) // Not used now 1857 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy 1858 1859 BLOCK_COMMENT("scale indexes to element size"); 1860 __ movl2ptr(rsi, SRC_POS); // src_pos 1861 __ shlptr(rsi); // src_pos << rcx (log2 elsize) 1862 assert(src_array == from, ""); 1863 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize 1864 __ movl2ptr(rdi, DST_POS); // dst_pos 1865 __ shlptr(rdi); // dst_pos << rcx (log2 elsize) 1866 assert(dst_array == to, ""); 1867 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize 1868 __ movptr(FROM, from); // src_addr 1869 __ mov(rdi_elsize, rcx_lh); // log2 elsize 1870 __ movl2ptr(count, LENGTH); // elements count 1871 1872 BLOCK_COMMENT("choose copy loop based on element size"); 1873 __ cmpl(rdi_elsize, 0); 1874 1875 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy)); 1876 __ cmpl(rdi_elsize, LogBytesPerShort); 1877 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy)); 1878 __ cmpl(rdi_elsize, LogBytesPerInt); 1879 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); 1880 #ifdef ASSERT 1881 __ cmpl(rdi_elsize, LogBytesPerLong); 1882 __ jccb(Assembler::notEqual, L_failed); 1883 #endif 1884 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 
1885 __ pop(rsi); 1886 __ jump(RuntimeAddress(entry_jlong_arraycopy)); 1887 1888 __ BIND(L_failed); 1889 __ xorptr(rax, rax); 1890 __ notptr(rax); // return -1 1891 __ pop(rdi); 1892 __ pop(rsi); 1893 __ leave(); // required for proper stackwalking of RuntimeStub frame 1894 __ ret(0); 1895 1896 // objArrayKlass 1897 __ BIND(L_objArray); 1898 // live at this point: rcx_src_klass, src[_pos], dst[_pos] 1899 1900 Label L_plain_copy, L_checkcast_copy; 1901 // test array classes for subtyping 1902 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality 1903 __ jccb(Assembler::notEqual, L_checkcast_copy); 1904 1905 // Identically typed arrays can be copied without element-wise checks. 1906 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); 1907 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1908 1909 __ BIND(L_plain_copy); 1910 __ movl2ptr(count, LENGTH); // elements count 1911 __ movl2ptr(src_pos, SRC_POS); // reload src_pos 1912 __ lea(from, Address(src, src_pos, Address::times_ptr, 1913 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 1914 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos 1915 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1916 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 1917 __ movptr(FROM, from); // src_addr 1918 __ movptr(TO, to); // dst_addr 1919 __ movl(COUNT, count); // count 1920 __ jump(RuntimeAddress(entry_oop_arraycopy)); 1921 1922 __ BIND(L_checkcast_copy); 1923 // live at this point: rcx_src_klass, dst[_pos], src[_pos] 1924 { 1925 // Handy offsets: 1926 int ek_offset = in_bytes(objArrayKlass::element_klass_offset()); 1927 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1928 1929 Register rsi_dst_klass = rsi; 1930 Register rdi_temp = rdi; 1931 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); 1932 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); 1933 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); 1934 1935 // Before looking at dst.length, make sure dst is also an objArray. 1936 __ movptr(rsi_dst_klass, dst_klass_addr); 1937 __ cmpl(dst_klass_lh_addr, objArray_lh); 1938 __ jccb(Assembler::notEqual, L_failed); 1939 1940 // It is safe to examine both src.length and dst.length. 1941 __ movl2ptr(src_pos, SRC_POS); // reload rsi 1942 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1943 // (Now src_pos and dst_pos are killed, but not src and dst.) 1944 1945 // We'll need this temp (don't forget to pop it after the type check). 1946 __ push(rbx); 1947 Register rbx_src_klass = rbx; 1948 1949 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx 1950 __ movptr(rsi_dst_klass, dst_klass_addr); 1951 Address super_check_offset_addr(rsi_dst_klass, sco_offset); 1952 Label L_fail_array_check; 1953 generate_type_check(rbx_src_klass, 1954 super_check_offset_addr, dst_klass_addr, 1955 rdi_temp, NULL, &L_fail_array_check); 1956 // (On fall-through, we have passed the array type check.) 1957 __ pop(rbx); 1958 __ jmp(L_plain_copy); 1959 1960 __ BIND(L_fail_array_check); 1961 // Reshuffle arguments so we can call checkcast_arraycopy: 1962 1963 // match initial saves for checkcast_arraycopy 1964 // push(rsi); // already done; see above 1965 // push(rdi); // already done; see above 1966 // push(rbx); // already done; see above 1967 1968 // Marshal outgoing arguments now, freeing registers. 
1969 Address from_arg(rsp, 16+ 4); // from 1970 Address to_arg(rsp, 16+ 8); // to 1971 Address length_arg(rsp, 16+12); // elements count 1972 Address ckoff_arg(rsp, 16+16); // super_check_offset 1973 Address ckval_arg(rsp, 16+20); // super_klass 1974 1975 Address SRC_POS_arg(rsp, 16+ 8); 1976 Address DST_POS_arg(rsp, 16+16); 1977 Address LENGTH_arg(rsp, 16+20); 1978 // push rbx, changed the incoming offsets (why not just use rbp,??) 1979 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); 1980 1981 __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); 1982 __ movl2ptr(length, LENGTH_arg); // reload elements count 1983 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos 1984 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos 1985 1986 __ movptr(ckval_arg, rbx); // destination element type 1987 __ movl(rbx, Address(rbx, sco_offset)); 1988 __ movl(ckoff_arg, rbx); // corresponding class check offset 1989 1990 __ movl(length_arg, length); // outgoing length argument 1991 1992 __ lea(from, Address(src, src_pos, Address::times_ptr, 1993 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1994 __ movptr(from_arg, from); 1995 1996 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1997 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1998 __ movptr(to_arg, to); 1999 __ jump(RuntimeAddress(entry_checkcast_arraycopy)); 2000 } 2001 2002 return start; 2003 } 2004 2005 void generate_arraycopy_stubs() { 2006 address entry; 2007 address entry_jbyte_arraycopy; 2008 address entry_jshort_arraycopy; 2009 address entry_jint_arraycopy; 2010 address entry_oop_arraycopy; 2011 address entry_jlong_arraycopy; 2012 address entry_checkcast_arraycopy; 2013 2014 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = 2015 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, 2016 "arrayof_jbyte_disjoint_arraycopy"); 2017 StubRoutines::_arrayof_jbyte_arraycopy = 2018 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, 2019 NULL, "arrayof_jbyte_arraycopy"); 2020 StubRoutines::_jbyte_disjoint_arraycopy = 2021 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, 2022 "jbyte_disjoint_arraycopy"); 2023 StubRoutines::_jbyte_arraycopy = 2024 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, 2025 &entry_jbyte_arraycopy, "jbyte_arraycopy"); 2026 2027 StubRoutines::_arrayof_jshort_disjoint_arraycopy = 2028 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, 2029 "arrayof_jshort_disjoint_arraycopy"); 2030 StubRoutines::_arrayof_jshort_arraycopy = 2031 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, 2032 NULL, "arrayof_jshort_arraycopy"); 2033 StubRoutines::_jshort_disjoint_arraycopy = 2034 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, 2035 "jshort_disjoint_arraycopy"); 2036 StubRoutines::_jshort_arraycopy = 2037 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, 2038 &entry_jshort_arraycopy, "jshort_arraycopy"); 2039 2040 // Next arrays are always aligned on 4 bytes at least. 
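// A minimal sanity sketch of that claim (an illustrative addition, assuming
// the usual x86-32 arrayOopDesc layout): the int-array element base must
// itself be 4-byte aligned, which is why the plain and "arrayof" int/oop
// entry points can be aliased further below.
assert(arrayOopDesc::base_offset_in_bytes(T_INT) % BytesPerInt == 0,
       "int array elements must start 4-byte aligned");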
2041 StubRoutines::_jint_disjoint_arraycopy = 2042 generate_disjoint_copy(T_INT, true, Address::times_4, &entry, 2043 "jint_disjoint_arraycopy"); 2044 StubRoutines::_jint_arraycopy = 2045 generate_conjoint_copy(T_INT, true, Address::times_4, entry, 2046 &entry_jint_arraycopy, "jint_arraycopy"); 2047 2048 StubRoutines::_oop_disjoint_arraycopy = 2049 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2050 "oop_disjoint_arraycopy"); 2051 StubRoutines::_oop_arraycopy = 2052 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2053 &entry_oop_arraycopy, "oop_arraycopy"); 2054 2055 StubRoutines::_oop_disjoint_arraycopy_uninit = 2056 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2057 "oop_disjoint_arraycopy_uninit", 2058 /*dest_uninitialized*/true); 2059 StubRoutines::_oop_arraycopy_uninit = 2060 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2061 NULL, "oop_arraycopy_uninit", 2062 /*dest_uninitialized*/true); 2063 2064 StubRoutines::_jlong_disjoint_arraycopy = 2065 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); 2066 StubRoutines::_jlong_arraycopy = 2067 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, 2068 "jlong_arraycopy"); 2069 2070 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2071 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2072 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2073 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2074 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2075 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2076 2077 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2078 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2079 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2080 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2081 2082 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2083 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2084 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2085 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2086 2087 StubRoutines::_checkcast_arraycopy = 2088 generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2089 StubRoutines::_checkcast_arraycopy_uninit = 2090 generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); 2091 2092 StubRoutines::_unsafe_arraycopy = 2093 generate_unsafe_copy("unsafe_arraycopy", 2094 entry_jbyte_arraycopy, 2095 entry_jshort_arraycopy, 2096 entry_jint_arraycopy, 2097 entry_jlong_arraycopy); 2098 2099 StubRoutines::_generic_arraycopy = 2100 generate_generic_copy("generic_arraycopy", 2101 entry_jbyte_arraycopy, 2102 entry_jshort_arraycopy, 2103 entry_jint_arraycopy, 2104 entry_oop_arraycopy, 2105 entry_jlong_arraycopy, 2106 entry_checkcast_arraycopy); 2107 } 2108 2109 void generate_math_stubs() { 2110 { 2111 StubCodeMark mark(this, "StubRoutines", "log"); 2112 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2113 2114 __ fld_d(Address(rsp, 4)); 2115 __ flog(); 2116 __ ret(0); 2117 } 2118 { 2119 StubCodeMark mark(this, "StubRoutines", "log10"); 2120 StubRoutines::_intrinsic_log10 = (double 
(*)(double)) __ pc(); 2121 2122 __ fld_d(Address(rsp, 4)); 2123 __ flog10(); 2124 __ ret(0); 2125 } 2126 { 2127 StubCodeMark mark(this, "StubRoutines", "sin"); 2128 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2129 2130 __ fld_d(Address(rsp, 4)); 2131 __ trigfunc('s'); 2132 __ ret(0); 2133 } 2134 { 2135 StubCodeMark mark(this, "StubRoutines", "cos"); 2136 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2137 2138 __ fld_d(Address(rsp, 4)); 2139 __ trigfunc('c'); 2140 __ ret(0); 2141 } 2142 { 2143 StubCodeMark mark(this, "StubRoutines", "tan"); 2144 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 2145 2146 __ fld_d(Address(rsp, 4)); 2147 __ trigfunc('t'); 2148 __ ret(0); 2149 } 2150 { 2151 StubCodeMark mark(this, "StubRoutines", "exp"); 2152 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 2153 2154 __ fld_d(Address(rsp, 4)); 2155 __ exp_with_fallback(0); 2156 __ ret(0); 2157 } 2158 { 2159 StubCodeMark mark(this, "StubRoutines", "pow"); 2160 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 2161 2162 __ fld_d(Address(rsp, 12)); 2163 __ fld_d(Address(rsp, 4)); 2164 __ pow_with_fallback(0); 2165 __ ret(0); 2166 } 2167 } 2168 2169 // AES intrinsic stubs 2170 enum {AESBlockSize = 16}; 2171 2172 address generate_key_shuffle_mask() { 2173 __ align(16); 2174 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2175 address start = __ pc(); 2176 __ emit_data(0x00010203, relocInfo::none, 0 ); 2177 __ emit_data(0x04050607, relocInfo::none, 0 ); 2178 __ emit_data(0x08090a0b, relocInfo::none, 0 ); 2179 __ emit_data(0x0c0d0e0f, relocInfo::none, 0 ); 2180 return start; 2181 } 2182 2183 // Utility routine for loading a 128-bit key word in little endian format 2184 // can optionally specify that the shuffle mask is already in an XMM register 2185 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2186 __ movdqu(xmmdst, Address(key, offset)); 2187 if (xmm_shuf_mask != NULL) { 2188 __ pshufb(xmmdst, xmm_shuf_mask); 2189 } else { 2190 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2191 } 2192 } 2193 2194 // aesenc using specified key+offset 2195 // can optionally specify that the shuffle mask is already in an XMM register 2196 void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2197 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2198 __ aesenc(xmmdst, xmmtmp); 2199 } 2200 2201 // aesdec using specified key+offset 2202 // can optionally specify that the shuffle mask is already in an XMM register 2203 void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2204 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2205 __ aesdec(xmmdst, xmmtmp); 2206 } 2207 2208 2209 // Arguments: 2210 // 2211 // Inputs: 2212 // c_rarg0 - source byte array address 2213 // c_rarg1 - destination byte array address 2214 // c_rarg2 - K (key) in little endian int array 2215 // 2216 address generate_aescrypt_encryptBlock() { 2217 assert(UseAES, "need AES instructions and misaligned SSE support"); 2218 __ align(CodeEntryAlignment); 2219 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2220 Label L_doLast; 2221 address start = __ pc(); 2222 2223 const Register from = rdx; // source array address 2224 const Register to = rdx; // destination array address 2225 const Register key = rcx; // key array address 2226 const Register keylen = rax; 2227 const
Address from_param(rbp, 8+0); 2228 const Address to_param (rbp, 8+4); 2229 const Address key_param (rbp, 8+8); 2230 2231 const XMMRegister xmm_result = xmm0; 2232 const XMMRegister xmm_key_shuf_mask = xmm1; 2233 const XMMRegister xmm_temp1 = xmm2; 2234 const XMMRegister xmm_temp2 = xmm3; 2235 const XMMRegister xmm_temp3 = xmm4; 2236 const XMMRegister xmm_temp4 = xmm5; 2237 2238 __ enter(); // required for proper stackwalking of RuntimeStub frame 2239 __ movptr(from, from_param); 2240 __ movptr(key, key_param); 2241 2242 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2243 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2244 2245 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2246 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 2247 __ movptr(to, to_param); 2248 2249 // For encryption, the java expanded key ordering is just what we need 2250 2251 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 2252 __ pxor(xmm_result, xmm_temp1); 2253 2254 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2255 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2256 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2257 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2258 2259 __ aesenc(xmm_result, xmm_temp1); 2260 __ aesenc(xmm_result, xmm_temp2); 2261 __ aesenc(xmm_result, xmm_temp3); 2262 __ aesenc(xmm_result, xmm_temp4); 2263 2264 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2265 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2266 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2267 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2268 2269 __ aesenc(xmm_result, xmm_temp1); 2270 __ aesenc(xmm_result, xmm_temp2); 2271 __ aesenc(xmm_result, xmm_temp3); 2272 __ aesenc(xmm_result, xmm_temp4); 2273 2274 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2275 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2276 2277 __ cmpl(keylen, 44); 2278 __ jccb(Assembler::equal, L_doLast); 2279 2280 __ aesenc(xmm_result, xmm_temp1); 2281 __ aesenc(xmm_result, xmm_temp2); 2282 2283 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2284 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2285 2286 __ cmpl(keylen, 52); 2287 __ jccb(Assembler::equal, L_doLast); 2288 2289 __ aesenc(xmm_result, xmm_temp1); 2290 __ aesenc(xmm_result, xmm_temp2); 2291 2292 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2293 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2294 2295 __ BIND(L_doLast); 2296 __ aesenc(xmm_result, xmm_temp1); 2297 __ aesenclast(xmm_result, xmm_temp2); 2298 __ movdqu(Address(to, 0), xmm_result); // store the result 2299 __ xorptr(rax, rax); // return 0 2300 __ leave(); // required for proper stackwalking of RuntimeStub frame 2301 __ ret(0); 2302 2303 return start; 2304 } 2305 2306 2307 // Arguments: 2308 // 2309 // Inputs: 2310 // c_rarg0 - source byte array address 2311 // c_rarg1 - destination byte array address 2312 // c_rarg2 - K (key) in little endian int array 2313 // 2314 address generate_aescrypt_decryptBlock() { 2315 assert(UseAES, "need AES instructions and misaligned SSE support"); 2316 __ align(CodeEntryAlignment); 2317 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 2318 Label L_doLast; 2319 address start = __ pc(); 2320 2321 const Register from = rdx; // source array address 2322 const Register to = rdx; // destination array address 2323 const Register key = rcx; // key array address 2324 const Register keylen = rax; 
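// Key-schedule size sketch (the expanded K array holds (rounds + 1) round
// keys of 4 ints each, matching the {44, 52, 60} comment below):
//   AES-128: 10 rounds -> 11 * 4 = 44 ints
//   AES-192: 12 rounds -> 13 * 4 = 52 ints
//   AES-256: 14 rounds -> 15 * 4 = 60 ints
// The cmpl(keylen, 44) / cmpl(keylen, 52) branches below select the path.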
2325 const Address from_param(rbp, 8+0); 2326 const Address to_param (rbp, 8+4); 2327 const Address key_param (rbp, 8+8); 2328 2329 const XMMRegister xmm_result = xmm0; 2330 const XMMRegister xmm_key_shuf_mask = xmm1; 2331 const XMMRegister xmm_temp1 = xmm2; 2332 const XMMRegister xmm_temp2 = xmm3; 2333 const XMMRegister xmm_temp3 = xmm4; 2334 const XMMRegister xmm_temp4 = xmm5; 2335 2336 __ enter(); // required for proper stackwalking of RuntimeStub frame 2337 __ movptr(from, from_param); 2338 __ movptr(key, key_param); 2339 2340 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2341 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2342 2343 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2344 __ movdqu(xmm_result, Address(from, 0)); 2345 __ movptr(to, to_param); 2346 2347 // for decryption java expanded key ordering is rotated one position from what we want 2348 // so we start from 0x10 here and hit 0x00 last 2349 // we don't know if the key is aligned, hence not using load-execute form 2350 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2351 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2352 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2353 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2354 2355 __ pxor (xmm_result, xmm_temp1); 2356 __ aesdec(xmm_result, xmm_temp2); 2357 __ aesdec(xmm_result, xmm_temp3); 2358 __ aesdec(xmm_result, xmm_temp4); 2359 2360 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2361 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2362 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2363 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2364 2365 __ aesdec(xmm_result, xmm_temp1); 2366 __ aesdec(xmm_result, xmm_temp2); 2367 __ aesdec(xmm_result, xmm_temp3); 2368 __ aesdec(xmm_result, xmm_temp4); 2369 2370 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2371 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2372 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 2373 2374 __ cmpl(keylen, 44); 2375 __ jccb(Assembler::equal, L_doLast); 2376 2377 __ aesdec(xmm_result, xmm_temp1); 2378 __ aesdec(xmm_result, xmm_temp2); 2379 2380 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2381 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2382 2383 __ cmpl(keylen, 52); 2384 __ jccb(Assembler::equal, L_doLast); 2385 2386 __ aesdec(xmm_result, xmm_temp1); 2387 __ aesdec(xmm_result, xmm_temp2); 2388 2389 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2390 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2391 2392 __ BIND(L_doLast); 2393 __ aesdec(xmm_result, xmm_temp1); 2394 __ aesdec(xmm_result, xmm_temp2); 2395 2396 // for decryption the aesdeclast operation is always on key+0x00 2397 __ aesdeclast(xmm_result, xmm_temp3); 2398 __ movdqu(Address(to, 0), xmm_result); // store the result 2399 __ xorptr(rax, rax); // return 0 2400 __ leave(); // required for proper stackwalking of RuntimeStub frame 2401 __ ret(0); 2402 2403 return start; 2404 } 2405 2406 void handleSOERegisters(bool saving) { 2407 const int saveFrameSizeInBytes = 4 * wordSize; 2408 const Address saved_rbx (rbp, -3 * wordSize); 2409 const Address saved_rsi (rbp, -2 * wordSize); 2410 const Address saved_rdi (rbp, -1 * wordSize); 2411 2412 if (saving) { 2413 __ subptr(rsp, saveFrameSizeInBytes); 2414 __ movptr(saved_rsi, rsi); 2415 __ movptr(saved_rdi, rdi); 2416 __ movptr(saved_rbx, rbx); 2417 } else { 2418 // restoring 2419 __ movptr(rsi, saved_rsi); 2420 __ 
movptr(rdi, saved_rdi); 2421 __ movptr(rbx, saved_rbx); 2422 } 2423 } 2424 2425 // Arguments: 2426 // 2427 // Inputs: 2428 // c_rarg0 - source byte array address 2429 // c_rarg1 - destination byte array address 2430 // c_rarg2 - K (key) in little endian int array 2431 // c_rarg3 - r vector byte array address 2432 // c_rarg4 - input length 2433 // 2434 address generate_cipherBlockChaining_encryptAESCrypt() { 2435 assert(UseAES, "need AES instructions and misaligned SSE support"); 2436 __ align(CodeEntryAlignment); 2437 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 2438 address start = __ pc(); 2439 2440 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 2441 const Register from = rsi; // source array address 2442 const Register to = rdx; // destination array address 2443 const Register key = rcx; // key array address 2444 const Register rvec = rdi; // r byte array initialized from initvector array address 2445 // and left with the results of the last encryption block 2446 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2447 const Register pos = rax; 2448 2449 // xmm register assignments for the loops below 2450 const XMMRegister xmm_result = xmm0; 2451 const XMMRegister xmm_temp = xmm1; 2452 // first 6 keys preloaded into xmm2-xmm7 2453 const int XMM_REG_NUM_KEY_FIRST = 2; 2454 const int XMM_REG_NUM_KEY_LAST = 7; 2455 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 2456 2457 __ enter(); // required for proper stackwalking of RuntimeStub frame 2458 handleSOERegisters(true /*saving*/); 2459 2460 // load registers from incoming parameters 2461 const Address from_param(rbp, 8+0); 2462 const Address to_param (rbp, 8+4); 2463 const Address key_param (rbp, 8+8); 2464 const Address rvec_param (rbp, 8+12); 2465 const Address len_param (rbp, 8+16); 2466 __ movptr(from , from_param); 2467 __ movptr(to , to_param); 2468 __ movptr(key , key_param); 2469 __ movptr(rvec , rvec_param); 2470 __ movptr(len_reg , len_param); 2471 2472 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 2473 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2474 // load up xmm regs 2 thru 7 with keys 0-5 2475 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2476 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 2477 offset += 0x10; 2478 } 2479 2480 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 2481 2482 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 2483 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2484 __ cmpl(rax, 44); 2485 __ jcc(Assembler::notEqual, L_key_192_256); 2486 2487 // 128 bit code follows here 2488 __ movl(pos, 0); 2489 __ align(OptoLoopAlignment); 2490 __ BIND(L_loopTop_128); 2491 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2492 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2493 2494 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2495 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2496 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2497 } 2498 for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) { 2499 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2500 } 2501 
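// Round-structure note for this 128-bit path (derived from the offsets used
// above): round key 0 (0x00) was xor-ed in, keys 1-5 (0x10-0x50) came from
// the preloaded xmm registers, keys 6-9 (0x60-0x90) from the loop just
// above, and key 10 (0xa0) feeds the aesenclast below -- ten rounds total.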
load_key(xmm_temp, key, 0xa0); 2502 __ aesenclast(xmm_result, xmm_temp); 2503 2504 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2505 // no need to store r to memory until we exit 2506 __ addptr(pos, AESBlockSize); 2507 __ subptr(len_reg, AESBlockSize); 2508 __ jcc(Assembler::notEqual, L_loopTop_128); 2509 2510 __ BIND(L_exit); 2511 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 2512 2513 handleSOERegisters(false /*restoring*/); 2514 __ movl(rax, 0); // return 0 (why?) 2515 __ leave(); // required for proper stackwalking of RuntimeStub frame 2516 __ ret(0); 2517 2518 __ BIND(L_key_192_256); 2519 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 2520 __ cmpl(rax, 52); 2521 __ jcc(Assembler::notEqual, L_key_256); 2522 2523 // 192-bit code follows here (could be changed to use more xmm registers) 2524 __ movl(pos, 0); 2525 __ align(OptoLoopAlignment); 2526 __ BIND(L_loopTop_192); 2527 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2528 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2529 2530 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2531 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2532 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2533 } 2534 for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) { 2535 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2536 } 2537 load_key(xmm_temp, key, 0xc0); 2538 __ aesenclast(xmm_result, xmm_temp); 2539 2540 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2541 // no need to store r to memory until we exit 2542 __ addptr(pos, AESBlockSize); 2543 __ subptr(len_reg, AESBlockSize); 2544 __ jcc(Assembler::notEqual, L_loopTop_192); 2545 __ jmp(L_exit); 2546 2547 __ BIND(L_key_256); 2548 // 256-bit code follows here (could be changed to use more xmm registers) 2549 __ movl(pos, 0); 2550 __ align(OptoLoopAlignment); 2551 __ BIND(L_loopTop_256); 2552 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2553 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2554 2555 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2556 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2557 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2558 } 2559 for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) { 2560 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2561 } 2562 load_key(xmm_temp, key, 0xe0); 2563 __ aesenclast(xmm_result, xmm_temp); 2564 2565 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2566 // no need to store r to memory until we exit 2567 __ addptr(pos, AESBlockSize); 2568 __ subptr(len_reg, AESBlockSize); 2569 __ jcc(Assembler::notEqual, L_loopTop_256); 2570 __ jmp(L_exit); 2571 2572 return start; 2573 } 2574 2575 2576 // CBC AES Decryption. 2577 // In 32-bit stub, because of lack of registers we do not try to parallelize 4 blocks at a time. 
2578 // 2579 // Arguments: 2580 // 2581 // Inputs: 2582 // c_rarg0 - source byte array address 2583 // c_rarg1 - destination byte array address 2584 // c_rarg2 - K (key) in little endian int array 2585 // c_rarg3 - r vector byte array address 2586 // c_rarg4 - input length 2587 // 2588 2589 address generate_cipherBlockChaining_decryptAESCrypt() { 2590 assert(UseAES, "need AES instructions and misaligned SSE support"); 2591 __ align(CodeEntryAlignment); 2592 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 2593 address start = __ pc(); 2594 2595 Label L_exit, L_key_192_256, L_key_256; 2596 Label L_singleBlock_loopTop_128; 2597 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 2598 const Register from = rsi; // source array address 2599 const Register to = rdx; // destination array address 2600 const Register key = rcx; // key array address 2601 const Register rvec = rdi; // r byte array initialized from initvector array address 2602 // and left with the results of the last encryption block 2603 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2604 const Register pos = rax; 2605 2606 // xmm register assignments for the loops below 2607 const XMMRegister xmm_result = xmm0; 2608 const XMMRegister xmm_temp = xmm1; 2609 // first 6 keys preloaded into xmm2-xmm7 2610 const int XMM_REG_NUM_KEY_FIRST = 2; 2611 const int XMM_REG_NUM_KEY_LAST = 7; 2612 const int FIRST_NON_REG_KEY_offset = 0x70; 2613 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 2614 2615 __ enter(); // required for proper stackwalking of RuntimeStub frame 2616 handleSOERegisters(true /*saving*/); 2617 2618 // load registers from incoming parameters 2619 const Address from_param(rbp, 8+0); 2620 const Address to_param (rbp, 8+4); 2621 const Address key_param (rbp, 8+8); 2622 const Address rvec_param (rbp, 8+12); 2623 const Address len_param (rbp, 8+16); 2624 __ movptr(from , from_param); 2625 __ movptr(to , to_param); 2626 __ movptr(key , key_param); 2627 __ movptr(rvec , rvec_param); 2628 __ movptr(len_reg , len_param); 2629 2630 // the java expanded key ordering is rotated one position from what we want 2631 // so we start from 0x10 here and hit 0x00 last 2632 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 2633 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2634 // load up xmm regs 2 thru 7 with the first 6 keys (offsets 0x10 - 0x60) 2635 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2636 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 2637 offset += 0x10; 2638 } 2639 2640 // inside here, use the rvec register to point to previous block cipher 2641 // with which we xor at the end of each newly decrypted block 2642 const Register prev_block_cipher_ptr = rvec; 2643 2644 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 2645 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2646 __ cmpl(rax, 44); 2647 __ jcc(Assembler::notEqual, L_key_192_256); 2648 2649 2650 // 128-bit key code follows here (single-block loop; the 32-bit stub does not parallelize) 2651 __ movl(pos, 0); 2652 __ align(OptoLoopAlignment); 2653 __ BIND(L_singleBlock_loopTop_128); 2654 __ cmpptr(len_reg, 0); // any blocks left??
2655 __ jcc(Assembler::equal, L_exit); 2656 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2657 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 2658 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2659 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2660 } 2661 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) { // 128-bit runs up to key offset a0 2662 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2663 } 2664 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2665 __ aesdeclast(xmm_result, xmm_temp); 2666 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2667 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2668 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2669 // no need to store r to memory until we exit 2670 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2671 __ addptr(pos, AESBlockSize); 2672 __ subptr(len_reg, AESBlockSize); 2673 __ jmp(L_singleBlock_loopTop_128); 2674 2675 2676 __ BIND(L_exit); 2677 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2678 __ movptr(rvec , rvec_param); // restore this since used in loop 2679 __ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object 2680 handleSOERegisters(false /*restoring*/); 2681 __ movl(rax, 0); // return 0 (why?) 2682 __ leave(); // required for proper stackwalking of RuntimeStub frame 2683 __ ret(0); 2684 2685 2686 __ BIND(L_key_192_256); 2687 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 2688 __ cmpl(rax, 52); 2689 __ jcc(Assembler::notEqual, L_key_256); 2690 2691 // 192-bit code follows here (could be optimized to use parallelism) 2692 __ movl(pos, 0); 2693 __ align(OptoLoopAlignment); 2694 __ BIND(L_singleBlock_loopTop_192); 2695 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2696 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 2697 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2698 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2699 } 2700 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) { // 192-bit runs up to key offset c0 2701 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2702 } 2703 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2704 __ aesdeclast(xmm_result, xmm_temp); 2705 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2706 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2707 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2708 // no need to store r to memory until we exit 2709 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2710 __ addptr(pos, AESBlockSize); 2711 __ subptr(len_reg, AESBlockSize); 2712 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 2713 __ jmp(L_exit); 2714 2715 __ BIND(L_key_256); 2716 // 256-bit code follows here (could be optimized to use parallelism) 2717 __ movl(pos, 0); 2718 __ align(OptoLoopAlignment); 2719 __ BIND(L_singleBlock_loopTop_256); 2720 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2721 __ pxor (xmm_result, xmm_key_first); // do the aes 
dec rounds 2722 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2723 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2724 } 2725 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) { // 256-bit runs up to key offset e0 2726 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2727 } 2728 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2729 __ aesdeclast(xmm_result, xmm_temp); 2730 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2731 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2732 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2733 // no need to store r to memory until we exit 2734 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2735 __ addptr(pos, AESBlockSize); 2736 __ subptr(len_reg, AESBlockSize); 2737 __ jcc(Assembler::notEqual, L_singleBlock_loopTop_256); 2738 __ jmp(L_exit); 2739 2740 return start; 2741 } 2742 2743 2744 public: 2745 // Information about frame layout at time of blocking runtime call. 2746 // Note that we only have to preserve callee-saved registers since 2747 // the compilers are responsible for supplying a continuation point 2748 // if they expect all registers to be preserved. 2749 enum layout { 2750 thread_off, // last_java_sp 2751 arg1_off, 2752 arg2_off, 2753 rbp_off, // callee saved register 2754 ret_pc, 2755 framesize 2756 }; 2757 2758 private: 2759 2760 #undef __ 2761 #define __ masm-> 2762 2763 //------------------------------------------------------------------------------------------------------------------------ 2764 // Continuation point for throwing of implicit exceptions that are not handled in 2765 // the current activation. Fabricates an exception oop and initiates normal 2766 // exception dispatching in this frame. 2767 // 2768 // Previously the compiler (c2) allowed for callee save registers on Java calls. 2769 // This is no longer true after adapter frames were removed but could possibly 2770 // be brought back in the future if the interpreter code was reworked and it 2771 // was deemed worthwhile. The comment below was left to describe what must 2772 // happen here if callee saves were resurrected. As it stands now this stub 2773 // could actually be a vanilla BufferBlob and have no oopMap at all. 2774 // Since it doesn't make much difference we've chosen to leave it the 2775 // way it was in the callee save days and keep the comment. 2776 2777 // If we need to preserve callee-saved values we need a callee-saved oop map and 2778 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. 2779 // If the compiler needs all registers to be preserved between the fault 2780 // point and the exception handler then it must assume responsibility for that in 2781 // AbstractCompiler::continuation_for_implicit_null_exception or 2782 // continuation_for_implicit_division_by_zero_exception. All other implicit 2783 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are 2784 // either at call sites or otherwise assume that stack unwinding will be initiated, 2785 // so caller saved registers were assumed volatile in the compiler.
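// Frame sketch for the stub below, derived from the 'layout' enum above
// (word offsets from rsp after the prolog):
//
//   4 [ return address ] (ret_pc)
//   3 [ saved rbp,     ] <--- rbp, (rbp_off)
//   2 [ arg2           ]
//   1 [ arg1           ]
//   0 [ thread         ] <--- rsp (thread_off; becomes the first C argument)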
2786 address generate_throw_exception(const char* name, address runtime_entry, 2787 Register arg1 = noreg, Register arg2 = noreg) { 2788 2789 int insts_size = 256; 2790 int locs_size = 32; 2791 2792 CodeBuffer code(name, insts_size, locs_size); 2793 OopMapSet* oop_maps = new OopMapSet(); 2794 MacroAssembler* masm = new MacroAssembler(&code); 2795 2796 address start = __ pc(); 2797 2798 // This is an inlined and slightly modified version of call_VM 2799 // which has the ability to fetch the return PC out of 2800 // thread-local storage and also sets up last_Java_sp slightly 2801 // differently than the real call_VM 2802 Register java_thread = rbx; 2803 __ get_thread(java_thread); 2804 2805 __ enter(); // required for proper stackwalking of RuntimeStub frame 2806 2807 // pc and rbp, already pushed 2808 __ subptr(rsp, (framesize-2) * wordSize); // prolog 2809 2810 // Frame is now completed as far as size and linkage. 2811 2812 int frame_complete = __ pc() - start; 2813 2814 // push java thread (becomes first argument of C function) 2815 __ movptr(Address(rsp, thread_off * wordSize), java_thread); 2816 if (arg1 != noreg) { 2817 __ movptr(Address(rsp, arg1_off * wordSize), arg1); 2818 } 2819 if (arg2 != noreg) { 2820 assert(arg1 != noreg, "missing reg arg"); 2821 __ movptr(Address(rsp, arg2_off * wordSize), arg2); 2822 } 2823 2824 // Set up last_Java_sp and last_Java_fp 2825 __ set_last_Java_frame(java_thread, rsp, rbp, NULL); 2826 2827 // Call runtime 2828 BLOCK_COMMENT("call runtime_entry"); 2829 __ call(RuntimeAddress(runtime_entry)); 2830 // Generate oop map 2831 OopMap* map = new OopMap(framesize, 0); 2832 oop_maps->add_gc_map(__ pc() - start, map); 2833 2834 // restore the thread (cannot use the pushed argument since arguments 2835 // may be overwritten by C code generated by an optimizing compiler); 2836 // however can use the register value directly if it is callee saved. 2837 __ get_thread(java_thread); 2838 2839 __ reset_last_Java_frame(java_thread, true, false); 2840 2841 __ leave(); // required for proper stackwalking of RuntimeStub frame 2842 2843 // check for pending exceptions 2844 #ifdef ASSERT 2845 Label L; 2846 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 2847 __ jcc(Assembler::notEqual, L); 2848 __ should_not_reach_here(); 2849 __ bind(L); 2850 #endif /* ASSERT */ 2851 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2852 2853 2854 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false); 2855 return stub->entry_point(); 2856 } 2857 2858 2859 void create_control_words() { 2860 // Round to nearest, 53-bit mode, exceptions masked 2861 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 2862 // Round to zero, 53-bit mode, exceptions masked 2863 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 2864 // Round to nearest, 24-bit mode, exceptions masked 2865 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 2866 // Round to nearest, 64-bit mode, exceptions masked 2867 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 2868 // Round to nearest, all exceptions masked (MXCSR has no precision-control field) 2869 StubRoutines::_mxcsr_std = 0x1F80; 2870 // Note: the following two constants are 80-bit values 2871 // layout is critical for correct loading by FPU.
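// Worked decode (standard x87 extended-real format, exponent bias 0x3fff):
// the exponent words below give 0x03ff - 0x3fff = -15360 and
// 0x7bff - 0x3fff = +15360, and significand 0x80000000 00000000 is exactly
// 1.0 -- hence the 2^(-15360) and 2^(+15360) values noted below.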
2872 // Bias for strict fp multiply/divide 2873 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 2874 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 2875 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 2876 // Un-Bias for strict fp multiply/divide 2877 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 2878 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 2879 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 2880 } 2881 2882 //--------------------------------------------------------------------------- 2883 // Initialization 2884 2885 void generate_initial() { 2886 // Generates all stubs and initializes the entry points 2887 2888 //------------------------------------------------------------------------------------------------------------------------ 2889 // entry points that exist in all platforms 2890 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than 2891 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp. 2892 StubRoutines::_forward_exception_entry = generate_forward_exception(); 2893 2894 StubRoutines::_call_stub_entry = 2895 generate_call_stub(StubRoutines::_call_stub_return_address); 2896 // is referenced by megamorphic call 2897 StubRoutines::_catch_exception_entry = generate_catch_exception(); 2898 2899 // These are currently used by Solaris/Intel 2900 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 2901 2902 StubRoutines::_handler_for_unsafe_access_entry = 2903 generate_handler_for_unsafe_access(); 2904 2905 // platform dependent 2906 create_control_words(); 2907 2908 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 2909 StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); 2910 StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT, 2911 CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); 2912 StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, 2913 CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); 2914 2915 // Build this early so it's available for the interpreter 2916 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); 2917 } 2918 2919 2920 void generate_all() { 2921 // Generates all stubs and initializes the entry points 2922 2923 // These entry points require SharedInfo::stack0 to be set up in non-core builds 2924 // and need to be relocatable, so they each fabricate a RuntimeStub internally. 
2925 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); 2926 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); 2927 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); 2928 2929 //------------------------------------------------------------------------------------------------------------------------ 2930 // entry points that are platform specific 2931 2932 // support for verify_oop (must happen after universe_init) 2933 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 2934 2935 // arraycopy stubs used by compilers 2936 generate_arraycopy_stubs(); 2937 2938 generate_math_stubs(); 2939 2940 // don't bother generating these AES intrinsic stubs unless global flag is set 2941 if (UseAESIntrinsics) { 2942 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // might be needed by the others 2943 2944 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 2945 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 2946 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 2947 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt(); 2948 } 2949 } 2950 2951 2952 public: 2953 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 2954 if (all) { 2955 generate_all(); 2956 } else { 2957 generate_initial(); 2958 } 2959 } 2960 }; // end class declaration 2961 2962 2963 void StubGenerator_generate(CodeBuffer* code, bool all) { 2964 StubGenerator g(code, all); 2965 }
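// Usage sketch (illustrative; assuming the two-phase initialization in
// stubRoutines.cpp drives this entry point):
//
//   StubGenerator_generate(&buffer, false);  // generate_initial(), early in VM startup
//   StubGenerator_generate(&buffer, true);   // generate_all(), after universe_init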