1 /* 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubGenerator_x86_32.cpp.incl" 27 28 // Declaration and definition of StubGenerator (no .hpp file). 29 // For a more detailed description of the stub routine structure 30 // see the comment in stubRoutines.hpp 31 32 #define __ _masm-> 33 #define a__ ((Assembler*)_masm)-> 34 35 #ifdef PRODUCT 36 #define BLOCK_COMMENT(str) /* nothing */ 37 #else 38 #define BLOCK_COMMENT(str) __ block_comment(str) 39 #endif 40 41 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 42 43 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 44 const int FPU_CNTRL_WRD_MASK = 0xFFFF; 45 46 // ------------------------------------------------------------------------------------------------------------------------- 47 // Stub Code definitions 48 49 static address handle_unsafe_access() { 50 JavaThread* thread = JavaThread::current(); 51 address pc = thread->saved_exception_pc(); 52 // pc is the instruction which we must emulate 53 // doing a no-op is fine: return garbage from the load 54 // therefore, compute npc 55 address npc = Assembler::locate_next_instruction(pc); 56 57 // request an async exception 58 thread->set_pending_unsafe_access_error(); 59 60 // return address of next instruction to execute 61 return npc; 62 } 63 64 class StubGenerator: public StubCodeGenerator { 65 private: 66 67 #ifdef PRODUCT 68 #define inc_counter_np(counter) (0) 69 #else 70 void inc_counter_np_(int& counter) { 71 __ incrementl(ExternalAddress((address)&counter)); 72 } 73 #define inc_counter_np(counter) \ 74 BLOCK_COMMENT("inc_counter " #counter); \ 75 inc_counter_np_(counter); 76 #endif //PRODUCT 77 78 void inc_copy_counter_np(BasicType t) { 79 #ifndef PRODUCT 80 switch (t) { 81 case T_BYTE: inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); return; 82 case T_SHORT: inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return; 83 case T_INT: inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return; 84 case T_LONG: inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return; 85 case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return; 86 } 87 ShouldNotReachHere(); 88 #endif //PRODUCT 89 } 90 91 //------------------------------------------------------------------------------------------------------------------------ 92 // Call stubs are used to call Java from C 93 // 94 // [ return_from_Java ] <--- rsp 95 // [ argument word n ] 96 // ... 
97 // -N [ argument word 1 ] 98 // -7 [ Possible padding for stack alignment ] 99 // -6 [ Possible padding for stack alignment ] 100 // -5 [ Possible padding for stack alignment ] 101 // -4 [ mxcsr save ] <--- rsp_after_call 102 // -3 [ saved rbx, ] 103 // -2 [ saved rsi ] 104 // -1 [ saved rdi ] 105 // 0 [ saved rbp, ] <--- rbp, 106 // 1 [ return address ] 107 // 2 [ ptr. to call wrapper ] 108 // 3 [ result ] 109 // 4 [ result_type ] 110 // 5 [ method ] 111 // 6 [ entry_point ] 112 // 7 [ parameters ] 113 // 8 [ parameter_size ] 114 // 9 [ thread ] 115 116 117 address generate_call_stub(address& return_address) { 118 StubCodeMark mark(this, "StubRoutines", "call_stub"); 119 address start = __ pc(); 120 121 // stub code parameters / addresses 122 assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code"); 123 bool sse_save = false; 124 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()! 125 const int locals_count_in_bytes (4*wordSize); 126 const Address mxcsr_save (rbp, -4 * wordSize); 127 const Address saved_rbx (rbp, -3 * wordSize); 128 const Address saved_rsi (rbp, -2 * wordSize); 129 const Address saved_rdi (rbp, -1 * wordSize); 130 const Address result (rbp, 3 * wordSize); 131 const Address result_type (rbp, 4 * wordSize); 132 const Address method (rbp, 5 * wordSize); 133 const Address entry_point (rbp, 6 * wordSize); 134 const Address parameters (rbp, 7 * wordSize); 135 const Address parameter_size(rbp, 8 * wordSize); 136 const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()! 137 sse_save = UseSSE > 0; 138 139 // stub code 140 __ enter(); 141 __ movptr(rcx, parameter_size); // parameter counter 142 __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes 143 __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves 144 __ subptr(rsp, rcx); 145 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 146 147 // save rdi, rsi, & rbx, according to C calling conventions 148 __ movptr(saved_rdi, rdi); 149 __ movptr(saved_rsi, rsi); 150 __ movptr(saved_rbx, rbx); 151 // save and initialize %mxcsr 152 if (sse_save) { 153 Label skip_ldmx; 154 __ stmxcsr(mxcsr_save); 155 __ movl(rax, mxcsr_save); 156 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 157 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 158 __ cmp32(rax, mxcsr_std); 159 __ jcc(Assembler::equal, skip_ldmx); 160 __ ldmxcsr(mxcsr_std); 161 __ bind(skip_ldmx); 162 } 163 164 // make sure the control word is correct. 
165 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 166 167 #ifdef ASSERT 168 // make sure we have no pending exceptions 169 { Label L; 170 __ movptr(rcx, thread); 171 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 172 __ jcc(Assembler::equal, L); 173 __ stop("StubRoutines::call_stub: entered with pending exception"); 174 __ bind(L); 175 } 176 #endif 177 178 // pass parameters if any 179 BLOCK_COMMENT("pass parameters if any"); 180 Label parameters_done; 181 __ movl(rcx, parameter_size); // parameter counter 182 __ testl(rcx, rcx); 183 __ jcc(Assembler::zero, parameters_done); 184 185 // parameter passing loop 186 187 Label loop; 188 // Copy Java parameters in reverse order (receiver last) 189 // Note that the argument order is inverted in the process 190 // source is rdx[rcx: N-1..0] 191 // dest is rsp[rbx: 0..N-1] 192 193 __ movptr(rdx, parameters); // parameter pointer 194 __ xorptr(rbx, rbx); 195 196 __ BIND(loop); 197 198 // get parameter 199 __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); 200 __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), 201 Interpreter::expr_offset_in_bytes(0)), rax); // store parameter 202 __ increment(rbx); 203 __ decrement(rcx); 204 __ jcc(Assembler::notZero, loop); 205 206 // call Java function 207 __ BIND(parameters_done); 208 __ movptr(rbx, method); // get methodOop 209 __ movptr(rax, entry_point); // get entry_point 210 __ mov(rsi, rsp); // set sender sp 211 BLOCK_COMMENT("call Java function"); 212 __ call(rax); 213 214 BLOCK_COMMENT("call_stub_return_address:"); 215 return_address = __ pc(); 216 217 Label common_return; 218 219 __ BIND(common_return); 220 221 // store result depending on type 222 // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 223 __ movptr(rdi, result); 224 Label is_long, is_float, is_double, exit; 225 __ movl(rsi, result_type); 226 __ cmpl(rsi, T_LONG); 227 __ jcc(Assembler::equal, is_long); 228 __ cmpl(rsi, T_FLOAT); 229 __ jcc(Assembler::equal, is_float); 230 __ cmpl(rsi, T_DOUBLE); 231 __ jcc(Assembler::equal, is_double); 232 233 // handle T_INT case 234 __ movl(Address(rdi, 0), rax); 235 __ BIND(exit); 236 237 // check that FPU stack is empty 238 __ verify_FPU(0, "generate_call_stub"); 239 240 // pop parameters 241 __ lea(rsp, rsp_after_call); 242 243 // restore %mxcsr 244 if (sse_save) { 245 __ ldmxcsr(mxcsr_save); 246 } 247 248 // restore rdi, rsi and rbx, 249 __ movptr(rbx, saved_rbx); 250 __ movptr(rsi, saved_rsi); 251 __ movptr(rdi, saved_rdi); 252 __ addptr(rsp, 4*wordSize); 253 254 // return 255 __ pop(rbp); 256 __ ret(0); 257 258 // handle return types different from T_INT 259 __ BIND(is_long); 260 __ movl(Address(rdi, 0 * wordSize), rax); 261 __ movl(Address(rdi, 1 * wordSize), rdx); 262 __ jmp(exit); 263 264 __ BIND(is_float); 265 // interpreter uses xmm0 for return values 266 if (UseSSE >= 1) { 267 __ movflt(Address(rdi, 0), xmm0); 268 } else { 269 __ fstp_s(Address(rdi, 0)); 270 } 271 __ jmp(exit); 272 273 __ BIND(is_double); 274 // interpreter uses xmm0 for return values 275 if (UseSSE >= 2) { 276 __ movdbl(Address(rdi, 0), xmm0); 277 } else { 278 __ fstp_d(Address(rdi, 0)); 279 } 280 __ jmp(exit); 281 282 // If we call compiled code directly from the call stub we will 283 // need to adjust the return back to the call stub to a specialized 284 // piece of code that can handle compiled results and cleaning the fpu 285 // stack. 
compiled code will be set to return here instead of the 286 // return above that handles interpreter returns. 287 288 BLOCK_COMMENT("call_stub_compiled_return:"); 289 StubRoutines::x86::set_call_stub_compiled_return( __ pc()); 290 291 #ifdef COMPILER2 292 if (UseSSE >= 2) { 293 __ verify_FPU(0, "call_stub_compiled_return"); 294 } else { 295 for (int i = 1; i < 8; i++) { 296 __ ffree(i); 297 } 298 299 // UseSSE <= 1 so double result should be left on TOS 300 __ movl(rsi, result_type); 301 __ cmpl(rsi, T_DOUBLE); 302 __ jcc(Assembler::equal, common_return); 303 if (UseSSE == 0) { 304 // UseSSE == 0 so float result should be left on TOS 305 __ cmpl(rsi, T_FLOAT); 306 __ jcc(Assembler::equal, common_return); 307 } 308 __ ffree(0); 309 } 310 #endif /* COMPILER2 */ 311 __ jmp(common_return); 312 313 return start; 314 } 315 316 317 //------------------------------------------------------------------------------------------------------------------------ 318 // Return point for a Java call if there's an exception thrown in Java code. 319 // The exception is caught and transformed into a pending exception stored in 320 // JavaThread that can be tested from within the VM. 321 // 322 // Note: Usually the parameters are removed by the callee. In case of an exception 323 // crossing an activation frame boundary, that is not the case if the callee 324 // is compiled code => need to setup the rsp. 325 // 326 // rax,: exception oop 327 328 address generate_catch_exception() { 329 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 330 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()! 331 const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()! 332 address start = __ pc(); 333 334 // get thread directly 335 __ movptr(rcx, thread); 336 #ifdef ASSERT 337 // verify that threads correspond 338 { Label L; 339 __ get_thread(rbx); 340 __ cmpptr(rbx, rcx); 341 __ jcc(Assembler::equal, L); 342 __ stop("StubRoutines::catch_exception: threads must correspond"); 343 __ bind(L); 344 } 345 #endif 346 // set pending exception 347 __ verify_oop(rax); 348 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax ); 349 __ lea(Address(rcx, Thread::exception_file_offset ()), 350 ExternalAddress((address)__FILE__)); 351 __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); 352 // complete return to VM 353 assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); 354 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 355 356 return start; 357 } 358 359 360 //------------------------------------------------------------------------------------------------------------------------ 361 // Continuation point for runtime calls returning with a pending exception. 362 // The pending exception check happened in the runtime or native call stub. 363 // The pending exception in Thread is converted into a Java-level exception. 364 // 365 // Contract with Java-level exception handlers: 366 // rax: exception 367 // rdx: throwing pc 368 // 369 // NOTE: At entry of this stub, exception-pc must be on stack !! 
370 371 address generate_forward_exception() { 372 StubCodeMark mark(this, "StubRoutines", "forward exception"); 373 address start = __ pc(); 374 const Register thread = rcx; 375 376 // other registers used in this stub 377 const Register exception_oop = rax; 378 const Register handler_addr = rbx; 379 const Register exception_pc = rdx; 380 381 // Upon entry, the sp points to the return address returning into Java 382 // (interpreted or compiled) code; i.e., the return address becomes the 383 // throwing pc. 384 // 385 // Arguments pushed before the runtime call are still on the stack but 386 // the exception handler will reset the stack pointer -> ignore them. 387 // A potential result in registers can be ignored as well. 388 389 #ifdef ASSERT 390 // make sure this code is only executed if there is a pending exception 391 { Label L; 392 __ get_thread(thread); 393 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 394 __ jcc(Assembler::notEqual, L); 395 __ stop("StubRoutines::forward exception: no pending exception (1)"); 396 __ bind(L); 397 } 398 #endif 399 400 // compute exception handler into rbx, 401 __ get_thread(thread); 402 __ movptr(exception_pc, Address(rsp, 0)); 403 BLOCK_COMMENT("call exception_handler_for_return_address"); 404 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); 405 __ mov(handler_addr, rax); 406 407 // setup rax & rdx, remove return address & clear pending exception 408 __ get_thread(thread); 409 __ pop(exception_pc); 410 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 411 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 412 413 #ifdef ASSERT 414 // make sure exception is set 415 { Label L; 416 __ testptr(exception_oop, exception_oop); 417 __ jcc(Assembler::notEqual, L); 418 __ stop("StubRoutines::forward exception: no pending exception (2)"); 419 __ bind(L); 420 } 421 #endif 422 423 // Verify that there is really a valid exception in RAX. 424 __ verify_oop(exception_oop); 425 426 // Restore SP from BP if the exception PC is a MethodHandle call site. 
427 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); 428 __ cmovptr(Assembler::notEqual, rsp, rbp); 429 430 // continue at exception handler (return address removed) 431 // rax: exception 432 // rbx: exception handler 433 // rdx: throwing pc 434 __ jmp(handler_addr); 435 436 return start; 437 } 438 439 440 //---------------------------------------------------------------------------------------------------- 441 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) 442 // 443 // xchg exists as far back as 8086, lock needed for MP only 444 // Stack layout immediately after call: 445 // 446 // 0 [ret addr ] <--- rsp 447 // 1 [ ex ] 448 // 2 [ dest ] 449 // 450 // Result: *dest <- ex, return (old *dest) 451 // 452 // Note: win32 does not currently use this code 453 454 address generate_atomic_xchg() { 455 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 456 address start = __ pc(); 457 458 __ push(rdx); 459 Address exchange(rsp, 2 * wordSize); 460 Address dest_addr(rsp, 3 * wordSize); 461 __ movl(rax, exchange); 462 __ movptr(rdx, dest_addr); 463 __ xchgl(rax, Address(rdx, 0)); 464 __ pop(rdx); 465 __ ret(0); 466 467 return start; 468 } 469 470 //---------------------------------------------------------------------------------------------------- 471 // Support for void verify_mxcsr() 472 // 473 // This routine is used with -Xcheck:jni to verify that native 474 // JNI code does not return to Java code without restoring the 475 // MXCSR register to our expected state. 476 477 478 address generate_verify_mxcsr() { 479 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 480 address start = __ pc(); 481 482 const Address mxcsr_save(rsp, 0); 483 484 if (CheckJNICalls && UseSSE > 0 ) { 485 Label ok_ret; 486 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 487 __ push(rax); 488 __ subptr(rsp, wordSize); // allocate a temp location 489 __ stmxcsr(mxcsr_save); 490 __ movl(rax, mxcsr_save); 491 __ andl(rax, MXCSR_MASK); 492 __ cmp32(rax, mxcsr_std); 493 __ jcc(Assembler::equal, ok_ret); 494 495 __ warn("MXCSR changed by native JNI code."); 496 497 __ ldmxcsr(mxcsr_std); 498 499 __ bind(ok_ret); 500 __ addptr(rsp, wordSize); 501 __ pop(rax); 502 } 503 504 __ ret(0); 505 506 return start; 507 } 508 509 510 //--------------------------------------------------------------------------- 511 // Support for void verify_fpu_cntrl_wrd() 512 // 513 // This routine is used with -Xcheck:jni to verify that native 514 // JNI code does not return to Java code without restoring the 515 // FP control word to our expected state. 
516 517 address generate_verify_fpu_cntrl_wrd() { 518 StubCodeMark mark(this, "StubRoutines", "verify_spcw"); 519 address start = __ pc(); 520 521 const Address fpu_cntrl_wrd_save(rsp, 0); 522 523 if (CheckJNICalls) { 524 Label ok_ret; 525 __ push(rax); 526 __ subptr(rsp, wordSize); // allocate a temp location 527 __ fnstcw(fpu_cntrl_wrd_save); 528 __ movl(rax, fpu_cntrl_wrd_save); 529 __ andl(rax, FPU_CNTRL_WRD_MASK); 530 ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std()); 531 __ cmp32(rax, fpu_std); 532 __ jcc(Assembler::equal, ok_ret); 533 534 __ warn("Floating point control word changed by native JNI code."); 535 536 __ fldcw(fpu_std); 537 538 __ bind(ok_ret); 539 __ addptr(rsp, wordSize); 540 __ pop(rax); 541 } 542 543 __ ret(0); 544 545 return start; 546 } 547 548 //--------------------------------------------------------------------------- 549 // Wrapper for slow-case handling of double-to-integer conversion 550 // d2i or f2i fast case failed either because it is nan or because 551 // of under/overflow. 552 // Input: FPU TOS: float value 553 // Output: rax, (rdx): integer (long) result 554 555 address generate_d2i_wrapper(BasicType t, address fcn) { 556 StubCodeMark mark(this, "StubRoutines", "d2i_wrapper"); 557 address start = __ pc(); 558 559 // Capture info about frame layout 560 enum layout { FPUState_off = 0, 561 rbp_off = FPUStateSizeInWords, 562 rdi_off, 563 rsi_off, 564 rcx_off, 565 rbx_off, 566 saved_argument_off, 567 saved_argument_off2, // 2nd half of double 568 framesize 569 }; 570 571 assert(FPUStateSizeInWords == 27, "update stack layout"); 572 573 // Save outgoing argument to stack across push_FPU_state() 574 __ subptr(rsp, wordSize * 2); 575 __ fstp_d(Address(rsp, 0)); 576 577 // Save CPU & FPU state 578 __ push(rbx); 579 __ push(rcx); 580 __ push(rsi); 581 __ push(rdi); 582 __ push(rbp); 583 __ push_FPU_state(); 584 585 // push_FPU_state() resets the FP top of stack 586 // Load original double into FP top of stack 587 __ fld_d(Address(rsp, saved_argument_off * wordSize)); 588 // Store double into stack as outgoing argument 589 __ subptr(rsp, wordSize*2); 590 __ fst_d(Address(rsp, 0)); 591 592 // Prepare FPU for doing math in C-land 593 __ empty_FPU_stack(); 594 // Call the C code to massage the double. Result in EAX 595 if (t == T_INT) 596 { BLOCK_COMMENT("SharedRuntime::d2i"); } 597 else if (t == T_LONG) 598 { BLOCK_COMMENT("SharedRuntime::d2l"); } 599 __ call_VM_leaf( fcn, 2 ); 600 601 // Restore CPU & FPU state 602 __ pop_FPU_state(); 603 __ pop(rbp); 604 __ pop(rdi); 605 __ pop(rsi); 606 __ pop(rcx); 607 __ pop(rbx); 608 __ addptr(rsp, wordSize * 2); 609 610 __ ret(0); 611 612 return start; 613 } 614 615 616 //--------------------------------------------------------------------------- 617 // The following routine generates a subroutine to throw an asynchronous 618 // UnknownError when an unsafe access gets a fault that could not be 619 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) 
620 address generate_handler_for_unsafe_access() { 621 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 622 address start = __ pc(); 623 624 __ push(0); // hole for return address-to-be 625 __ pusha(); // push registers 626 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 627 BLOCK_COMMENT("call handle_unsafe_access"); 628 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 629 __ movptr(next_pc, rax); // stuff next address 630 __ popa(); 631 __ ret(0); // jump to next address 632 633 return start; 634 } 635 636 637 //---------------------------------------------------------------------------------------------------- 638 // Non-destructive plausibility checks for oops 639 640 address generate_verify_oop() { 641 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 642 address start = __ pc(); 643 644 // Incoming arguments on stack after saving rax,: 645 // 646 // [tos ]: saved rdx 647 // [tos + 1]: saved EFLAGS 648 // [tos + 2]: return address 649 // [tos + 3]: char* error message 650 // [tos + 4]: oop object to verify 651 // [tos + 5]: saved rax, - saved by caller and bashed 652 653 Label exit, error; 654 __ pushf(); 655 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 656 __ push(rdx); // save rdx 657 // make sure object is 'reasonable' 658 __ movptr(rax, Address(rsp, 4 * wordSize)); // get object 659 __ testptr(rax, rax); 660 __ jcc(Assembler::zero, exit); // if obj is NULL it is ok 661 662 // Check if the oop is in the right area of memory 663 const int oop_mask = Universe::verify_oop_mask(); 664 const int oop_bits = Universe::verify_oop_bits(); 665 __ mov(rdx, rax); 666 __ andptr(rdx, oop_mask); 667 __ cmpptr(rdx, oop_bits); 668 __ jcc(Assembler::notZero, error); 669 670 // make sure klass is 'reasonable' 671 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass 672 __ testptr(rax, rax); 673 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 674 675 // Check if the klass is in the right area of memory 676 const int klass_mask = Universe::verify_klass_mask(); 677 const int klass_bits = Universe::verify_klass_bits(); 678 __ mov(rdx, rax); 679 __ andptr(rdx, klass_mask); 680 __ cmpptr(rdx, klass_bits); 681 __ jcc(Assembler::notZero, error); 682 683 // make sure klass' klass is 'reasonable' 684 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass 685 __ testptr(rax, rax); 686 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken 687 688 __ mov(rdx, rax); 689 __ andptr(rdx, klass_mask); 690 __ cmpptr(rdx, klass_bits); 691 __ jcc(Assembler::notZero, error); // if klass not in right area 692 // of memory it is broken too. 
693 694 // return if everything seems ok 695 __ bind(exit); 696 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back 697 __ pop(rdx); // restore rdx 698 __ popf(); // restore EFLAGS 699 __ ret(3 * wordSize); // pop arguments 700 701 // handle errors 702 __ bind(error); 703 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back 704 __ pop(rdx); // get saved rdx back 705 __ popf(); // get saved EFLAGS off stack -- will be ignored 706 __ pusha(); // push registers (eip = return address & msg are already pushed) 707 BLOCK_COMMENT("call MacroAssembler::debug"); 708 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); 709 __ popa(); 710 __ ret(3 * wordSize); // pop arguments 711 return start; 712 } 713 714 // 715 // Generate pre-barrier for array stores 716 // 717 // Input: 718 // start - starting address 719 // count - element count 720 void gen_write_ref_array_pre_barrier(Register start, Register count) { 721 assert_different_registers(start, count); 722 BarrierSet* bs = Universe::heap()->barrier_set(); 723 switch (bs->kind()) { 724 case BarrierSet::G1SATBCT: 725 case BarrierSet::G1SATBCTLogging: 726 { 727 __ pusha(); // push registers 728 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 729 start, count); 730 __ popa(); 731 } 732 break; 733 case BarrierSet::CardTableModRef: 734 case BarrierSet::CardTableExtension: 735 case BarrierSet::ModRef: 736 break; 737 default : 738 ShouldNotReachHere(); 739 740 } 741 } 742 743 744 // 745 // Generate a post-barrier for an array store 746 // 747 // start - starting address 748 // count - element count 749 // 750 // The two input registers are overwritten. 751 // 752 void gen_write_ref_array_post_barrier(Register start, Register count) { 753 BarrierSet* bs = Universe::heap()->barrier_set(); 754 assert_different_registers(start, count); 755 switch (bs->kind()) { 756 case BarrierSet::G1SATBCT: 757 case BarrierSet::G1SATBCTLogging: 758 { 759 __ pusha(); // push registers 760 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 761 start, count); 762 __ popa(); 763 } 764 break; 765 766 case BarrierSet::CardTableModRef: 767 case BarrierSet::CardTableExtension: 768 { 769 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 770 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 771 772 Label L_loop; 773 const Register end = count; // elements count; end == start+count-1 774 assert_different_registers(start, end); 775 776 __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); 777 __ shrptr(start, CardTableModRefBS::card_shift); 778 __ shrptr(end, CardTableModRefBS::card_shift); 779 __ subptr(end, start); // end --> count 780 __ BIND(L_loop); 781 intptr_t disp = (intptr_t) ct->byte_map_base; 782 Address cardtable(start, count, Address::times_1, disp); 783 __ movb(cardtable, 0); 784 __ decrement(count); 785 __ jcc(Assembler::greaterEqual, L_loop); 786 } 787 break; 788 case BarrierSet::ModRef: 789 break; 790 default : 791 ShouldNotReachHere(); 792 793 } 794 } 795 796 797 // Copy 64 bytes chunks 798 // 799 // Inputs: 800 // from - source array address 801 // to_from - destination array address - from 802 // qword_count - 8-bytes element count, negative 803 // 804 void xmm_copy_forward(Register from, Register to_from, Register qword_count) { 805 assert( UseSSE >= 2, "supported cpu only" ); 806 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; 807 // Copy 64-byte chunks 808 __ jmpb(L_copy_64_bytes); 809 __ 
align(OptoLoopAlignment); 810 __ BIND(L_copy_64_bytes_loop); 811 812 if(UseUnalignedLoadStores) { 813 __ movdqu(xmm0, Address(from, 0)); 814 __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0); 815 __ movdqu(xmm1, Address(from, 16)); 816 __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1); 817 __ movdqu(xmm2, Address(from, 32)); 818 __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2); 819 __ movdqu(xmm3, Address(from, 48)); 820 __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3); 821 822 } else { 823 __ movq(xmm0, Address(from, 0)); 824 __ movq(Address(from, to_from, Address::times_1, 0), xmm0); 825 __ movq(xmm1, Address(from, 8)); 826 __ movq(Address(from, to_from, Address::times_1, 8), xmm1); 827 __ movq(xmm2, Address(from, 16)); 828 __ movq(Address(from, to_from, Address::times_1, 16), xmm2); 829 __ movq(xmm3, Address(from, 24)); 830 __ movq(Address(from, to_from, Address::times_1, 24), xmm3); 831 __ movq(xmm4, Address(from, 32)); 832 __ movq(Address(from, to_from, Address::times_1, 32), xmm4); 833 __ movq(xmm5, Address(from, 40)); 834 __ movq(Address(from, to_from, Address::times_1, 40), xmm5); 835 __ movq(xmm6, Address(from, 48)); 836 __ movq(Address(from, to_from, Address::times_1, 48), xmm6); 837 __ movq(xmm7, Address(from, 56)); 838 __ movq(Address(from, to_from, Address::times_1, 56), xmm7); 839 } 840 841 __ addl(from, 64); 842 __ BIND(L_copy_64_bytes); 843 __ subl(qword_count, 8); 844 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); 845 __ addl(qword_count, 8); 846 __ jccb(Assembler::zero, L_exit); 847 // 848 // length is too short, just copy qwords 849 // 850 __ BIND(L_copy_8_bytes); 851 __ movq(xmm0, Address(from, 0)); 852 __ movq(Address(from, to_from, Address::times_1), xmm0); 853 __ addl(from, 8); 854 __ decrement(qword_count); 855 __ jcc(Assembler::greater, L_copy_8_bytes); 856 __ BIND(L_exit); 857 } 858 859 // Copy 64 bytes chunks 860 // 861 // Inputs: 862 // from - source array address 863 // to_from - destination array address - from 864 // qword_count - 8-bytes element count, negative 865 // 866 void mmx_copy_forward(Register from, Register to_from, Register qword_count) { 867 assert( VM_Version::supports_mmx(), "supported cpu only" ); 868 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; 869 // Copy 64-byte chunks 870 __ jmpb(L_copy_64_bytes); 871 __ align(OptoLoopAlignment); 872 __ BIND(L_copy_64_bytes_loop); 873 __ movq(mmx0, Address(from, 0)); 874 __ movq(mmx1, Address(from, 8)); 875 __ movq(mmx2, Address(from, 16)); 876 __ movq(Address(from, to_from, Address::times_1, 0), mmx0); 877 __ movq(mmx3, Address(from, 24)); 878 __ movq(Address(from, to_from, Address::times_1, 8), mmx1); 879 __ movq(mmx4, Address(from, 32)); 880 __ movq(Address(from, to_from, Address::times_1, 16), mmx2); 881 __ movq(mmx5, Address(from, 40)); 882 __ movq(Address(from, to_from, Address::times_1, 24), mmx3); 883 __ movq(mmx6, Address(from, 48)); 884 __ movq(Address(from, to_from, Address::times_1, 32), mmx4); 885 __ movq(mmx7, Address(from, 56)); 886 __ movq(Address(from, to_from, Address::times_1, 40), mmx5); 887 __ movq(Address(from, to_from, Address::times_1, 48), mmx6); 888 __ movq(Address(from, to_from, Address::times_1, 56), mmx7); 889 __ addptr(from, 64); 890 __ BIND(L_copy_64_bytes); 891 __ subl(qword_count, 8); 892 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); 893 __ addl(qword_count, 8); 894 __ jccb(Assembler::zero, L_exit); 895 // 896 // length is too short, just copy qwords 897 // 898 __ BIND(L_copy_8_bytes); 
899 __ movq(mmx0, Address(from, 0)); 900 __ movq(Address(from, to_from, Address::times_1), mmx0); 901 __ addptr(from, 8); 902 __ decrement(qword_count); 903 __ jcc(Assembler::greater, L_copy_8_bytes); 904 __ BIND(L_exit); 905 __ emms(); 906 } 907 908 address generate_disjoint_copy(BasicType t, bool aligned, 909 Address::ScaleFactor sf, 910 address* entry, const char *name) { 911 __ align(CodeEntryAlignment); 912 StubCodeMark mark(this, "StubRoutines", name); 913 address start = __ pc(); 914 915 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; 916 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes; 917 918 int shift = Address::times_ptr - sf; 919 920 const Register from = rsi; // source array address 921 const Register to = rdi; // destination array address 922 const Register count = rcx; // elements count 923 const Register to_from = to; // (to - from) 924 const Register saved_to = rdx; // saved destination array address 925 926 __ enter(); // required for proper stackwalking of RuntimeStub frame 927 __ push(rsi); 928 __ push(rdi); 929 __ movptr(from , Address(rsp, 12+ 4)); 930 __ movptr(to , Address(rsp, 12+ 8)); 931 __ movl(count, Address(rsp, 12+ 12)); 932 if (t == T_OBJECT) { 933 __ testl(count, count); 934 __ jcc(Assembler::zero, L_0_count); 935 gen_write_ref_array_pre_barrier(to, count); 936 __ mov(saved_to, to); // save 'to' 937 } 938 939 *entry = __ pc(); // Entry point from conjoint arraycopy stub. 940 BLOCK_COMMENT("Entry:"); 941 942 __ subptr(to, from); // to --> to_from 943 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 944 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 945 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 946 // align source address at 4 bytes address boundary 947 if (t == T_BYTE) { 948 // One byte misalignment happens only for byte arrays 949 __ testl(from, 1); 950 __ jccb(Assembler::zero, L_skip_align1); 951 __ movb(rax, Address(from, 0)); 952 __ movb(Address(from, to_from, Address::times_1, 0), rax); 953 __ increment(from); 954 __ decrement(count); 955 __ BIND(L_skip_align1); 956 } 957 // Two bytes misalignment happens only for byte and short (char) arrays 958 __ testl(from, 2); 959 __ jccb(Assembler::zero, L_skip_align2); 960 __ movw(rax, Address(from, 0)); 961 __ movw(Address(from, to_from, Address::times_1, 0), rax); 962 __ addptr(from, 2); 963 __ subl(count, 1<<(shift-1)); 964 __ BIND(L_skip_align2); 965 } 966 if (!VM_Version::supports_mmx()) { 967 __ mov(rax, count); // save 'count' 968 __ shrl(count, shift); // bytes count 969 __ addptr(to_from, from);// restore 'to' 970 __ rep_mov(); 971 __ subptr(to_from, from);// restore 'to_from' 972 __ mov(count, rax); // restore 'count' 973 __ jmpb(L_copy_2_bytes); // all dwords were copied 974 } else { 975 if (!UseUnalignedLoadStores) { 976 // align to 8 bytes, we know we are 4 byte aligned to start 977 __ testptr(from, 4); 978 __ jccb(Assembler::zero, L_copy_64_bytes); 979 __ movl(rax, Address(from, 0)); 980 __ movl(Address(from, to_from, Address::times_1, 0), rax); 981 __ addptr(from, 4); 982 __ subl(count, 1<<shift); 983 } 984 __ BIND(L_copy_64_bytes); 985 __ mov(rax, count); 986 __ shrl(rax, shift+1); // 8 bytes chunk count 987 // 988 // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop 989 // 990 if (UseXMMForArrayCopy) { 991 xmm_copy_forward(from, to_from, rax); 992 } else { 993 mmx_copy_forward(from, to_from, rax); 994 } 995 } 996 // copy tailing dword 997 __ BIND(L_copy_4_bytes); 998 __ testl(count, 
1<<shift); 999 __ jccb(Assembler::zero, L_copy_2_bytes); 1000 __ movl(rax, Address(from, 0)); 1001 __ movl(Address(from, to_from, Address::times_1, 0), rax); 1002 if (t == T_BYTE || t == T_SHORT) { 1003 __ addptr(from, 4); 1004 __ BIND(L_copy_2_bytes); 1005 // copy tailing word 1006 __ testl(count, 1<<(shift-1)); 1007 __ jccb(Assembler::zero, L_copy_byte); 1008 __ movw(rax, Address(from, 0)); 1009 __ movw(Address(from, to_from, Address::times_1, 0), rax); 1010 if (t == T_BYTE) { 1011 __ addptr(from, 2); 1012 __ BIND(L_copy_byte); 1013 // copy tailing byte 1014 __ testl(count, 1); 1015 __ jccb(Assembler::zero, L_exit); 1016 __ movb(rax, Address(from, 0)); 1017 __ movb(Address(from, to_from, Address::times_1, 0), rax); 1018 __ BIND(L_exit); 1019 } else { 1020 __ BIND(L_copy_byte); 1021 } 1022 } else { 1023 __ BIND(L_copy_2_bytes); 1024 } 1025 1026 if (t == T_OBJECT) { 1027 __ movl(count, Address(rsp, 12+12)); // reread 'count' 1028 __ mov(to, saved_to); // restore 'to' 1029 gen_write_ref_array_post_barrier(to, count); 1030 __ BIND(L_0_count); 1031 } 1032 inc_copy_counter_np(t); 1033 __ pop(rdi); 1034 __ pop(rsi); 1035 __ leave(); // required for proper stackwalking of RuntimeStub frame 1036 __ xorptr(rax, rax); // return 0 1037 __ ret(0); 1038 return start; 1039 } 1040 1041 1042 address generate_fill(BasicType t, bool aligned, const char *name) { 1043 __ align(CodeEntryAlignment); 1044 StubCodeMark mark(this, "StubRoutines", name); 1045 address start = __ pc(); 1046 1047 BLOCK_COMMENT("Entry:"); 1048 1049 const Register to = rdi; // source array address 1050 const Register value = rdx; // value 1051 const Register count = rsi; // elements count 1052 1053 __ enter(); // required for proper stackwalking of RuntimeStub frame 1054 __ push(rsi); 1055 __ push(rdi); 1056 __ movptr(to , Address(rsp, 12+ 4)); 1057 __ movl(value, Address(rsp, 12+ 8)); 1058 __ movl(count, Address(rsp, 12+ 12)); 1059 1060 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1061 1062 __ pop(rdi); 1063 __ pop(rsi); 1064 __ leave(); // required for proper stackwalking of RuntimeStub frame 1065 __ ret(0); 1066 return start; 1067 } 1068 1069 address generate_conjoint_copy(BasicType t, bool aligned, 1070 Address::ScaleFactor sf, 1071 address nooverlap_target, 1072 address* entry, const char *name) { 1073 __ align(CodeEntryAlignment); 1074 StubCodeMark mark(this, "StubRoutines", name); 1075 address start = __ pc(); 1076 1077 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; 1078 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop; 1079 1080 int shift = Address::times_ptr - sf; 1081 1082 const Register src = rax; // source array address 1083 const Register dst = rdx; // destination array address 1084 const Register from = rsi; // source array address 1085 const Register to = rdi; // destination array address 1086 const Register count = rcx; // elements count 1087 const Register end = rax; // array end address 1088 1089 __ enter(); // required for proper stackwalking of RuntimeStub frame 1090 __ push(rsi); 1091 __ push(rdi); 1092 __ movptr(src , Address(rsp, 12+ 4)); // from 1093 __ movptr(dst , Address(rsp, 12+ 8)); // to 1094 __ movl2ptr(count, Address(rsp, 12+12)); // count 1095 if (t == T_OBJECT) { 1096 gen_write_ref_array_pre_barrier(dst, count); 1097 } 1098 1099 if (entry != NULL) { 1100 *entry = __ pc(); // Entry point from generic arraycopy stub. 
1101 BLOCK_COMMENT("Entry:"); 1102 } 1103 1104 if (t == T_OBJECT) { 1105 __ testl(count, count); 1106 __ jcc(Assembler::zero, L_0_count); 1107 } 1108 __ mov(from, src); 1109 __ mov(to , dst); 1110 1111 // arrays overlap test 1112 RuntimeAddress nooverlap(nooverlap_target); 1113 __ cmpptr(dst, src); 1114 __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size 1115 __ jump_cc(Assembler::belowEqual, nooverlap); 1116 __ cmpptr(dst, end); 1117 __ jump_cc(Assembler::aboveEqual, nooverlap); 1118 1119 // copy from high to low 1120 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 1121 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 1122 if (t == T_BYTE || t == T_SHORT) { 1123 // Align the end of destination array at 4 bytes address boundary 1124 __ lea(end, Address(dst, count, sf, 0)); 1125 if (t == T_BYTE) { 1126 // One byte misalignment happens only for byte arrays 1127 __ testl(end, 1); 1128 __ jccb(Assembler::zero, L_skip_align1); 1129 __ decrement(count); 1130 __ movb(rdx, Address(from, count, sf, 0)); 1131 __ movb(Address(to, count, sf, 0), rdx); 1132 __ BIND(L_skip_align1); 1133 } 1134 // Two bytes misalignment happens only for byte and short (char) arrays 1135 __ testl(end, 2); 1136 __ jccb(Assembler::zero, L_skip_align2); 1137 __ subptr(count, 1<<(shift-1)); 1138 __ movw(rdx, Address(from, count, sf, 0)); 1139 __ movw(Address(to, count, sf, 0), rdx); 1140 __ BIND(L_skip_align2); 1141 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 1142 __ jcc(Assembler::below, L_copy_4_bytes); 1143 } 1144 1145 if (!VM_Version::supports_mmx()) { 1146 __ std(); 1147 __ mov(rax, count); // Save 'count' 1148 __ mov(rdx, to); // Save 'to' 1149 __ lea(rsi, Address(from, count, sf, -4)); 1150 __ lea(rdi, Address(to , count, sf, -4)); 1151 __ shrptr(count, shift); // bytes count 1152 __ rep_mov(); 1153 __ cld(); 1154 __ mov(count, rax); // restore 'count' 1155 __ andl(count, (1<<shift)-1); // mask the number of rest elements 1156 __ movptr(from, Address(rsp, 12+4)); // reread 'from' 1157 __ mov(to, rdx); // restore 'to' 1158 __ jmpb(L_copy_2_bytes); // all dword were copied 1159 } else { 1160 // Align to 8 bytes the end of array. It is aligned to 4 bytes already. 
1161 __ testptr(end, 4); 1162 __ jccb(Assembler::zero, L_copy_8_bytes); 1163 __ subl(count, 1<<shift); 1164 __ movl(rdx, Address(from, count, sf, 0)); 1165 __ movl(Address(to, count, sf, 0), rdx); 1166 __ jmpb(L_copy_8_bytes); 1167 1168 __ align(OptoLoopAlignment); 1169 // Move 8 bytes 1170 __ BIND(L_copy_8_bytes_loop); 1171 if (UseXMMForArrayCopy) { 1172 __ movq(xmm0, Address(from, count, sf, 0)); 1173 __ movq(Address(to, count, sf, 0), xmm0); 1174 } else { 1175 __ movq(mmx0, Address(from, count, sf, 0)); 1176 __ movq(Address(to, count, sf, 0), mmx0); 1177 } 1178 __ BIND(L_copy_8_bytes); 1179 __ subl(count, 2<<shift); 1180 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1181 __ addl(count, 2<<shift); 1182 if (!UseXMMForArrayCopy) { 1183 __ emms(); 1184 } 1185 } 1186 __ BIND(L_copy_4_bytes); 1187 // copy prefix qword 1188 __ testl(count, 1<<shift); 1189 __ jccb(Assembler::zero, L_copy_2_bytes); 1190 __ movl(rdx, Address(from, count, sf, -4)); 1191 __ movl(Address(to, count, sf, -4), rdx); 1192 1193 if (t == T_BYTE || t == T_SHORT) { 1194 __ subl(count, (1<<shift)); 1195 __ BIND(L_copy_2_bytes); 1196 // copy prefix dword 1197 __ testl(count, 1<<(shift-1)); 1198 __ jccb(Assembler::zero, L_copy_byte); 1199 __ movw(rdx, Address(from, count, sf, -2)); 1200 __ movw(Address(to, count, sf, -2), rdx); 1201 if (t == T_BYTE) { 1202 __ subl(count, 1<<(shift-1)); 1203 __ BIND(L_copy_byte); 1204 // copy prefix byte 1205 __ testl(count, 1); 1206 __ jccb(Assembler::zero, L_exit); 1207 __ movb(rdx, Address(from, 0)); 1208 __ movb(Address(to, 0), rdx); 1209 __ BIND(L_exit); 1210 } else { 1211 __ BIND(L_copy_byte); 1212 } 1213 } else { 1214 __ BIND(L_copy_2_bytes); 1215 } 1216 if (t == T_OBJECT) { 1217 __ movl2ptr(count, Address(rsp, 12+12)); // reread count 1218 gen_write_ref_array_post_barrier(to, count); 1219 __ BIND(L_0_count); 1220 } 1221 inc_copy_counter_np(t); 1222 __ pop(rdi); 1223 __ pop(rsi); 1224 __ leave(); // required for proper stackwalking of RuntimeStub frame 1225 __ xorptr(rax, rax); // return 0 1226 __ ret(0); 1227 return start; 1228 } 1229 1230 1231 address generate_disjoint_long_copy(address* entry, const char *name) { 1232 __ align(CodeEntryAlignment); 1233 StubCodeMark mark(this, "StubRoutines", name); 1234 address start = __ pc(); 1235 1236 Label L_copy_8_bytes, L_copy_8_bytes_loop; 1237 const Register from = rax; // source array address 1238 const Register to = rdx; // destination array address 1239 const Register count = rcx; // elements count 1240 const Register to_from = rdx; // (to - from) 1241 1242 __ enter(); // required for proper stackwalking of RuntimeStub frame 1243 __ movptr(from , Address(rsp, 8+0)); // from 1244 __ movptr(to , Address(rsp, 8+4)); // to 1245 __ movl2ptr(count, Address(rsp, 8+8)); // count 1246 1247 *entry = __ pc(); // Entry point from conjoint arraycopy stub. 
1248 BLOCK_COMMENT("Entry:"); 1249 1250 __ subptr(to, from); // to --> to_from 1251 if (VM_Version::supports_mmx()) { 1252 if (UseXMMForArrayCopy) { 1253 xmm_copy_forward(from, to_from, count); 1254 } else { 1255 mmx_copy_forward(from, to_from, count); 1256 } 1257 } else { 1258 __ jmpb(L_copy_8_bytes); 1259 __ align(OptoLoopAlignment); 1260 __ BIND(L_copy_8_bytes_loop); 1261 __ fild_d(Address(from, 0)); 1262 __ fistp_d(Address(from, to_from, Address::times_1)); 1263 __ addptr(from, 8); 1264 __ BIND(L_copy_8_bytes); 1265 __ decrement(count); 1266 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1267 } 1268 inc_copy_counter_np(T_LONG); 1269 __ leave(); // required for proper stackwalking of RuntimeStub frame 1270 __ xorptr(rax, rax); // return 0 1271 __ ret(0); 1272 return start; 1273 } 1274 1275 address generate_conjoint_long_copy(address nooverlap_target, 1276 address* entry, const char *name) { 1277 __ align(CodeEntryAlignment); 1278 StubCodeMark mark(this, "StubRoutines", name); 1279 address start = __ pc(); 1280 1281 Label L_copy_8_bytes, L_copy_8_bytes_loop; 1282 const Register from = rax; // source array address 1283 const Register to = rdx; // destination array address 1284 const Register count = rcx; // elements count 1285 const Register end_from = rax; // source array end address 1286 1287 __ enter(); // required for proper stackwalking of RuntimeStub frame 1288 __ movptr(from , Address(rsp, 8+0)); // from 1289 __ movptr(to , Address(rsp, 8+4)); // to 1290 __ movl2ptr(count, Address(rsp, 8+8)); // count 1291 1292 *entry = __ pc(); // Entry point from generic arraycopy stub. 1293 BLOCK_COMMENT("Entry:"); 1294 1295 // arrays overlap test 1296 __ cmpptr(to, from); 1297 RuntimeAddress nooverlap(nooverlap_target); 1298 __ jump_cc(Assembler::belowEqual, nooverlap); 1299 __ lea(end_from, Address(from, count, Address::times_8, 0)); 1300 __ cmpptr(to, end_from); 1301 __ movptr(from, Address(rsp, 8)); // from 1302 __ jump_cc(Assembler::aboveEqual, nooverlap); 1303 1304 __ jmpb(L_copy_8_bytes); 1305 1306 __ align(OptoLoopAlignment); 1307 __ BIND(L_copy_8_bytes_loop); 1308 if (VM_Version::supports_mmx()) { 1309 if (UseXMMForArrayCopy) { 1310 __ movq(xmm0, Address(from, count, Address::times_8)); 1311 __ movq(Address(to, count, Address::times_8), xmm0); 1312 } else { 1313 __ movq(mmx0, Address(from, count, Address::times_8)); 1314 __ movq(Address(to, count, Address::times_8), mmx0); 1315 } 1316 } else { 1317 __ fild_d(Address(from, count, Address::times_8)); 1318 __ fistp_d(Address(to, count, Address::times_8)); 1319 } 1320 __ BIND(L_copy_8_bytes); 1321 __ decrement(count); 1322 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1323 1324 if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) { 1325 __ emms(); 1326 } 1327 inc_copy_counter_np(T_LONG); 1328 __ leave(); // required for proper stackwalking of RuntimeStub frame 1329 __ xorptr(rax, rax); // return 0 1330 __ ret(0); 1331 return start; 1332 } 1333 1334 1335 // Helper for generating a dynamic type check. 1336 // The sub_klass must be one of {rbx, rdx, rsi}. 1337 // The temp is killed. 
1338 void generate_type_check(Register sub_klass, 1339 Address& super_check_offset_addr, 1340 Address& super_klass_addr, 1341 Register temp, 1342 Label* L_success, Label* L_failure) { 1343 BLOCK_COMMENT("type_check:"); 1344 1345 Label L_fallthrough; 1346 #define LOCAL_JCC(assembler_con, label_ptr) \ 1347 if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \ 1348 else __ jcc(assembler_con, L_fallthrough) /*omit semi*/ 1349 1350 // The following is a strange variation of the fast path which requires 1351 // one less register, because needed values are on the argument stack. 1352 // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp, 1353 // L_success, L_failure, NULL); 1354 assert_different_registers(sub_klass, temp); 1355 1356 int sc_offset = (klassOopDesc::header_size() * HeapWordSize + 1357 Klass::secondary_super_cache_offset_in_bytes()); 1358 1359 // if the pointers are equal, we are done (e.g., String[] elements) 1360 __ cmpptr(sub_klass, super_klass_addr); 1361 LOCAL_JCC(Assembler::equal, L_success); 1362 1363 // check the supertype display: 1364 __ movl2ptr(temp, super_check_offset_addr); 1365 Address super_check_addr(sub_klass, temp, Address::times_1, 0); 1366 __ movptr(temp, super_check_addr); // load displayed supertype 1367 __ cmpptr(temp, super_klass_addr); // test the super type 1368 LOCAL_JCC(Assembler::equal, L_success); 1369 1370 // if it was a primary super, we can just fail immediately 1371 __ cmpl(super_check_offset_addr, sc_offset); 1372 LOCAL_JCC(Assembler::notEqual, L_failure); 1373 1374 // The repne_scan instruction uses fixed registers, which will get spilled. 1375 // We happen to know this works best when super_klass is in rax. 1376 Register super_klass = temp; 1377 __ movptr(super_klass, super_klass_addr); 1378 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, 1379 L_success, L_failure); 1380 1381 __ bind(L_fallthrough); 1382 1383 if (L_success == NULL) { BLOCK_COMMENT("L_success:"); } 1384 if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); } 1385 1386 #undef LOCAL_JCC 1387 } 1388 1389 // 1390 // Generate checkcasting array copy stub 1391 // 1392 // Input: 1393 // 4(rsp) - source array address 1394 // 8(rsp) - destination array address 1395 // 12(rsp) - element count, can be zero 1396 // 16(rsp) - size_t ckoff (super_check_offset) 1397 // 20(rsp) - oop ckval (super_klass) 1398 // 1399 // Output: 1400 // rax, == 0 - success 1401 // rax, == -1^K - failure, where K is partial transfer count 1402 // 1403 address generate_checkcast_copy(const char *name, address* entry) { 1404 __ align(CodeEntryAlignment); 1405 StubCodeMark mark(this, "StubRoutines", name); 1406 address start = __ pc(); 1407 1408 Label L_load_element, L_store_element, L_do_card_marks, L_done; 1409 1410 // register use: 1411 // rax, rdx, rcx -- loop control (end_from, end_to, count) 1412 // rdi, rsi -- element access (oop, klass) 1413 // rbx, -- temp 1414 const Register from = rax; // source array address 1415 const Register to = rdx; // destination array address 1416 const Register length = rcx; // elements count 1417 const Register elem = rdi; // each oop copied 1418 const Register elem_klass = rsi; // each elem._klass (sub_klass) 1419 const Register temp = rbx; // lone remaining temp 1420 1421 __ enter(); // required for proper stackwalking of RuntimeStub frame 1422 1423 __ push(rsi); 1424 __ push(rdi); 1425 __ push(rbx); 1426 1427 Address from_arg(rsp, 16+ 4); // from 1428 Address to_arg(rsp, 16+ 8); // to 1429 Address length_arg(rsp, 16+12); // elements 
count 1430 Address ckoff_arg(rsp, 16+16); // super_check_offset 1431 Address ckval_arg(rsp, 16+20); // super_klass 1432 1433 // Load up: 1434 __ movptr(from, from_arg); 1435 __ movptr(to, to_arg); 1436 __ movl2ptr(length, length_arg); 1437 1438 *entry = __ pc(); // Entry point from generic arraycopy stub. 1439 BLOCK_COMMENT("Entry:"); 1440 1441 //--------------------------------------------------------------- 1442 // Assembler stub will be used for this call to arraycopy 1443 // if the two arrays are subtypes of Object[] but the 1444 // destination array type is not equal to or a supertype 1445 // of the source type. Each element must be separately 1446 // checked. 1447 1448 // Loop-invariant addresses. They are exclusive end pointers. 1449 Address end_from_addr(from, length, Address::times_ptr, 0); 1450 Address end_to_addr(to, length, Address::times_ptr, 0); 1451 1452 Register end_from = from; // re-use 1453 Register end_to = to; // re-use 1454 Register count = length; // re-use 1455 1456 // Loop-variant addresses. They assume post-incremented count < 0. 1457 Address from_element_addr(end_from, count, Address::times_ptr, 0); 1458 Address to_element_addr(end_to, count, Address::times_ptr, 0); 1459 Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); 1460 1461 // Copy from low to high addresses, indexed from the end of each array. 1462 gen_write_ref_array_pre_barrier(to, count); 1463 __ lea(end_from, end_from_addr); 1464 __ lea(end_to, end_to_addr); 1465 assert(length == count, ""); // else fix next line: 1466 __ negptr(count); // negate and test the length 1467 __ jccb(Assembler::notZero, L_load_element); 1468 1469 // Empty array: Nothing to do. 1470 __ xorptr(rax, rax); // return 0 on (trivial) success 1471 __ jmp(L_done); 1472 1473 // ======== begin loop ======== 1474 // (Loop is rotated; its entry is L_load_element.) 1475 // Loop control: 1476 // for (count = -count; count != 0; count++) 1477 // Base pointers src, dst are biased by 8*count,to last element. 1478 __ align(OptoLoopAlignment); 1479 1480 __ BIND(L_store_element); 1481 __ movptr(to_element_addr, elem); // store the oop 1482 __ increment(count); // increment the count toward zero 1483 __ jccb(Assembler::zero, L_do_card_marks); 1484 1485 // ======== loop entry is here ======== 1486 __ BIND(L_load_element); 1487 __ movptr(elem, from_element_addr); // load the oop 1488 __ testptr(elem, elem); 1489 __ jccb(Assembler::zero, L_store_element); 1490 1491 // (Could do a trick here: Remember last successful non-null 1492 // element stored and make a quick oop equality check on it.) 1493 1494 __ movptr(elem_klass, elem_klass_addr); // query the object klass 1495 generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, 1496 &L_store_element, NULL); 1497 // (On fall-through, we have failed the element type check.) 1498 // ======== end loop ======== 1499 1500 // It was a real error; we must depend on the caller to finish the job. 1501 // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops. 1502 // Emit GC store barriers for the oops we have copied (length_arg + count), 1503 // and report their number to the caller. 1504 __ addl(count, length_arg); // transfers = (length - remaining) 1505 __ movl2ptr(rax, count); // save the value 1506 __ notptr(rax); // report (-1^K) to caller 1507 __ movptr(to, to_arg); // reload 1508 assert_different_registers(to, count, rax); 1509 gen_write_ref_array_post_barrier(to, count); 1510 __ jmpb(L_done); 1511 1512 // Come here on success only. 
1513 __ BIND(L_do_card_marks); 1514 __ movl2ptr(count, length_arg); 1515 __ movptr(to, to_arg); // reload 1516 gen_write_ref_array_post_barrier(to, count); 1517 __ xorptr(rax, rax); // return 0 on success 1518 1519 // Common exit point (success or failure). 1520 __ BIND(L_done); 1521 __ pop(rbx); 1522 __ pop(rdi); 1523 __ pop(rsi); 1524 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); 1525 __ leave(); // required for proper stackwalking of RuntimeStub frame 1526 __ ret(0); 1527 1528 return start; 1529 } 1530 1531 // 1532 // Generate 'unsafe' array copy stub 1533 // Though just as safe as the other stubs, it takes an unscaled 1534 // size_t argument instead of an element count. 1535 // 1536 // Input: 1537 // 4(rsp) - source array address 1538 // 8(rsp) - destination array address 1539 // 12(rsp) - byte count, can be zero 1540 // 1541 // Output: 1542 // rax, == 0 - success 1543 // rax, == -1 - need to call System.arraycopy 1544 // 1545 // Examines the alignment of the operands and dispatches 1546 // to a long, int, short, or byte copy loop. 1547 // 1548 address generate_unsafe_copy(const char *name, 1549 address byte_copy_entry, 1550 address short_copy_entry, 1551 address int_copy_entry, 1552 address long_copy_entry) { 1553 1554 Label L_long_aligned, L_int_aligned, L_short_aligned; 1555 1556 __ align(CodeEntryAlignment); 1557 StubCodeMark mark(this, "StubRoutines", name); 1558 address start = __ pc(); 1559 1560 const Register from = rax; // source array address 1561 const Register to = rdx; // destination array address 1562 const Register count = rcx; // elements count 1563 1564 __ enter(); // required for proper stackwalking of RuntimeStub frame 1565 __ push(rsi); 1566 __ push(rdi); 1567 Address from_arg(rsp, 12+ 4); // from 1568 Address to_arg(rsp, 12+ 8); // to 1569 Address count_arg(rsp, 12+12); // byte count 1570 1571 // Load up: 1572 __ movptr(from , from_arg); 1573 __ movptr(to , to_arg); 1574 __ movl2ptr(count, count_arg); 1575 1576 // bump this on entry, not on exit: 1577 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 1578 1579 const Register bits = rsi; 1580 __ mov(bits, from); 1581 __ orptr(bits, to); 1582 __ orptr(bits, count); 1583 1584 __ testl(bits, BytesPerLong-1); 1585 __ jccb(Assembler::zero, L_long_aligned); 1586 1587 __ testl(bits, BytesPerInt-1); 1588 __ jccb(Assembler::zero, L_int_aligned); 1589 1590 __ testl(bits, BytesPerShort-1); 1591 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 1592 1593 __ BIND(L_short_aligned); 1594 __ shrptr(count, LogBytesPerShort); // size => short_count 1595 __ movl(count_arg, count); // update 'count' 1596 __ jump(RuntimeAddress(short_copy_entry)); 1597 1598 __ BIND(L_int_aligned); 1599 __ shrptr(count, LogBytesPerInt); // size => int_count 1600 __ movl(count_arg, count); // update 'count' 1601 __ jump(RuntimeAddress(int_copy_entry)); 1602 1603 __ BIND(L_long_aligned); 1604 __ shrptr(count, LogBytesPerLong); // size => qword_count 1605 __ movl(count_arg, count); // update 'count' 1606 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 1607 __ pop(rsi); 1608 __ jump(RuntimeAddress(long_copy_entry)); 1609 1610 return start; 1611 } 1612 1613 1614 // Perform range checks on the proposed arraycopy. 1615 // Smashes src_pos and dst_pos. (Uses them up for temps.) 
1616 void arraycopy_range_checks(Register src, 1617 Register src_pos, 1618 Register dst, 1619 Register dst_pos, 1620 Address& length, 1621 Label& L_failed) { 1622 BLOCK_COMMENT("arraycopy_range_checks:"); 1623 const Register src_end = src_pos; // source array end position 1624 const Register dst_end = dst_pos; // destination array end position 1625 __ addl(src_end, length); // src_pos + length 1626 __ addl(dst_end, length); // dst_pos + length 1627 1628 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 1629 __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes())); 1630 __ jcc(Assembler::above, L_failed); 1631 1632 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 1633 __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes())); 1634 __ jcc(Assembler::above, L_failed); 1635 1636 BLOCK_COMMENT("arraycopy_range_checks done"); 1637 } 1638 1639 1640 // 1641 // Generate generic array copy stubs 1642 // 1643 // Input: 1644 // 4(rsp) - src oop 1645 // 8(rsp) - src_pos 1646 // 12(rsp) - dst oop 1647 // 16(rsp) - dst_pos 1648 // 20(rsp) - element count 1649 // 1650 // Output: 1651 // rax, == 0 - success 1652 // rax, == -1^K - failure, where K is partial transfer count 1653 // 1654 address generate_generic_copy(const char *name, 1655 address entry_jbyte_arraycopy, 1656 address entry_jshort_arraycopy, 1657 address entry_jint_arraycopy, 1658 address entry_oop_arraycopy, 1659 address entry_jlong_arraycopy, 1660 address entry_checkcast_arraycopy) { 1661 Label L_failed, L_failed_0, L_objArray; 1662 1663 { int modulus = CodeEntryAlignment; 1664 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 1665 int advance = target - (__ offset() % modulus); 1666 if (advance < 0) advance += modulus; 1667 if (advance > 0) __ nop(advance); 1668 } 1669 StubCodeMark mark(this, "StubRoutines", name); 1670 1671 // Short-hop target to L_failed. Makes for denser prologue code. 1672 __ BIND(L_failed_0); 1673 __ jmp(L_failed); 1674 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 1675 1676 __ align(CodeEntryAlignment); 1677 address start = __ pc(); 1678 1679 __ enter(); // required for proper stackwalking of RuntimeStub frame 1680 __ push(rsi); 1681 __ push(rdi); 1682 1683 // bump this on entry, not on exit: 1684 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 1685 1686 // Input values 1687 Address SRC (rsp, 12+ 4); 1688 Address SRC_POS (rsp, 12+ 8); 1689 Address DST (rsp, 12+12); 1690 Address DST_POS (rsp, 12+16); 1691 Address LENGTH (rsp, 12+20); 1692 1693 //----------------------------------------------------------------------- 1694 // Assembler stub will be used for this call to arraycopy 1695 // if the following conditions are met: 1696 // 1697 // (1) src and dst must not be null. 1698 // (2) src_pos must not be negative. 1699 // (3) dst_pos must not be negative. 1700 // (4) length must not be negative. 1701 // (5) src klass and dst klass should be the same and not NULL. 1702 // (6) src and dst should be arrays. 1703 // (7) src_pos + length must not exceed length of src. 1704 // (8) dst_pos + length must not exceed length of dst. 
    //

    const Register src     = rax;       // source array oop
    const Register src_pos = rsi;
    const Register dst     = rdx;       // destination array oop
    const Register dst_pos = rdi;
    const Register length  = rcx;       // transfer count

    //  if (src == NULL) return -1;
    __ movptr(src, SRC);      // src oop
    __ testptr(src, src);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ movl2ptr(src_pos, SRC_POS);  // src_pos
    __ testl(src_pos, src_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ movptr(dst, DST);      // dst oop
    __ testptr(dst, dst);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ movl2ptr(dst_pos, DST_POS);  // dst_pos
    __ testl(dst_pos, dst_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (length < 0) return -1;
    __ movl2ptr(length, LENGTH);   // length
    __ testl(length, length);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (src->klass() == NULL) return -1;
    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
    const Register rcx_src_klass = rcx;    // array klass
    __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));

#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L1, L2;
      __ testptr(rcx_src_klass, rcx_src_klass);
      __ jccb(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
      __ jccb(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("assert done");
    }
#endif //ASSERT

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    int lh_offset = klassOopDesc::header_size() * HeapWordSize +
                    Klass::layout_helper_offset_in_bytes();
    Address src_klass_lh_addr(rcx_src_klass, lh_offset);

    // Handle objArrays completely differently...
    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(src_klass_lh_addr, objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ cmpptr(rcx_src_klass, dst_klass_addr);
    __ jccb(Assembler::notEqual, L_failed_0);

    const Register rcx_lh = rcx;  // layout helper
    assert(rcx_lh == rcx_src_klass, "known alias");
    __ movl(rcx_lh, src_klass_lh_addr);

    //  if (!src->is_Array()) return -1;
    __ cmpl(rcx_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp

    // At this point, it is known to be a typeArray (array_tag 0x3).
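    // In C terms, the layout helper is decoded below roughly as follows
    // (illustrative sketch; the shifts and masks are the Klass::_lh_* constants
    // used in the emitted code):
    //
    //   int array_offset = (lh >> _lh_header_size_shift) & _lh_header_size_mask;  // header size in bytes
    //   int log2_elsize  =  lh & _lh_log2_element_size_mask;                      // log2 of element size
    //
    // and the element addresses are then  base + array_offset + (pos << log2_elsize).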
#ifdef ASSERT
    { Label L;
      __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L); // signed cmp
      __ stop("must be a primitive array");
      __ bind(L);
    }
#endif

    assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh);
    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);

    // typeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //
    const Register rsi_offset = rsi; // array offset
    const Register src_array  = src; // src array offset
    const Register dst_array  = dst; // dst array offset
    const Register rdi_elsize = rdi; // log2 element size

    __ mov(rsi_offset, rcx_lh);
    __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
    __ andptr(rsi_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src_array, rsi_offset);  // src array offset
    __ addptr(dst_array, rsi_offset);  // dst array offset
    __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize

    // next registers should be set before the jump to corresponding stub
    const Register from       = src; // source array address
    const Register to         = dst; // destination array address
    const Register count      = rcx; // elements count
    // some of them should be duplicated on stack
#define FROM   Address(rsp, 12+ 4)
#define TO     Address(rsp, 12+ 8)   // Not used now
#define COUNT  Address(rsp, 12+12)   // Only for oop arraycopy

    BLOCK_COMMENT("scale indexes to element size");
    __ movl2ptr(rsi, SRC_POS);  // src_pos
    __ shlptr(rsi);             // src_pos << rcx (log2 elsize)
    assert(src_array == from, "");
    __ addptr(from, rsi);       // from = src_array + SRC_POS << log2 elsize
    __ movl2ptr(rdi, DST_POS);  // dst_pos
    __ shlptr(rdi);             // dst_pos << rcx (log2 elsize)
    assert(dst_array == to, "");
    __ addptr(to,  rdi);        // to = dst_array + DST_POS << log2 elsize
    __ movptr(FROM, from);      // src_addr
    __ mov(rdi_elsize, rcx_lh); // log2 elsize
    __ movl2ptr(count, LENGTH); // elements count

    BLOCK_COMMENT("choose copy loop based on element size");
    __ cmpl(rdi_elsize, 0);

    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
    __ cmpl(rdi_elsize, LogBytesPerShort);
    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy));
    __ cmpl(rdi_elsize, LogBytesPerInt);
    __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy));
#ifdef ASSERT
    __ cmpl(rdi_elsize, LogBytesPerLong);
    __ jccb(Assembler::notEqual, L_failed);
#endif
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(entry_jlong_arraycopy));

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // objArrayKlass
  __ BIND(L_objArray);
    // live at this point:  rcx_src_klass, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
    __ jccb(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass);
    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);

  __ BIND(L_plain_copy);
    __ movl2ptr(count, LENGTH);    // elements count
    __ movl2ptr(src_pos, SRC_POS); // reload src_pos
    __ lea(from, Address(src, src_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ movl2ptr(dst_pos, DST_POS); // reload dst_pos
    __ lea(to,   Address(dst, dst_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movptr(FROM,  from);   // src_addr
    __ movptr(TO,    to);     // dst_addr
    __ movl(COUNT, count);    // count
    __ jump(RuntimeAddress(entry_oop_arraycopy));

  __ BIND(L_checkcast_copy);
    // live at this point:  rcx_src_klass, dst[_pos], src[_pos]
    {
      // Handy offsets:
      int  ek_offset = (klassOopDesc::header_size() * HeapWordSize +
                        objArrayKlass::element_klass_offset_in_bytes());
      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                        Klass::super_check_offset_offset_in_bytes());

      Register rsi_dst_klass = rsi;
      Register rdi_temp      = rdi;
      assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos");
      assert(rdi_temp      == dst_pos, "expected alias w/ dst_pos");
      Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);

      // Before looking at dst.length, make sure dst is also an objArray.
      __ movptr(rsi_dst_klass, dst_klass_addr);
      __ cmpl(dst_klass_lh_addr, objArray_lh);
      __ jccb(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      __ movl2ptr(src_pos, SRC_POS);        // reload rsi
      arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
      // (Now src_pos and dst_pos are killed, but not src and dst.)

      // We'll need this temp (don't forget to pop it after the type check).
      __ push(rbx);
      Register rbx_src_klass = rbx;

      __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
      __ movptr(rsi_dst_klass, dst_klass_addr);
      Address super_check_offset_addr(rsi_dst_klass, sco_offset);
      Label L_fail_array_check;
      generate_type_check(rbx_src_klass,
                          super_check_offset_addr, dst_klass_addr,
                          rdi_temp, NULL, &L_fail_array_check);
      // (On fall-through, we have passed the array type check.)
      __ pop(rbx);
      __ jmp(L_plain_copy);

    __ BIND(L_fail_array_check);
      // Reshuffle arguments so we can call checkcast_arraycopy:

      // match initial saves for checkcast_arraycopy
      // push(rsi);    // already done; see above
      // push(rdi);    // already done; see above
      // push(rbx);    // already done; see above

      // Marshal outgoing arguments now, freeing registers.
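      // The slots written below line up with checkcast_arraycopy's incoming
      // arguments, roughly  checkcast_arraycopy(from, to, length, ckoff, ckval).
      // The 16-byte displacement accounts for the return address plus the three
      // saved registers (rsi, rdi, rbx) now on the stack.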
      Address   from_arg(rsp, 16+ 4);   // from
      Address     to_arg(rsp, 16+ 8);   // to
      Address length_arg(rsp, 16+12);   // elements count
      Address  ckoff_arg(rsp, 16+16);   // super_check_offset
      Address  ckval_arg(rsp, 16+20);   // super_klass

      Address SRC_POS_arg(rsp, 16+ 8);
      Address DST_POS_arg(rsp, 16+16);
      Address  LENGTH_arg(rsp, 16+20);
      // push rbx, changed the incoming offsets (why not just use rbp,??)
      // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");

      __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
      __ movl2ptr(length, LENGTH_arg);    // reload elements count
      __ movl2ptr(src_pos, SRC_POS_arg);  // reload src_pos
      __ movl2ptr(dst_pos, DST_POS_arg);  // reload dst_pos

      __ movptr(ckval_arg, rbx);          // destination element type
      __ movl(rbx, Address(rbx, sco_offset));
      __ movl(ckoff_arg, rbx);            // corresponding class check offset

      __ movl(length_arg, length);        // outgoing length argument

      __ lea(from, Address(src, src_pos, Address::times_ptr,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movptr(from_arg, from);

      __ lea(to, Address(dst, dst_pos, Address::times_ptr,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movptr(to_arg, to);
      __ jump(RuntimeAddress(entry_checkcast_arraycopy));
    }

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
        generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry,
                               "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jbyte_arraycopy =
        generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
                               NULL, "arrayof_jbyte_arraycopy");
    StubRoutines::_jbyte_disjoint_arraycopy =
        generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
                               "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy =
        generate_conjoint_copy(T_BYTE, false, Address::times_1, entry,
                               &entry_jbyte_arraycopy, "jbyte_arraycopy");

    StubRoutines::_arrayof_jshort_disjoint_arraycopy =
        generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry,
                               "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_arraycopy =
        generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
                               NULL, "arrayof_jshort_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy =
        generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
                               "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy =
        generate_conjoint_copy(T_SHORT, false, Address::times_2, entry,
                               &entry_jshort_arraycopy, "jshort_arraycopy");

    // Next arrays are always aligned on 4 bytes at least.
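    // Note: each generate_disjoint_copy() call above and below also records its
    // no-overlap entry point in 'entry'; the matching conjoint stub branches
    // there (via its overlap test) when the ranges turn out not to overlap.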
    StubRoutines::_jint_disjoint_arraycopy =
        generate_disjoint_copy(T_INT, true, Address::times_4, &entry,
                               "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy =
        generate_conjoint_copy(T_INT, true, Address::times_4, entry,
                               &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_oop_disjoint_arraycopy =
        generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
                               "oop_disjoint_arraycopy");
    StubRoutines::_oop_arraycopy =
        generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
                               &entry_oop_arraycopy, "oop_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy =
        generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy =
        generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
                                    "jlong_arraycopy");

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    StubRoutines::_arrayof_jint_disjoint_arraycopy  =
        StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_disjoint_arraycopy   =
        StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_disjoint_arraycopy =
        StubRoutines::_jlong_disjoint_arraycopy;

    StubRoutines::_arrayof_jint_arraycopy  = StubRoutines::_jint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy   = StubRoutines::_oop_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;

    StubRoutines::_checkcast_arraycopy =
        generate_checkcast_copy("checkcast_arraycopy",
                                &entry_checkcast_arraycopy);

    StubRoutines::_unsafe_arraycopy =
        generate_unsafe_copy("unsafe_arraycopy",
                             entry_jbyte_arraycopy,
                             entry_jshort_arraycopy,
                             entry_jint_arraycopy,
                             entry_jlong_arraycopy);

    StubRoutines::_generic_arraycopy =
        generate_generic_copy("generic_arraycopy",
                              entry_jbyte_arraycopy,
                              entry_jshort_arraycopy,
                              entry_jint_arraycopy,
                              entry_oop_arraycopy,
                              entry_jlong_arraycopy,
                              entry_checkcast_arraycopy);
  }

  void generate_math_stubs() {
    {
      StubCodeMark mark(this, "StubRoutines", "log");
      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ flog();
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "log10");
      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ flog10();
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "sin");
      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('s');
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "cos");
      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('c');
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "tan");
      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();

      __ fld_d(Address(rsp, 4));
      __ trigfunc('t');
      __ ret(0);
    }

    // The intrinsic versions of these seem to return the same values as
    // the strict versions.
    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
  }

 public:
  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  enum layout {
    thread_off,    // last_java_sp
    rbp_off,       // callee saved register
    ret_pc,
    framesize
  };

 private:

#undef __
#define __ masm->

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame.
  //
  // Previously the compiler (c2) allowed for callee save registers on Java calls.
  // This is no longer true after adapter frames were removed but could possibly
  // be brought back in the future if the interpreter code was reworked and it
  // was deemed worthwhile. The comment below was left to describe what must
  // happen here if callee saves were resurrected. As it stands now this stub
  // could actually be a vanilla BufferBlob and have no oopMap at all.
  // Since it doesn't make much difference we've chosen to leave it the
  // way it was in the callee save days and keep the comment.

  // If we need to preserve callee-saved values we need a callee-saved oop map and
  // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name, address runtime_entry,
                                   bool restore_saved_exception_pc) {

    int insts_size = 256;
    int locs_size  = 32;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    Register java_thread = rbx;
    __ get_thread(java_thread);
    if (restore_saved_exception_pc) {
      __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
      __ push(rax);
    }

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // pc and rbp, already pushed
    __ subptr(rsp, (framesize-2) * wordSize); // prolog

    // Frame is now completed as far as size and linkage.
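    // (Outline of the remainder of this stub: record frame_complete, pass the
    //  thread as the single C argument, publish last_Java_sp/fp, call
    //  runtime_entry to install the pending exception, then restore state and
    //  jump to the shared forward_exception entry.)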
    int frame_complete = __ pc() - start;

    // push java thread (becomes first argument of C function)
    __ movptr(Address(rsp, thread_off * wordSize), java_thread);

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(java_thread, rsp, rbp, NULL);

    // Call runtime
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));
    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);
    oop_maps->add_gc_map(__ pc() - start, map);

    // restore the thread (cannot use the pushed argument since arguments
    // may be overwritten by C code generated by an optimizing compiler);
    // however can use the register value directly if it is callee saved.
    __ get_thread(java_thread);

    __ reset_last_Java_frame(java_thread, true, false);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif /* ASSERT */
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
    return stub->entry_point();
  }


  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest (MXCSR), all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values
    //       layout is critical for correct loading by FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
  }

  //---------------------------------------------------------------------------
  // Initialization

  void generate_initial() {
    // Generates all stubs and initializes the entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the
    //       benefit seems to be smaller than the disadvantage of having a much more
    //       complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry      = generate_forward_exception();

    StubRoutines::_call_stub_entry              =
                     generate_call_stub(StubRoutines::_call_stub_return_address);
    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry        = generate_catch_exception();

    // These are currently used by Solaris/Intel
    StubRoutines::_atomic_xchg_entry            = generate_atomic_xchg();

    StubRoutines::_handler_for_unsafe_access_entry =
            generate_handler_for_unsafe_access();

    // platform dependent
    create_control_words();

    StubRoutines::x86::_verify_mxcsr_entry         = generate_verify_mxcsr();
    StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
    StubRoutines::_d2i_wrapper                     = generate_d2i_wrapper(T_INT,
                                                       CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    StubRoutines::_d2l_wrapper                     = generate_d2i_wrapper(T_LONG,
                                                       CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
  }


  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    // and need to be relocatable, so they each fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),          false);
    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_ArithmeticException_entry         = generate_throw_exception("ArithmeticException throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException),          true);
    StubRoutines::_throw_NullPointerException_entry        = generate_throw_exception("NullPointerException throw_exception",         CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException),         true);
    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call),false);
    StubRoutines::_throw_StackOverflowError_entry           = generate_throw_exception("StackOverflowError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),           false);

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    generate_math_stubs();
  }


 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration


void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}