1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.inline.hpp" 27 #include "interpreter/interpreter.hpp" 28 #include "nativeInst_sparc.hpp" 29 #include "oops/instanceOop.hpp" 30 #include "oops/method.hpp" 31 #include "oops/objArrayKlass.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "prims/methodHandles.hpp" 34 #include "runtime/frame.inline.hpp" 35 #include "runtime/handles.inline.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/stubCodeGenerator.hpp" 38 #include "runtime/stubRoutines.hpp" 39 #include "runtime/thread.inline.hpp" 40 #include "utilities/top.hpp" 41 #ifdef COMPILER2 42 #include "opto/runtime.hpp" 43 #endif 44 45 // Declaration and definition of StubGenerator (no .hpp file). 46 // For a more detailed description of the stub routine structure 47 // see the comment in stubRoutines.hpp. 48 49 #define __ _masm-> 50 51 #ifdef PRODUCT 52 #define BLOCK_COMMENT(str) /* nothing */ 53 #else 54 #define BLOCK_COMMENT(str) __ block_comment(str) 55 #endif 56 57 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 58 59 // Note: The register L7 is used as L7_thread_cache, and may not be used 60 // any other way within this module. 61 62 63 static const Register& Lstub_temp = L2; 64 65 // ------------------------------------------------------------------------------------------------------------------------- 66 // Stub Code definitions 67 68 static address handle_unsafe_access() { 69 JavaThread* thread = JavaThread::current(); 70 address pc = thread->saved_exception_pc(); 71 address npc = thread->saved_exception_npc(); 72 // pc is the instruction which we must emulate 73 // doing a no-op is fine: return garbage from the load 74 75 // request an async exception 76 thread->set_pending_unsafe_access_error(); 77 78 // return address of next instruction to execute 79 return npc; 80 } 81 82 class StubGenerator: public StubCodeGenerator { 83 private: 84 85 #ifdef PRODUCT 86 #define inc_counter_np(a,b,c) (0) 87 #else 88 #define inc_counter_np(counter, t1, t2) \ 89 BLOCK_COMMENT("inc_counter " #counter); \ 90 __ inc_counter(&counter, t1, t2); 91 #endif 92 93 //---------------------------------------------------------------------------------------------------- 94 // Call stubs are used to call Java from C 95 96 address generate_call_stub(address& return_pc) { 97 StubCodeMark mark(this, "StubRoutines", "call_stub"); 98 address start = __ pc(); 99 100 // Incoming arguments: 101 // 102 // o0 : call wrapper address 103 // o1 : result (address) 104 // o2 : result type 105 // o3 : method 106 // o4 : (interpreter) entry point 107 // o5 : parameters (address) 108 // [sp + 0x5c]: parameter size (in words) 109 // [sp + 0x60]: thread 110 // 111 // +---------------+ <--- sp + 0 112 // | | 113 // . reg save area . 114 // | | 115 // +---------------+ <--- sp + 0x40 116 // | | 117 // . extra 7 slots . 118 // | | 119 // +---------------+ <--- sp + 0x5c 120 // | param. size | 121 // +---------------+ <--- sp + 0x60 122 // | thread | 123 // +---------------+ 124 // | | 125 126 // note: if the link argument position changes, adjust 127 // the code in frame::entry_frame_call_wrapper() 128 129 const Argument link = Argument(0, false); // used only for GC 130 const Argument result = Argument(1, false); 131 const Argument result_type = Argument(2, false); 132 const Argument method = Argument(3, false); 133 const Argument entry_point = Argument(4, false); 134 const Argument parameters = Argument(5, false); 135 const Argument parameter_size = Argument(6, false); 136 const Argument thread = Argument(7, false); 137 138 // setup thread register 139 __ ld_ptr(thread.as_address(), G2_thread); 140 __ reinit_heapbase(); 141 142 #ifdef ASSERT 143 // make sure we have no pending exceptions 144 { const Register t = G3_scratch; 145 Label L; 146 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t); 147 __ br_null_short(t, Assembler::pt, L); 148 __ stop("StubRoutines::call_stub: entered with pending exception"); 149 __ bind(L); 150 } 151 #endif 152 153 // create activation frame & allocate space for parameters 154 { const Register t = G3_scratch; 155 __ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words) 156 __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words) 157 __ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words) 158 __ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes 159 __ neg(t); // negate so it can be used with save 160 __ save(SP, t, SP); // setup new frame 161 } 162 163 // +---------------+ <--- sp + 0 164 // | | 165 // . reg save area . 166 // | | 167 // +---------------+ <--- sp + 0x40 168 // | | 169 // . extra 7 slots . 170 // | | 171 // +---------------+ <--- sp + 0x5c 172 // | empty slot | (only if parameter size is even) 173 // +---------------+ 174 // | | 175 // . parameters . 176 // | | 177 // +---------------+ <--- fp + 0 178 // | | 179 // . reg save area . 180 // | | 181 // +---------------+ <--- fp + 0x40 182 // | | 183 // . extra 7 slots . 184 // | | 185 // +---------------+ <--- fp + 0x5c 186 // | param. size | 187 // +---------------+ <--- fp + 0x60 188 // | thread | 189 // +---------------+ 190 // | | 191 192 // pass parameters if any 193 BLOCK_COMMENT("pass parameters if any"); 194 { const Register src = parameters.as_in().as_register(); 195 const Register dst = Lentry_args; 196 const Register tmp = G3_scratch; 197 const Register cnt = G4_scratch; 198 199 // test if any parameters & setup of Lentry_args 200 Label exit; 201 __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter 202 __ add( FP, STACK_BIAS, dst ); 203 __ cmp_zero_and_br(Assembler::zero, cnt, exit); 204 __ delayed()->sub(dst, BytesPerWord, dst); // setup Lentry_args 205 206 // copy parameters if any 207 Label loop; 208 __ BIND(loop); 209 // Store parameter value 210 __ ld_ptr(src, 0, tmp); 211 __ add(src, BytesPerWord, src); 212 __ st_ptr(tmp, dst, 0); 213 __ deccc(cnt); 214 __ br(Assembler::greater, false, Assembler::pt, loop); 215 __ delayed()->sub(dst, Interpreter::stackElementSize, dst); 216 217 // done 218 __ BIND(exit); 219 } 220 221 // setup parameters, method & call Java function 222 #ifdef ASSERT 223 // layout_activation_impl checks it's notion of saved SP against 224 // this register, so if this changes update it as well. 225 const Register saved_SP = Lscratch; 226 __ mov(SP, saved_SP); // keep track of SP before call 227 #endif 228 229 // setup parameters 230 const Register t = G3_scratch; 231 __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words) 232 __ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes 233 __ sub(FP, t, Gargs); // setup parameter pointer 234 #ifdef _LP64 235 __ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias 236 #endif 237 __ mov(SP, O5_savedSP); 238 239 240 // do the call 241 // 242 // the following register must be setup: 243 // 244 // G2_thread 245 // G5_method 246 // Gargs 247 BLOCK_COMMENT("call Java function"); 248 __ jmpl(entry_point.as_in().as_register(), G0, O7); 249 __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method 250 251 BLOCK_COMMENT("call_stub_return_address:"); 252 return_pc = __ pc(); 253 254 // The callee, if it wasn't interpreted, can return with SP changed so 255 // we can no longer assert of change of SP. 256 257 // store result depending on type 258 // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE 259 // is treated as T_INT) 260 { const Register addr = result .as_in().as_register(); 261 const Register type = result_type.as_in().as_register(); 262 Label is_long, is_float, is_double, is_object, exit; 263 __ cmp(type, T_OBJECT); __ br(Assembler::equal, false, Assembler::pn, is_object); 264 __ delayed()->cmp(type, T_FLOAT); __ br(Assembler::equal, false, Assembler::pn, is_float); 265 __ delayed()->cmp(type, T_DOUBLE); __ br(Assembler::equal, false, Assembler::pn, is_double); 266 __ delayed()->cmp(type, T_LONG); __ br(Assembler::equal, false, Assembler::pn, is_long); 267 __ delayed()->nop(); 268 269 // store int result 270 __ st(O0, addr, G0); 271 272 __ BIND(exit); 273 __ ret(); 274 __ delayed()->restore(); 275 276 __ BIND(is_object); 277 __ ba(exit); 278 __ delayed()->st_ptr(O0, addr, G0); 279 280 __ BIND(is_float); 281 __ ba(exit); 282 __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0); 283 284 __ BIND(is_double); 285 __ ba(exit); 286 __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0); 287 288 __ BIND(is_long); 289 #ifdef _LP64 290 __ ba(exit); 291 __ delayed()->st_long(O0, addr, G0); // store entire long 292 #else 293 #if defined(COMPILER2) 294 // All return values are where we want them, except for Longs. C2 returns 295 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. 296 // Since the interpreter will return longs in G1 and O0/O1 in the 32bit 297 // build we simply always use G1. 298 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to 299 // do this here. Unfortunately if we did a rethrow we'd see an machepilog node 300 // first which would move g1 -> O0/O1 and destroy the exception we were throwing. 301 302 __ ba(exit); 303 __ delayed()->stx(G1, addr, G0); // store entire long 304 #else 305 __ st(O1, addr, BytesPerInt); 306 __ ba(exit); 307 __ delayed()->st(O0, addr, G0); 308 #endif /* COMPILER2 */ 309 #endif /* _LP64 */ 310 } 311 return start; 312 } 313 314 315 //---------------------------------------------------------------------------------------------------- 316 // Return point for a Java call if there's an exception thrown in Java code. 317 // The exception is caught and transformed into a pending exception stored in 318 // JavaThread that can be tested from within the VM. 319 // 320 // Oexception: exception oop 321 322 address generate_catch_exception() { 323 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 324 325 address start = __ pc(); 326 // verify that thread corresponds 327 __ verify_thread(); 328 329 const Register& temp_reg = Gtemp; 330 Address pending_exception_addr (G2_thread, Thread::pending_exception_offset()); 331 Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ()); 332 Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ()); 333 334 // set pending exception 335 __ verify_oop(Oexception); 336 __ st_ptr(Oexception, pending_exception_addr); 337 __ set((intptr_t)__FILE__, temp_reg); 338 __ st_ptr(temp_reg, exception_file_offset_addr); 339 __ set((intptr_t)__LINE__, temp_reg); 340 __ st(temp_reg, exception_line_offset_addr); 341 342 // complete return to VM 343 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 344 345 AddressLiteral stub_ret(StubRoutines::_call_stub_return_address); 346 __ jump_to(stub_ret, temp_reg); 347 __ delayed()->nop(); 348 349 return start; 350 } 351 352 353 //---------------------------------------------------------------------------------------------------- 354 // Continuation point for runtime calls returning with a pending exception 355 // The pending exception check happened in the runtime or native call stub 356 // The pending exception in Thread is converted into a Java-level exception 357 // 358 // Contract with Java-level exception handler: O0 = exception 359 // O1 = throwing pc 360 361 address generate_forward_exception() { 362 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 363 address start = __ pc(); 364 365 // Upon entry, O7 has the return address returning into Java 366 // (interpreted or compiled) code; i.e. the return address 367 // becomes the throwing pc. 368 369 const Register& handler_reg = Gtemp; 370 371 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 372 373 #ifdef ASSERT 374 // make sure that this code is only executed if there is a pending exception 375 { Label L; 376 __ ld_ptr(exception_addr, Gtemp); 377 __ br_notnull_short(Gtemp, Assembler::pt, L); 378 __ stop("StubRoutines::forward exception: no pending exception (1)"); 379 __ bind(L); 380 } 381 #endif 382 383 // compute exception handler into handler_reg 384 __ get_thread(); 385 __ ld_ptr(exception_addr, Oexception); 386 __ verify_oop(Oexception); 387 __ save_frame(0); // compensates for compiler weakness 388 __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC 389 BLOCK_COMMENT("call exception_handler_for_return_address"); 390 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch); 391 __ mov(O0, handler_reg); 392 __ restore(); // compensates for compiler weakness 393 394 __ ld_ptr(exception_addr, Oexception); 395 __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC 396 397 #ifdef ASSERT 398 // make sure exception is set 399 { Label L; 400 __ br_notnull_short(Oexception, Assembler::pt, L); 401 __ stop("StubRoutines::forward exception: no pending exception (2)"); 402 __ bind(L); 403 } 404 #endif 405 // jump to exception handler 406 __ jmp(handler_reg, 0); 407 // clear pending exception 408 __ delayed()->st_ptr(G0, exception_addr); 409 410 return start; 411 } 412 413 // Safefetch stubs. 414 void generate_safefetch(const char* name, int size, address* entry, 415 address* fault_pc, address* continuation_pc) { 416 // safefetch signatures: 417 // int SafeFetch32(int* adr, int errValue); 418 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 419 // 420 // arguments: 421 // o0 = adr 422 // o1 = errValue 423 // 424 // result: 425 // o0 = *adr or errValue 426 427 StubCodeMark mark(this, "StubRoutines", name); 428 429 // Entry point, pc or function descriptor. 430 __ align(CodeEntryAlignment); 431 *entry = __ pc(); 432 433 __ mov(O0, G1); // g1 = o0 434 __ mov(O1, O0); // o0 = o1 435 // Load *adr into c_rarg1, may fault. 436 *fault_pc = __ pc(); 437 switch (size) { 438 case 4: 439 // int32_t 440 __ ldsw(G1, 0, O0); // o0 = [g1] 441 break; 442 case 8: 443 // int64_t 444 __ ldx(G1, 0, O0); // o0 = [g1] 445 break; 446 default: 447 ShouldNotReachHere(); 448 } 449 450 // return errValue or *adr 451 *continuation_pc = __ pc(); 452 // By convention with the trap handler we ensure there is a non-CTI 453 // instruction in the trap shadow. 454 __ nop(); 455 __ retl(); 456 __ delayed()->nop(); 457 } 458 459 //------------------------------------------------------------------------------------------------------------------------ 460 // Continuation point for throwing of implicit exceptions that are not handled in 461 // the current activation. Fabricates an exception oop and initiates normal 462 // exception dispatching in this frame. Only callee-saved registers are preserved 463 // (through the normal register window / RegisterMap handling). 464 // If the compiler needs all registers to be preserved between the fault 465 // point and the exception handler then it must assume responsibility for that in 466 // AbstractCompiler::continuation_for_implicit_null_exception or 467 // continuation_for_implicit_division_by_zero_exception. All other implicit 468 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are 469 // either at call sites or otherwise assume that stack unwinding will be initiated, 470 // so caller saved registers were assumed volatile in the compiler. 471 472 // Note that we generate only this stub into a RuntimeStub, because it needs to be 473 // properly traversed and ignored during GC, so we change the meaning of the "__" 474 // macro within this method. 475 #undef __ 476 #define __ masm-> 477 478 address generate_throw_exception(const char* name, address runtime_entry, 479 Register arg1 = noreg, Register arg2 = noreg) { 480 #ifdef ASSERT 481 int insts_size = VerifyThread ? 1 * K : 600; 482 #else 483 int insts_size = VerifyThread ? 1 * K : 256; 484 #endif /* ASSERT */ 485 int locs_size = 32; 486 487 CodeBuffer code(name, insts_size, locs_size); 488 MacroAssembler* masm = new MacroAssembler(&code); 489 490 __ verify_thread(); 491 492 // This is an inlined and slightly modified version of call_VM 493 // which has the ability to fetch the return PC out of thread-local storage 494 __ assert_not_delayed(); 495 496 // Note that we always push a frame because on the SPARC 497 // architecture, for all of our implicit exception kinds at call 498 // sites, the implicit exception is taken before the callee frame 499 // is pushed. 500 __ save_frame(0); 501 502 int frame_complete = __ offset(); 503 504 // Note that we always have a runtime stub frame on the top of stack by this point 505 Register last_java_sp = SP; 506 // 64-bit last_java_sp is biased! 507 __ set_last_Java_frame(last_java_sp, G0); 508 if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early 509 __ save_thread(noreg); 510 if (arg1 != noreg) { 511 assert(arg2 != O1, "clobbered"); 512 __ mov(arg1, O1); 513 } 514 if (arg2 != noreg) { 515 __ mov(arg2, O2); 516 } 517 // do the call 518 BLOCK_COMMENT("call runtime_entry"); 519 __ call(runtime_entry, relocInfo::runtime_call_type); 520 if (!VerifyThread) 521 __ delayed()->mov(G2_thread, O0); // pass thread as first argument 522 else 523 __ delayed()->nop(); // (thread already passed) 524 __ restore_thread(noreg); 525 __ reset_last_Java_frame(); 526 527 // check for pending exceptions. use Gtemp as scratch register. 528 #ifdef ASSERT 529 Label L; 530 531 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 532 Register scratch_reg = Gtemp; 533 __ ld_ptr(exception_addr, scratch_reg); 534 __ br_notnull_short(scratch_reg, Assembler::pt, L); 535 __ should_not_reach_here(); 536 __ bind(L); 537 #endif // ASSERT 538 BLOCK_COMMENT("call forward_exception_entry"); 539 __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 540 // we use O7 linkage so that forward_exception_entry has the issuing PC 541 __ delayed()->restore(); 542 543 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false); 544 return stub->entry_point(); 545 } 546 547 #undef __ 548 #define __ _masm-> 549 550 551 // Generate a routine that sets all the registers so we 552 // can tell if the stop routine prints them correctly. 553 address generate_test_stop() { 554 StubCodeMark mark(this, "StubRoutines", "test_stop"); 555 address start = __ pc(); 556 557 int i; 558 559 __ save_frame(0); 560 561 static jfloat zero = 0.0, one = 1.0; 562 563 // put addr in L0, then load through L0 to F0 564 __ set((intptr_t)&zero, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F0); 565 __ set((intptr_t)&one, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1 566 567 // use add to put 2..18 in F2..F18 568 for ( i = 2; i <= 18; ++i ) { 569 __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i)); 570 } 571 572 // Now put double 2 in F16, double 18 in F18 573 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 ); 574 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 ); 575 576 // use add to put 20..32 in F20..F32 577 for (i = 20; i < 32; i += 2) { 578 __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i)); 579 } 580 581 // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's 582 for ( i = 0; i < 8; ++i ) { 583 if (i < 6) { 584 __ set( i, as_iRegister(i)); 585 __ set(16 + i, as_oRegister(i)); 586 __ set(24 + i, as_gRegister(i)); 587 } 588 __ set( 8 + i, as_lRegister(i)); 589 } 590 591 __ stop("testing stop"); 592 593 594 __ ret(); 595 __ delayed()->restore(); 596 597 return start; 598 } 599 600 601 address generate_stop_subroutine() { 602 StubCodeMark mark(this, "StubRoutines", "stop_subroutine"); 603 address start = __ pc(); 604 605 __ stop_subroutine(); 606 607 return start; 608 } 609 610 address generate_flush_callers_register_windows() { 611 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows"); 612 address start = __ pc(); 613 614 __ flushw(); 615 __ retl(false); 616 __ delayed()->add( FP, STACK_BIAS, O0 ); 617 // The returned value must be a stack pointer whose register save area 618 // is flushed, and will stay flushed while the caller executes. 619 620 return start; 621 } 622 623 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). 624 // 625 // Arguments: 626 // 627 // exchange_value: O0 628 // dest: O1 629 // 630 // Results: 631 // 632 // O0: the value previously stored in dest 633 // 634 address generate_atomic_xchg() { 635 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 636 address start = __ pc(); 637 638 if (UseCASForSwap) { 639 // Use CAS instead of swap, just in case the MP hardware 640 // prefers to work with just one kind of synch. instruction. 641 Label retry; 642 __ BIND(retry); 643 __ mov(O0, O3); // scratch copy of exchange value 644 __ ld(O1, 0, O2); // observe the previous value 645 // try to replace O2 with O3 646 __ cas(O1, O2, O3); 647 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 648 649 __ retl(false); 650 __ delayed()->mov(O2, O0); // report previous value to caller 651 } else { 652 __ retl(false); 653 __ delayed()->swap(O1, 0, O0); 654 } 655 656 return start; 657 } 658 659 660 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) 661 // 662 // Arguments: 663 // 664 // exchange_value: O0 665 // dest: O1 666 // compare_value: O2 667 // 668 // Results: 669 // 670 // O0: the value previously stored in dest 671 // 672 address generate_atomic_cmpxchg() { 673 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 674 address start = __ pc(); 675 676 // cmpxchg(dest, compare_value, exchange_value) 677 __ cas(O1, O2, O0); 678 __ retl(false); 679 __ delayed()->nop(); 680 681 return start; 682 } 683 684 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) 685 // 686 // Arguments: 687 // 688 // exchange_value: O1:O0 689 // dest: O2 690 // compare_value: O4:O3 691 // 692 // Results: 693 // 694 // O1:O0: the value previously stored in dest 695 // 696 // Overwrites: G1,G2,G3 697 // 698 address generate_atomic_cmpxchg_long() { 699 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 700 address start = __ pc(); 701 702 __ sllx(O0, 32, O0); 703 __ srl(O1, 0, O1); 704 __ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value 705 __ sllx(O3, 32, O3); 706 __ srl(O4, 0, O4); 707 __ or3(O3,O4,O3); // O3 holds 64-bit value from exchange_value 708 __ casx(O2, O3, O0); 709 __ srl(O0, 0, O1); // unpacked return value in O1:O0 710 __ retl(false); 711 __ delayed()->srlx(O0, 32, O0); 712 713 return start; 714 } 715 716 717 // Support for jint Atomic::add(jint add_value, volatile jint* dest). 718 // 719 // Arguments: 720 // 721 // add_value: O0 (e.g., +1 or -1) 722 // dest: O1 723 // 724 // Results: 725 // 726 // O0: the new value stored in dest 727 // 728 // Overwrites: O3 729 // 730 address generate_atomic_add() { 731 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 732 address start = __ pc(); 733 __ BIND(_atomic_add_stub); 734 735 Label(retry); 736 __ BIND(retry); 737 738 __ lduw(O1, 0, O2); 739 __ add(O0, O2, O3); 740 __ cas(O1, O2, O3); 741 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 742 __ retl(false); 743 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 744 745 return start; 746 } 747 Label _atomic_add_stub; // called from other stubs 748 749 750 //------------------------------------------------------------------------------------------------------------------------ 751 // The following routine generates a subroutine to throw an asynchronous 752 // UnknownError when an unsafe access gets a fault that could not be 753 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) 754 // 755 // Arguments : 756 // 757 // trapping PC: O7 758 // 759 // Results: 760 // posts an asynchronous exception, skips the trapping instruction 761 // 762 763 address generate_handler_for_unsafe_access() { 764 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 765 address start = __ pc(); 766 767 const int preserve_register_words = (64 * 2); 768 Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS); 769 770 Register Lthread = L7_thread_cache; 771 int i; 772 773 __ save_frame(0); 774 __ mov(G1, L1); 775 __ mov(G2, L2); 776 __ mov(G3, L3); 777 __ mov(G4, L4); 778 __ mov(G5, L5); 779 for (i = 0; i < 64; i += 2) { 780 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize); 781 } 782 783 address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access); 784 BLOCK_COMMENT("call handle_unsafe_access"); 785 __ call(entry_point, relocInfo::runtime_call_type); 786 __ delayed()->nop(); 787 788 __ mov(L1, G1); 789 __ mov(L2, G2); 790 __ mov(L3, G3); 791 __ mov(L4, G4); 792 __ mov(L5, G5); 793 for (i = 0; i < 64; i += 2) { 794 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize); 795 } 796 797 __ verify_thread(); 798 799 __ jmp(O0, 0); 800 __ delayed()->restore(); 801 802 return start; 803 } 804 805 806 // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super ); 807 // Arguments : 808 // 809 // ret : O0, returned 810 // icc/xcc: set as O0 (depending on wordSize) 811 // sub : O1, argument, not changed 812 // super: O2, argument, not changed 813 // raddr: O7, blown by call 814 address generate_partial_subtype_check() { 815 __ align(CodeEntryAlignment); 816 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 817 address start = __ pc(); 818 Label miss; 819 820 #if defined(COMPILER2) && !defined(_LP64) 821 // Do not use a 'save' because it blows the 64-bit O registers. 822 __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned) 823 __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize); 824 __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize); 825 __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize); 826 __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize); 827 Register Rret = O0; 828 Register Rsub = O1; 829 Register Rsuper = O2; 830 #else 831 __ save_frame(0); 832 Register Rret = I0; 833 Register Rsub = I1; 834 Register Rsuper = I2; 835 #endif 836 837 Register L0_ary_len = L0; 838 Register L1_ary_ptr = L1; 839 Register L2_super = L2; 840 Register L3_index = L3; 841 842 __ check_klass_subtype_slow_path(Rsub, Rsuper, 843 L0, L1, L2, L3, 844 NULL, &miss); 845 846 // Match falls through here. 847 __ addcc(G0,0,Rret); // set Z flags, Z result 848 849 #if defined(COMPILER2) && !defined(_LP64) 850 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0); 851 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1); 852 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2); 853 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3); 854 __ retl(); // Result in Rret is zero; flags set to Z 855 __ delayed()->add(SP,4*wordSize,SP); 856 #else 857 __ ret(); // Result in Rret is zero; flags set to Z 858 __ delayed()->restore(); 859 #endif 860 861 __ BIND(miss); 862 __ addcc(G0,1,Rret); // set NZ flags, NZ result 863 864 #if defined(COMPILER2) && !defined(_LP64) 865 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0); 866 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1); 867 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2); 868 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3); 869 __ retl(); // Result in Rret is != 0; flags set to NZ 870 __ delayed()->add(SP,4*wordSize,SP); 871 #else 872 __ ret(); // Result in Rret is != 0; flags set to NZ 873 __ delayed()->restore(); 874 #endif 875 876 return start; 877 } 878 879 880 // Called from MacroAssembler::verify_oop 881 // 882 address generate_verify_oop_subroutine() { 883 StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 884 885 address start = __ pc(); 886 887 __ verify_oop_subroutine(); 888 889 return start; 890 } 891 892 893 // 894 // Verify that a register contains clean 32-bits positive value 895 // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax). 896 // 897 // Input: 898 // Rint - 32-bits value 899 // Rtmp - scratch 900 // 901 void assert_clean_int(Register Rint, Register Rtmp) { 902 #if defined(ASSERT) && defined(_LP64) 903 __ signx(Rint, Rtmp); 904 __ cmp(Rint, Rtmp); 905 __ breakpoint_trap(Assembler::notEqual, Assembler::xcc); 906 #endif 907 } 908 909 // 910 // Generate overlap test for array copy stubs 911 // 912 // Input: 913 // O0 - array1 914 // O1 - array2 915 // O2 - element count 916 // 917 // Kills temps: O3, O4 918 // 919 void array_overlap_test(address no_overlap_target, int log2_elem_size) { 920 assert(no_overlap_target != NULL, "must be generated"); 921 array_overlap_test(no_overlap_target, NULL, log2_elem_size); 922 } 923 void array_overlap_test(Label& L_no_overlap, int log2_elem_size) { 924 array_overlap_test(NULL, &L_no_overlap, log2_elem_size); 925 } 926 void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) { 927 const Register from = O0; 928 const Register to = O1; 929 const Register count = O2; 930 const Register to_from = O3; // to - from 931 const Register byte_count = O4; // count << log2_elem_size 932 933 __ subcc(to, from, to_from); 934 __ sll_ptr(count, log2_elem_size, byte_count); 935 if (NOLp == NULL) 936 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target); 937 else 938 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp)); 939 __ delayed()->cmp(to_from, byte_count); 940 if (NOLp == NULL) 941 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target); 942 else 943 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp)); 944 __ delayed()->nop(); 945 } 946 947 // 948 // Generate pre-write barrier for array. 949 // 950 // Input: 951 // addr - register containing starting address 952 // count - register containing element count 953 // tmp - scratch register 954 // 955 // The input registers are overwritten. 956 // 957 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 958 BarrierSet* bs = Universe::heap()->barrier_set(); 959 switch (bs->kind()) { 960 case BarrierSet::G1SATBCT: 961 case BarrierSet::G1SATBCTLogging: 962 // With G1, don't generate the call if we statically know that the target in uninitialized 963 if (!dest_uninitialized) { 964 __ save_frame(0); 965 // Save the necessary global regs... will be used after. 966 if (addr->is_global()) { 967 __ mov(addr, L0); 968 } 969 if (count->is_global()) { 970 __ mov(count, L1); 971 } 972 __ mov(addr->after_save(), O0); 973 // Get the count into O1 974 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); 975 __ delayed()->mov(count->after_save(), O1); 976 if (addr->is_global()) { 977 __ mov(L0, addr); 978 } 979 if (count->is_global()) { 980 __ mov(L1, count); 981 } 982 __ restore(); 983 } 984 break; 985 case BarrierSet::CardTableModRef: 986 case BarrierSet::CardTableExtension: 987 case BarrierSet::ModRef: 988 break; 989 default: 990 ShouldNotReachHere(); 991 } 992 } 993 // 994 // Generate post-write barrier for array. 995 // 996 // Input: 997 // addr - register containing starting address 998 // count - register containing element count 999 // tmp - scratch register 1000 // 1001 // The input registers are overwritten. 1002 // 1003 void gen_write_ref_array_post_barrier(Register addr, Register count, 1004 Register tmp) { 1005 BarrierSet* bs = Universe::heap()->barrier_set(); 1006 1007 switch (bs->kind()) { 1008 case BarrierSet::G1SATBCT: 1009 case BarrierSet::G1SATBCTLogging: 1010 { 1011 // Get some new fresh output registers. 1012 __ save_frame(0); 1013 __ mov(addr->after_save(), O0); 1014 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); 1015 __ delayed()->mov(count->after_save(), O1); 1016 __ restore(); 1017 } 1018 break; 1019 case BarrierSet::CardTableModRef: 1020 case BarrierSet::CardTableExtension: 1021 { 1022 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1023 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1024 assert_different_registers(addr, count, tmp); 1025 1026 Label L_loop; 1027 1028 __ sll_ptr(count, LogBytesPerHeapOop, count); 1029 __ sub(count, BytesPerHeapOop, count); 1030 __ add(count, addr, count); 1031 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) 1032 __ srl_ptr(addr, CardTableModRefBS::card_shift, addr); 1033 __ srl_ptr(count, CardTableModRefBS::card_shift, count); 1034 __ sub(count, addr, count); 1035 AddressLiteral rs(ct->byte_map_base); 1036 __ set(rs, tmp); 1037 __ BIND(L_loop); 1038 __ stb(G0, tmp, addr); 1039 __ subcc(count, 1, count); 1040 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 1041 __ delayed()->add(addr, 1, addr); 1042 } 1043 break; 1044 case BarrierSet::ModRef: 1045 break; 1046 default: 1047 ShouldNotReachHere(); 1048 } 1049 } 1050 1051 // 1052 // Generate main code for disjoint arraycopy 1053 // 1054 typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec, 1055 Label& L_loop, bool use_prefetch, bool use_bis); 1056 1057 void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size, 1058 int iter_size, CopyLoopFunc copy_loop_func) { 1059 Label L_copy; 1060 1061 assert(log2_elem_size <= 3, "the following code should be changed"); 1062 int count_dec = 16>>log2_elem_size; 1063 1064 int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance); 1065 assert(prefetch_dist < 4096, "invalid value"); 1066 prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size 1067 int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count 1068 1069 if (UseBlockCopy) { 1070 Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy; 1071 1072 // 64 bytes tail + bytes copied in one loop iteration 1073 int tail_size = 64 + iter_size; 1074 int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size; 1075 // Use BIS copy only for big arrays since it requires membar. 1076 __ set(block_copy_count, O4); 1077 __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy); 1078 // This code is for disjoint source and destination: 1079 // to <= from || to >= from+count 1080 // but BIS will stomp over 'from' if (to > from-tail_size && to <= from) 1081 __ sub(from, to, O4); 1082 __ srax(O4, 4, O4); // divide by 16 since following short branch have only 5 bits for imm. 1083 __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy); 1084 1085 __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 1086 // BIS should not be used to copy tail (64 bytes+iter_size) 1087 // to avoid zeroing of following values. 1088 __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0 1089 1090 if (prefetch_count > 0) { // rounded up to one iteration count 1091 // Do prefetching only if copy size is bigger 1092 // than prefetch distance. 1093 __ set(prefetch_count, O4); 1094 __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy); 1095 __ sub(count, prefetch_count, count); 1096 1097 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true); 1098 __ add(count, prefetch_count, count); // restore count 1099 1100 } // prefetch_count > 0 1101 1102 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true); 1103 __ add(count, (tail_size>>log2_elem_size), count); // restore count 1104 1105 __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT); 1106 // BIS needs membar. 1107 __ membar(Assembler::StoreLoad); 1108 // Copy tail 1109 __ ba_short(L_copy); 1110 1111 __ BIND(L_skip_block_copy); 1112 } // UseBlockCopy 1113 1114 if (prefetch_count > 0) { // rounded up to one iteration count 1115 // Do prefetching only if copy size is bigger 1116 // than prefetch distance. 1117 __ set(prefetch_count, O4); 1118 __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy); 1119 __ sub(count, prefetch_count, count); 1120 1121 Label L_copy_prefetch; 1122 (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false); 1123 __ add(count, prefetch_count, count); // restore count 1124 1125 } // prefetch_count > 0 1126 1127 (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false); 1128 } 1129 1130 1131 1132 // 1133 // Helper methods for copy_16_bytes_forward_with_shift() 1134 // 1135 void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec, 1136 Label& L_loop, bool use_prefetch, bool use_bis) { 1137 1138 const Register left_shift = G1; // left shift bit counter 1139 const Register right_shift = G5; // right shift bit counter 1140 1141 __ align(OptoLoopAlignment); 1142 __ BIND(L_loop); 1143 if (use_prefetch) { 1144 if (ArraycopySrcPrefetchDistance > 0) { 1145 __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads); 1146 } 1147 if (ArraycopyDstPrefetchDistance > 0) { 1148 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads); 1149 } 1150 } 1151 __ ldx(from, 0, O4); 1152 __ ldx(from, 8, G4); 1153 __ inc(to, 16); 1154 __ inc(from, 16); 1155 __ deccc(count, count_dec); // Can we do next iteration after this one? 1156 __ srlx(O4, right_shift, G3); 1157 __ bset(G3, O3); 1158 __ sllx(O4, left_shift, O4); 1159 __ srlx(G4, right_shift, G3); 1160 __ bset(G3, O4); 1161 if (use_bis) { 1162 __ stxa(O3, to, -16); 1163 __ stxa(O4, to, -8); 1164 } else { 1165 __ stx(O3, to, -16); 1166 __ stx(O4, to, -8); 1167 } 1168 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 1169 __ delayed()->sllx(G4, left_shift, O3); 1170 } 1171 1172 // Copy big chunks forward with shift 1173 // 1174 // Inputs: 1175 // from - source arrays 1176 // to - destination array aligned to 8-bytes 1177 // count - elements count to copy >= the count equivalent to 16 bytes 1178 // count_dec - elements count's decrement equivalent to 16 bytes 1179 // L_copy_bytes - copy exit label 1180 // 1181 void copy_16_bytes_forward_with_shift(Register from, Register to, 1182 Register count, int log2_elem_size, Label& L_copy_bytes) { 1183 Label L_aligned_copy, L_copy_last_bytes; 1184 assert(log2_elem_size <= 3, "the following code should be changed"); 1185 int count_dec = 16>>log2_elem_size; 1186 1187 // if both arrays have the same alignment mod 8, do 8 bytes aligned copy 1188 __ andcc(from, 7, G1); // misaligned bytes 1189 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1190 __ delayed()->nop(); 1191 1192 const Register left_shift = G1; // left shift bit counter 1193 const Register right_shift = G5; // right shift bit counter 1194 1195 __ sll(G1, LogBitsPerByte, left_shift); 1196 __ mov(64, right_shift); 1197 __ sub(right_shift, left_shift, right_shift); 1198 1199 // 1200 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1201 // to form 2 aligned 8-bytes chunks to store. 1202 // 1203 __ dec(count, count_dec); // Pre-decrement 'count' 1204 __ andn(from, 7, from); // Align address 1205 __ ldx(from, 0, O3); 1206 __ inc(from, 8); 1207 __ sllx(O3, left_shift, O3); 1208 1209 disjoint_copy_core(from, to, count, log2_elem_size, 16, copy_16_bytes_shift_loop); 1210 1211 __ inccc(count, count_dec>>1 ); // + 8 bytes 1212 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes); 1213 __ delayed()->inc(count, count_dec>>1); // restore 'count' 1214 1215 // copy 8 bytes, part of them already loaded in O3 1216 __ ldx(from, 0, O4); 1217 __ inc(to, 8); 1218 __ inc(from, 8); 1219 __ srlx(O4, right_shift, G3); 1220 __ bset(O3, G3); 1221 __ stx(G3, to, -8); 1222 1223 __ BIND(L_copy_last_bytes); 1224 __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes 1225 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes); 1226 __ delayed()->sub(from, right_shift, from); // restore address 1227 1228 __ BIND(L_aligned_copy); 1229 } 1230 1231 // Copy big chunks backward with shift 1232 // 1233 // Inputs: 1234 // end_from - source arrays end address 1235 // end_to - destination array end address aligned to 8-bytes 1236 // count - elements count to copy >= the count equivalent to 16 bytes 1237 // count_dec - elements count's decrement equivalent to 16 bytes 1238 // L_aligned_copy - aligned copy exit label 1239 // L_copy_bytes - copy exit label 1240 // 1241 void copy_16_bytes_backward_with_shift(Register end_from, Register end_to, 1242 Register count, int count_dec, 1243 Label& L_aligned_copy, Label& L_copy_bytes) { 1244 Label L_loop, L_copy_last_bytes; 1245 1246 // if both arrays have the same alignment mod 8, do 8 bytes aligned copy 1247 __ andcc(end_from, 7, G1); // misaligned bytes 1248 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1249 __ delayed()->deccc(count, count_dec); // Pre-decrement 'count' 1250 1251 const Register left_shift = G1; // left shift bit counter 1252 const Register right_shift = G5; // right shift bit counter 1253 1254 __ sll(G1, LogBitsPerByte, left_shift); 1255 __ mov(64, right_shift); 1256 __ sub(right_shift, left_shift, right_shift); 1257 1258 // 1259 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1260 // to form 2 aligned 8-bytes chunks to store. 1261 // 1262 __ andn(end_from, 7, end_from); // Align address 1263 __ ldx(end_from, 0, O3); 1264 __ align(OptoLoopAlignment); 1265 __ BIND(L_loop); 1266 __ ldx(end_from, -8, O4); 1267 __ deccc(count, count_dec); // Can we do next iteration after this one? 1268 __ ldx(end_from, -16, G4); 1269 __ dec(end_to, 16); 1270 __ dec(end_from, 16); 1271 __ srlx(O3, right_shift, O3); 1272 __ sllx(O4, left_shift, G3); 1273 __ bset(G3, O3); 1274 __ stx(O3, end_to, 8); 1275 __ srlx(O4, right_shift, O4); 1276 __ sllx(G4, left_shift, G3); 1277 __ bset(G3, O4); 1278 __ stx(O4, end_to, 0); 1279 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 1280 __ delayed()->mov(G4, O3); 1281 1282 __ inccc(count, count_dec>>1 ); // + 8 bytes 1283 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes); 1284 __ delayed()->inc(count, count_dec>>1); // restore 'count' 1285 1286 // copy 8 bytes, part of them already loaded in O3 1287 __ ldx(end_from, -8, O4); 1288 __ dec(end_to, 8); 1289 __ dec(end_from, 8); 1290 __ srlx(O3, right_shift, O3); 1291 __ sllx(O4, left_shift, G3); 1292 __ bset(O3, G3); 1293 __ stx(G3, end_to, 0); 1294 1295 __ BIND(L_copy_last_bytes); 1296 __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes 1297 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes); 1298 __ delayed()->add(end_from, left_shift, end_from); // restore address 1299 } 1300 1301 // 1302 // Generate stub for disjoint byte copy. If "aligned" is true, the 1303 // "from" and "to" addresses are assumed to be heapword aligned. 1304 // 1305 // Arguments for generated stub: 1306 // from: O0 1307 // to: O1 1308 // count: O2 treated as signed 1309 // 1310 address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) { 1311 __ align(CodeEntryAlignment); 1312 StubCodeMark mark(this, "StubRoutines", name); 1313 address start = __ pc(); 1314 1315 Label L_skip_alignment, L_align; 1316 Label L_copy_byte, L_copy_byte_loop, L_exit; 1317 1318 const Register from = O0; // source array address 1319 const Register to = O1; // destination array address 1320 const Register count = O2; // elements count 1321 const Register offset = O5; // offset from start of arrays 1322 // O3, O4, G3, G4 are used as temp registers 1323 1324 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1325 1326 if (entry != NULL) { 1327 *entry = __ pc(); 1328 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1329 BLOCK_COMMENT("Entry:"); 1330 } 1331 1332 // for short arrays, just do single element copy 1333 __ cmp(count, 23); // 16 + 7 1334 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte); 1335 __ delayed()->mov(G0, offset); 1336 1337 if (aligned) { 1338 // 'aligned' == true when it is known statically during compilation 1339 // of this arraycopy call site that both 'from' and 'to' addresses 1340 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 1341 // 1342 // Aligned arrays have 4 bytes alignment in 32-bits VM 1343 // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM 1344 // 1345 #ifndef _LP64 1346 // copy a 4-bytes word if necessary to align 'to' to 8 bytes 1347 __ andcc(to, 7, G0); 1348 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment); 1349 __ delayed()->ld(from, 0, O3); 1350 __ inc(from, 4); 1351 __ inc(to, 4); 1352 __ dec(count, 4); 1353 __ st(O3, to, -4); 1354 __ BIND(L_skip_alignment); 1355 #endif 1356 } else { 1357 // copy bytes to align 'to' on 8 byte boundary 1358 __ andcc(to, 7, G1); // misaligned bytes 1359 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1360 __ delayed()->neg(G1); 1361 __ inc(G1, 8); // bytes need to copy to next 8-bytes alignment 1362 __ sub(count, G1, count); 1363 __ BIND(L_align); 1364 __ ldub(from, 0, O3); 1365 __ deccc(G1); 1366 __ inc(from); 1367 __ stb(O3, to, 0); 1368 __ br(Assembler::notZero, false, Assembler::pt, L_align); 1369 __ delayed()->inc(to); 1370 __ BIND(L_skip_alignment); 1371 } 1372 #ifdef _LP64 1373 if (!aligned) 1374 #endif 1375 { 1376 // Copy with shift 16 bytes per iteration if arrays do not have 1377 // the same alignment mod 8, otherwise fall through to the next 1378 // code for aligned copy. 1379 // The compare above (count >= 23) guarantes 'count' >= 16 bytes. 1380 // Also jump over aligned copy after the copy with shift completed. 1381 1382 copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte); 1383 } 1384 1385 // Both array are 8 bytes aligned, copy 16 bytes at a time 1386 __ and3(count, 7, G4); // Save count 1387 __ srl(count, 3, count); 1388 generate_disjoint_long_copy_core(aligned); 1389 __ mov(G4, count); // Restore count 1390 1391 // copy tailing bytes 1392 __ BIND(L_copy_byte); 1393 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1394 __ align(OptoLoopAlignment); 1395 __ BIND(L_copy_byte_loop); 1396 __ ldub(from, offset, O3); 1397 __ deccc(count); 1398 __ stb(O3, to, offset); 1399 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop); 1400 __ delayed()->inc(offset); 1401 1402 __ BIND(L_exit); 1403 // O3, O4 are used as temp registers 1404 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4); 1405 __ retl(); 1406 __ delayed()->mov(G0, O0); // return 0 1407 return start; 1408 } 1409 1410 // 1411 // Generate stub for conjoint byte copy. If "aligned" is true, the 1412 // "from" and "to" addresses are assumed to be heapword aligned. 1413 // 1414 // Arguments for generated stub: 1415 // from: O0 1416 // to: O1 1417 // count: O2 treated as signed 1418 // 1419 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1420 address *entry, const char *name) { 1421 // Do reverse copy. 1422 1423 __ align(CodeEntryAlignment); 1424 StubCodeMark mark(this, "StubRoutines", name); 1425 address start = __ pc(); 1426 1427 Label L_skip_alignment, L_align, L_aligned_copy; 1428 Label L_copy_byte, L_copy_byte_loop, L_exit; 1429 1430 const Register from = O0; // source array address 1431 const Register to = O1; // destination array address 1432 const Register count = O2; // elements count 1433 const Register end_from = from; // source array end address 1434 const Register end_to = to; // destination array end address 1435 1436 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1437 1438 if (entry != NULL) { 1439 *entry = __ pc(); 1440 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1441 BLOCK_COMMENT("Entry:"); 1442 } 1443 1444 array_overlap_test(nooverlap_target, 0); 1445 1446 __ add(to, count, end_to); // offset after last copied element 1447 1448 // for short arrays, just do single element copy 1449 __ cmp(count, 23); // 16 + 7 1450 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte); 1451 __ delayed()->add(from, count, end_from); 1452 1453 { 1454 // Align end of arrays since they could be not aligned even 1455 // when arrays itself are aligned. 1456 1457 // copy bytes to align 'end_to' on 8 byte boundary 1458 __ andcc(end_to, 7, G1); // misaligned bytes 1459 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1460 __ delayed()->nop(); 1461 __ sub(count, G1, count); 1462 __ BIND(L_align); 1463 __ dec(end_from); 1464 __ dec(end_to); 1465 __ ldub(end_from, 0, O3); 1466 __ deccc(G1); 1467 __ brx(Assembler::notZero, false, Assembler::pt, L_align); 1468 __ delayed()->stb(O3, end_to, 0); 1469 __ BIND(L_skip_alignment); 1470 } 1471 #ifdef _LP64 1472 if (aligned) { 1473 // Both arrays are aligned to 8-bytes in 64-bits VM. 1474 // The 'count' is decremented in copy_16_bytes_backward_with_shift() 1475 // in unaligned case. 1476 __ dec(count, 16); 1477 } else 1478 #endif 1479 { 1480 // Copy with shift 16 bytes per iteration if arrays do not have 1481 // the same alignment mod 8, otherwise jump to the next 1482 // code for aligned copy (and substracting 16 from 'count' before jump). 1483 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1484 // Also jump over aligned copy after the copy with shift completed. 1485 1486 copy_16_bytes_backward_with_shift(end_from, end_to, count, 16, 1487 L_aligned_copy, L_copy_byte); 1488 } 1489 // copy 4 elements (16 bytes) at a time 1490 __ align(OptoLoopAlignment); 1491 __ BIND(L_aligned_copy); 1492 __ dec(end_from, 16); 1493 __ ldx(end_from, 8, O3); 1494 __ ldx(end_from, 0, O4); 1495 __ dec(end_to, 16); 1496 __ deccc(count, 16); 1497 __ stx(O3, end_to, 8); 1498 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1499 __ delayed()->stx(O4, end_to, 0); 1500 __ inc(count, 16); 1501 1502 // copy 1 element (2 bytes) at a time 1503 __ BIND(L_copy_byte); 1504 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1505 __ align(OptoLoopAlignment); 1506 __ BIND(L_copy_byte_loop); 1507 __ dec(end_from); 1508 __ dec(end_to); 1509 __ ldub(end_from, 0, O4); 1510 __ deccc(count); 1511 __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop); 1512 __ delayed()->stb(O4, end_to, 0); 1513 1514 __ BIND(L_exit); 1515 // O3, O4 are used as temp registers 1516 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4); 1517 __ retl(); 1518 __ delayed()->mov(G0, O0); // return 0 1519 return start; 1520 } 1521 1522 // 1523 // Generate stub for disjoint short copy. If "aligned" is true, the 1524 // "from" and "to" addresses are assumed to be heapword aligned. 1525 // 1526 // Arguments for generated stub: 1527 // from: O0 1528 // to: O1 1529 // count: O2 treated as signed 1530 // 1531 address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) { 1532 __ align(CodeEntryAlignment); 1533 StubCodeMark mark(this, "StubRoutines", name); 1534 address start = __ pc(); 1535 1536 Label L_skip_alignment, L_skip_alignment2; 1537 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 1538 1539 const Register from = O0; // source array address 1540 const Register to = O1; // destination array address 1541 const Register count = O2; // elements count 1542 const Register offset = O5; // offset from start of arrays 1543 // O3, O4, G3, G4 are used as temp registers 1544 1545 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1546 1547 if (entry != NULL) { 1548 *entry = __ pc(); 1549 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1550 BLOCK_COMMENT("Entry:"); 1551 } 1552 1553 // for short arrays, just do single element copy 1554 __ cmp(count, 11); // 8 + 3 (22 bytes) 1555 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes); 1556 __ delayed()->mov(G0, offset); 1557 1558 if (aligned) { 1559 // 'aligned' == true when it is known statically during compilation 1560 // of this arraycopy call site that both 'from' and 'to' addresses 1561 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 1562 // 1563 // Aligned arrays have 4 bytes alignment in 32-bits VM 1564 // and 8 bytes - in 64-bits VM. 1565 // 1566 #ifndef _LP64 1567 // copy a 2-elements word if necessary to align 'to' to 8 bytes 1568 __ andcc(to, 7, G0); 1569 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1570 __ delayed()->ld(from, 0, O3); 1571 __ inc(from, 4); 1572 __ inc(to, 4); 1573 __ dec(count, 2); 1574 __ st(O3, to, -4); 1575 __ BIND(L_skip_alignment); 1576 #endif 1577 } else { 1578 // copy 1 element if necessary to align 'to' on an 4 bytes 1579 __ andcc(to, 3, G0); 1580 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1581 __ delayed()->lduh(from, 0, O3); 1582 __ inc(from, 2); 1583 __ inc(to, 2); 1584 __ dec(count); 1585 __ sth(O3, to, -2); 1586 __ BIND(L_skip_alignment); 1587 1588 // copy 2 elements to align 'to' on an 8 byte boundary 1589 __ andcc(to, 7, G0); 1590 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2); 1591 __ delayed()->lduh(from, 0, O3); 1592 __ dec(count, 2); 1593 __ lduh(from, 2, O4); 1594 __ inc(from, 4); 1595 __ inc(to, 4); 1596 __ sth(O3, to, -4); 1597 __ sth(O4, to, -2); 1598 __ BIND(L_skip_alignment2); 1599 } 1600 #ifdef _LP64 1601 if (!aligned) 1602 #endif 1603 { 1604 // Copy with shift 16 bytes per iteration if arrays do not have 1605 // the same alignment mod 8, otherwise fall through to the next 1606 // code for aligned copy. 1607 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1608 // Also jump over aligned copy after the copy with shift completed. 1609 1610 copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes); 1611 } 1612 1613 // Both array are 8 bytes aligned, copy 16 bytes at a time 1614 __ and3(count, 3, G4); // Save 1615 __ srl(count, 2, count); 1616 generate_disjoint_long_copy_core(aligned); 1617 __ mov(G4, count); // restore 1618 1619 // copy 1 element at a time 1620 __ BIND(L_copy_2_bytes); 1621 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1622 __ align(OptoLoopAlignment); 1623 __ BIND(L_copy_2_bytes_loop); 1624 __ lduh(from, offset, O3); 1625 __ deccc(count); 1626 __ sth(O3, to, offset); 1627 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop); 1628 __ delayed()->inc(offset, 2); 1629 1630 __ BIND(L_exit); 1631 // O3, O4 are used as temp registers 1632 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4); 1633 __ retl(); 1634 __ delayed()->mov(G0, O0); // return 0 1635 return start; 1636 } 1637 1638 // 1639 // Generate stub for disjoint short fill. If "aligned" is true, the 1640 // "to" address is assumed to be heapword aligned. 1641 // 1642 // Arguments for generated stub: 1643 // to: O0 1644 // value: O1 1645 // count: O2 treated as signed 1646 // 1647 address generate_fill(BasicType t, bool aligned, const char* name) { 1648 __ align(CodeEntryAlignment); 1649 StubCodeMark mark(this, "StubRoutines", name); 1650 address start = __ pc(); 1651 1652 const Register to = O0; // source array address 1653 const Register value = O1; // fill value 1654 const Register count = O2; // elements count 1655 // O3 is used as a temp register 1656 1657 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1658 1659 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte; 1660 Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes; 1661 1662 int shift = -1; 1663 switch (t) { 1664 case T_BYTE: 1665 shift = 2; 1666 break; 1667 case T_SHORT: 1668 shift = 1; 1669 break; 1670 case T_INT: 1671 shift = 0; 1672 break; 1673 default: ShouldNotReachHere(); 1674 } 1675 1676 BLOCK_COMMENT("Entry:"); 1677 1678 if (t == T_BYTE) { 1679 // Zero extend value 1680 __ and3(value, 0xff, value); 1681 __ sllx(value, 8, O3); 1682 __ or3(value, O3, value); 1683 } 1684 if (t == T_SHORT) { 1685 // Zero extend value 1686 __ sllx(value, 48, value); 1687 __ srlx(value, 48, value); 1688 } 1689 if (t == T_BYTE || t == T_SHORT) { 1690 __ sllx(value, 16, O3); 1691 __ or3(value, O3, value); 1692 } 1693 1694 __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 1695 __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp 1696 __ delayed()->andcc(count, 1, G0); 1697 1698 if (!aligned && (t == T_BYTE || t == T_SHORT)) { 1699 // align source address at 4 bytes address boundary 1700 if (t == T_BYTE) { 1701 // One byte misalignment happens only for byte arrays 1702 __ andcc(to, 1, G0); 1703 __ br(Assembler::zero, false, Assembler::pt, L_skip_align1); 1704 __ delayed()->nop(); 1705 __ stb(value, to, 0); 1706 __ inc(to, 1); 1707 __ dec(count, 1); 1708 __ BIND(L_skip_align1); 1709 } 1710 // Two bytes misalignment happens only for byte and short (char) arrays 1711 __ andcc(to, 2, G0); 1712 __ br(Assembler::zero, false, Assembler::pt, L_skip_align2); 1713 __ delayed()->nop(); 1714 __ sth(value, to, 0); 1715 __ inc(to, 2); 1716 __ dec(count, 1 << (shift - 1)); 1717 __ BIND(L_skip_align2); 1718 } 1719 #ifdef _LP64 1720 if (!aligned) { 1721 #endif 1722 // align to 8 bytes, we know we are 4 byte aligned to start 1723 __ andcc(to, 7, G0); 1724 __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes); 1725 __ delayed()->nop(); 1726 __ stw(value, to, 0); 1727 __ inc(to, 4); 1728 __ dec(count, 1 << shift); 1729 __ BIND(L_fill_32_bytes); 1730 #ifdef _LP64 1731 } 1732 #endif 1733 1734 if (t == T_INT) { 1735 // Zero extend value 1736 __ srl(value, 0, value); 1737 } 1738 if (t == T_BYTE || t == T_SHORT || t == T_INT) { 1739 __ sllx(value, 32, O3); 1740 __ or3(value, O3, value); 1741 } 1742 1743 Label L_check_fill_8_bytes; 1744 // Fill 32-byte chunks 1745 __ subcc(count, 8 << shift, count); 1746 __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes); 1747 __ delayed()->nop(); 1748 1749 Label L_fill_32_bytes_loop, L_fill_4_bytes; 1750 __ align(16); 1751 __ BIND(L_fill_32_bytes_loop); 1752 1753 __ stx(value, to, 0); 1754 __ stx(value, to, 8); 1755 __ stx(value, to, 16); 1756 __ stx(value, to, 24); 1757 1758 __ subcc(count, 8 << shift, count); 1759 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop); 1760 __ delayed()->add(to, 32, to); 1761 1762 __ BIND(L_check_fill_8_bytes); 1763 __ addcc(count, 8 << shift, count); 1764 __ brx(Assembler::zero, false, Assembler::pn, L_exit); 1765 __ delayed()->subcc(count, 1 << (shift + 1), count); 1766 __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes); 1767 __ delayed()->andcc(count, 1<<shift, G0); 1768 1769 // 1770 // length is too short, just fill 8 bytes at a time 1771 // 1772 Label L_fill_8_bytes_loop; 1773 __ BIND(L_fill_8_bytes_loop); 1774 __ stx(value, to, 0); 1775 __ subcc(count, 1 << (shift + 1), count); 1776 __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop); 1777 __ delayed()->add(to, 8, to); 1778 1779 // fill trailing 4 bytes 1780 __ andcc(count, 1<<shift, G0); // in delay slot of branches 1781 if (t == T_INT) { 1782 __ BIND(L_fill_elements); 1783 } 1784 __ BIND(L_fill_4_bytes); 1785 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes); 1786 if (t == T_BYTE || t == T_SHORT) { 1787 __ delayed()->andcc(count, 1<<(shift-1), G0); 1788 } else { 1789 __ delayed()->nop(); 1790 } 1791 __ stw(value, to, 0); 1792 if (t == T_BYTE || t == T_SHORT) { 1793 __ inc(to, 4); 1794 // fill trailing 2 bytes 1795 __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches 1796 __ BIND(L_fill_2_bytes); 1797 __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte); 1798 __ delayed()->andcc(count, 1, count); 1799 __ sth(value, to, 0); 1800 if (t == T_BYTE) { 1801 __ inc(to, 2); 1802 // fill trailing byte 1803 __ andcc(count, 1, count); // in delay slot of branches 1804 __ BIND(L_fill_byte); 1805 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1806 __ delayed()->nop(); 1807 __ stb(value, to, 0); 1808 } else { 1809 __ BIND(L_fill_byte); 1810 } 1811 } else { 1812 __ BIND(L_fill_2_bytes); 1813 } 1814 __ BIND(L_exit); 1815 __ retl(); 1816 __ delayed()->nop(); 1817 1818 // Handle copies less than 8 bytes. Int is handled elsewhere. 1819 if (t == T_BYTE) { 1820 __ BIND(L_fill_elements); 1821 Label L_fill_2, L_fill_4; 1822 // in delay slot __ andcc(count, 1, G0); 1823 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1824 __ delayed()->andcc(count, 2, G0); 1825 __ stb(value, to, 0); 1826 __ inc(to, 1); 1827 __ BIND(L_fill_2); 1828 __ brx(Assembler::zero, false, Assembler::pt, L_fill_4); 1829 __ delayed()->andcc(count, 4, G0); 1830 __ stb(value, to, 0); 1831 __ stb(value, to, 1); 1832 __ inc(to, 2); 1833 __ BIND(L_fill_4); 1834 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1835 __ delayed()->nop(); 1836 __ stb(value, to, 0); 1837 __ stb(value, to, 1); 1838 __ stb(value, to, 2); 1839 __ retl(); 1840 __ delayed()->stb(value, to, 3); 1841 } 1842 1843 if (t == T_SHORT) { 1844 Label L_fill_2; 1845 __ BIND(L_fill_elements); 1846 // in delay slot __ andcc(count, 1, G0); 1847 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1848 __ delayed()->andcc(count, 2, G0); 1849 __ sth(value, to, 0); 1850 __ inc(to, 2); 1851 __ BIND(L_fill_2); 1852 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1853 __ delayed()->nop(); 1854 __ sth(value, to, 0); 1855 __ retl(); 1856 __ delayed()->sth(value, to, 2); 1857 } 1858 return start; 1859 } 1860 1861 // 1862 // Generate stub for conjoint short copy. If "aligned" is true, the 1863 // "from" and "to" addresses are assumed to be heapword aligned. 1864 // 1865 // Arguments for generated stub: 1866 // from: O0 1867 // to: O1 1868 // count: O2 treated as signed 1869 // 1870 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1871 address *entry, const char *name) { 1872 // Do reverse copy. 1873 1874 __ align(CodeEntryAlignment); 1875 StubCodeMark mark(this, "StubRoutines", name); 1876 address start = __ pc(); 1877 1878 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy; 1879 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 1880 1881 const Register from = O0; // source array address 1882 const Register to = O1; // destination array address 1883 const Register count = O2; // elements count 1884 const Register end_from = from; // source array end address 1885 const Register end_to = to; // destination array end address 1886 1887 const Register byte_count = O3; // bytes count to copy 1888 1889 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1890 1891 if (entry != NULL) { 1892 *entry = __ pc(); 1893 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1894 BLOCK_COMMENT("Entry:"); 1895 } 1896 1897 array_overlap_test(nooverlap_target, 1); 1898 1899 __ sllx(count, LogBytesPerShort, byte_count); 1900 __ add(to, byte_count, end_to); // offset after last copied element 1901 1902 // for short arrays, just do single element copy 1903 __ cmp(count, 11); // 8 + 3 (22 bytes) 1904 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes); 1905 __ delayed()->add(from, byte_count, end_from); 1906 1907 { 1908 // Align end of arrays since they could be not aligned even 1909 // when arrays itself are aligned. 1910 1911 // copy 1 element if necessary to align 'end_to' on an 4 bytes 1912 __ andcc(end_to, 3, G0); 1913 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1914 __ delayed()->lduh(end_from, -2, O3); 1915 __ dec(end_from, 2); 1916 __ dec(end_to, 2); 1917 __ dec(count); 1918 __ sth(O3, end_to, 0); 1919 __ BIND(L_skip_alignment); 1920 1921 // copy 2 elements to align 'end_to' on an 8 byte boundary 1922 __ andcc(end_to, 7, G0); 1923 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2); 1924 __ delayed()->lduh(end_from, -2, O3); 1925 __ dec(count, 2); 1926 __ lduh(end_from, -4, O4); 1927 __ dec(end_from, 4); 1928 __ dec(end_to, 4); 1929 __ sth(O3, end_to, 2); 1930 __ sth(O4, end_to, 0); 1931 __ BIND(L_skip_alignment2); 1932 } 1933 #ifdef _LP64 1934 if (aligned) { 1935 // Both arrays are aligned to 8-bytes in 64-bits VM. 1936 // The 'count' is decremented in copy_16_bytes_backward_with_shift() 1937 // in unaligned case. 1938 __ dec(count, 8); 1939 } else 1940 #endif 1941 { 1942 // Copy with shift 16 bytes per iteration if arrays do not have 1943 // the same alignment mod 8, otherwise jump to the next 1944 // code for aligned copy (and substracting 8 from 'count' before jump). 1945 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1946 // Also jump over aligned copy after the copy with shift completed. 1947 1948 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8, 1949 L_aligned_copy, L_copy_2_bytes); 1950 } 1951 // copy 4 elements (16 bytes) at a time 1952 __ align(OptoLoopAlignment); 1953 __ BIND(L_aligned_copy); 1954 __ dec(end_from, 16); 1955 __ ldx(end_from, 8, O3); 1956 __ ldx(end_from, 0, O4); 1957 __ dec(end_to, 16); 1958 __ deccc(count, 8); 1959 __ stx(O3, end_to, 8); 1960 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1961 __ delayed()->stx(O4, end_to, 0); 1962 __ inc(count, 8); 1963 1964 // copy 1 element (2 bytes) at a time 1965 __ BIND(L_copy_2_bytes); 1966 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1967 __ BIND(L_copy_2_bytes_loop); 1968 __ dec(end_from, 2); 1969 __ dec(end_to, 2); 1970 __ lduh(end_from, 0, O4); 1971 __ deccc(count); 1972 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop); 1973 __ delayed()->sth(O4, end_to, 0); 1974 1975 __ BIND(L_exit); 1976 // O3, O4 are used as temp registers 1977 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4); 1978 __ retl(); 1979 __ delayed()->mov(G0, O0); // return 0 1980 return start; 1981 } 1982 1983 // 1984 // Helper methods for generate_disjoint_int_copy_core() 1985 // 1986 void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec, 1987 Label& L_loop, bool use_prefetch, bool use_bis) { 1988 1989 __ align(OptoLoopAlignment); 1990 __ BIND(L_loop); 1991 if (use_prefetch) { 1992 if (ArraycopySrcPrefetchDistance > 0) { 1993 __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads); 1994 } 1995 if (ArraycopyDstPrefetchDistance > 0) { 1996 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads); 1997 } 1998 } 1999 __ ldx(from, 4, O4); 2000 __ ldx(from, 12, G4); 2001 __ inc(to, 16); 2002 __ inc(from, 16); 2003 __ deccc(count, 4); // Can we do next iteration after this one? 2004 2005 __ srlx(O4, 32, G3); 2006 __ bset(G3, O3); 2007 __ sllx(O4, 32, O4); 2008 __ srlx(G4, 32, G3); 2009 __ bset(G3, O4); 2010 if (use_bis) { 2011 __ stxa(O3, to, -16); 2012 __ stxa(O4, to, -8); 2013 } else { 2014 __ stx(O3, to, -16); 2015 __ stx(O4, to, -8); 2016 } 2017 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 2018 __ delayed()->sllx(G4, 32, O3); 2019 2020 } 2021 2022 // 2023 // Generate core code for disjoint int copy (and oop copy on 32-bit). 2024 // If "aligned" is true, the "from" and "to" addresses are assumed 2025 // to be heapword aligned. 2026 // 2027 // Arguments: 2028 // from: O0 2029 // to: O1 2030 // count: O2 treated as signed 2031 // 2032 void generate_disjoint_int_copy_core(bool aligned) { 2033 2034 Label L_skip_alignment, L_aligned_copy; 2035 Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 2036 2037 const Register from = O0; // source array address 2038 const Register to = O1; // destination array address 2039 const Register count = O2; // elements count 2040 const Register offset = O5; // offset from start of arrays 2041 // O3, O4, G3, G4 are used as temp registers 2042 2043 // 'aligned' == true when it is known statically during compilation 2044 // of this arraycopy call site that both 'from' and 'to' addresses 2045 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 2046 // 2047 // Aligned arrays have 4 bytes alignment in 32-bits VM 2048 // and 8 bytes - in 64-bits VM. 2049 // 2050 #ifdef _LP64 2051 if (!aligned) 2052 #endif 2053 { 2054 // The next check could be put under 'ifndef' since the code in 2055 // generate_disjoint_long_copy_core() has own checks and set 'offset'. 2056 2057 // for short arrays, just do single element copy 2058 __ cmp(count, 5); // 4 + 1 (20 bytes) 2059 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 2060 __ delayed()->mov(G0, offset); 2061 2062 // copy 1 element to align 'to' on an 8 byte boundary 2063 __ andcc(to, 7, G0); 2064 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 2065 __ delayed()->ld(from, 0, O3); 2066 __ inc(from, 4); 2067 __ inc(to, 4); 2068 __ dec(count); 2069 __ st(O3, to, -4); 2070 __ BIND(L_skip_alignment); 2071 2072 // if arrays have same alignment mod 8, do 4 elements copy 2073 __ andcc(from, 7, G0); 2074 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 2075 __ delayed()->ld(from, 0, O3); 2076 2077 // 2078 // Load 2 aligned 8-bytes chunks and use one from previous iteration 2079 // to form 2 aligned 8-bytes chunks to store. 2080 // 2081 // copy_16_bytes_forward_with_shift() is not used here since this 2082 // code is more optimal. 2083 2084 // copy with shift 4 elements (16 bytes) at a time 2085 __ dec(count, 4); // The cmp at the beginning guaranty count >= 4 2086 __ sllx(O3, 32, O3); 2087 2088 disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop); 2089 2090 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 2091 __ delayed()->inc(count, 4); // restore 'count' 2092 2093 __ BIND(L_aligned_copy); 2094 } // !aligned 2095 2096 // copy 4 elements (16 bytes) at a time 2097 __ and3(count, 1, G4); // Save 2098 __ srl(count, 1, count); 2099 generate_disjoint_long_copy_core(aligned); 2100 __ mov(G4, count); // Restore 2101 2102 // copy 1 element at a time 2103 __ BIND(L_copy_4_bytes); 2104 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 2105 __ BIND(L_copy_4_bytes_loop); 2106 __ ld(from, offset, O3); 2107 __ deccc(count); 2108 __ st(O3, to, offset); 2109 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop); 2110 __ delayed()->inc(offset, 4); 2111 __ BIND(L_exit); 2112 } 2113 2114 // 2115 // Generate stub for disjoint int copy. If "aligned" is true, the 2116 // "from" and "to" addresses are assumed to be heapword aligned. 2117 // 2118 // Arguments for generated stub: 2119 // from: O0 2120 // to: O1 2121 // count: O2 treated as signed 2122 // 2123 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { 2124 __ align(CodeEntryAlignment); 2125 StubCodeMark mark(this, "StubRoutines", name); 2126 address start = __ pc(); 2127 2128 const Register count = O2; 2129 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2130 2131 if (entry != NULL) { 2132 *entry = __ pc(); 2133 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2134 BLOCK_COMMENT("Entry:"); 2135 } 2136 2137 generate_disjoint_int_copy_core(aligned); 2138 2139 // O3, O4 are used as temp registers 2140 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2141 __ retl(); 2142 __ delayed()->mov(G0, O0); // return 0 2143 return start; 2144 } 2145 2146 // 2147 // Generate core code for conjoint int copy (and oop copy on 32-bit). 2148 // If "aligned" is true, the "from" and "to" addresses are assumed 2149 // to be heapword aligned. 2150 // 2151 // Arguments: 2152 // from: O0 2153 // to: O1 2154 // count: O2 treated as signed 2155 // 2156 void generate_conjoint_int_copy_core(bool aligned) { 2157 // Do reverse copy. 2158 2159 Label L_skip_alignment, L_aligned_copy; 2160 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 2161 2162 const Register from = O0; // source array address 2163 const Register to = O1; // destination array address 2164 const Register count = O2; // elements count 2165 const Register end_from = from; // source array end address 2166 const Register end_to = to; // destination array end address 2167 // O3, O4, O5, G3 are used as temp registers 2168 2169 const Register byte_count = O3; // bytes count to copy 2170 2171 __ sllx(count, LogBytesPerInt, byte_count); 2172 __ add(to, byte_count, end_to); // offset after last copied element 2173 2174 __ cmp(count, 5); // for short arrays, just do single element copy 2175 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 2176 __ delayed()->add(from, byte_count, end_from); 2177 2178 // copy 1 element to align 'to' on an 8 byte boundary 2179 __ andcc(end_to, 7, G0); 2180 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 2181 __ delayed()->nop(); 2182 __ dec(count); 2183 __ dec(end_from, 4); 2184 __ dec(end_to, 4); 2185 __ ld(end_from, 0, O4); 2186 __ st(O4, end_to, 0); 2187 __ BIND(L_skip_alignment); 2188 2189 // Check if 'end_from' and 'end_to' has the same alignment. 2190 __ andcc(end_from, 7, G0); 2191 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 2192 __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4 2193 2194 // copy with shift 4 elements (16 bytes) at a time 2195 // 2196 // Load 2 aligned 8-bytes chunks and use one from previous iteration 2197 // to form 2 aligned 8-bytes chunks to store. 2198 // 2199 __ ldx(end_from, -4, O3); 2200 __ align(OptoLoopAlignment); 2201 __ BIND(L_copy_16_bytes); 2202 __ ldx(end_from, -12, O4); 2203 __ deccc(count, 4); 2204 __ ldx(end_from, -20, O5); 2205 __ dec(end_to, 16); 2206 __ dec(end_from, 16); 2207 __ srlx(O3, 32, O3); 2208 __ sllx(O4, 32, G3); 2209 __ bset(G3, O3); 2210 __ stx(O3, end_to, 8); 2211 __ srlx(O4, 32, O4); 2212 __ sllx(O5, 32, G3); 2213 __ bset(O4, G3); 2214 __ stx(G3, end_to, 0); 2215 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2216 __ delayed()->mov(O5, O3); 2217 2218 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 2219 __ delayed()->inc(count, 4); 2220 2221 // copy 4 elements (16 bytes) at a time 2222 __ align(OptoLoopAlignment); 2223 __ BIND(L_aligned_copy); 2224 __ dec(end_from, 16); 2225 __ ldx(end_from, 8, O3); 2226 __ ldx(end_from, 0, O4); 2227 __ dec(end_to, 16); 2228 __ deccc(count, 4); 2229 __ stx(O3, end_to, 8); 2230 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 2231 __ delayed()->stx(O4, end_to, 0); 2232 __ inc(count, 4); 2233 2234 // copy 1 element (4 bytes) at a time 2235 __ BIND(L_copy_4_bytes); 2236 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 2237 __ BIND(L_copy_4_bytes_loop); 2238 __ dec(end_from, 4); 2239 __ dec(end_to, 4); 2240 __ ld(end_from, 0, O4); 2241 __ deccc(count); 2242 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop); 2243 __ delayed()->st(O4, end_to, 0); 2244 __ BIND(L_exit); 2245 } 2246 2247 // 2248 // Generate stub for conjoint int copy. If "aligned" is true, the 2249 // "from" and "to" addresses are assumed to be heapword aligned. 2250 // 2251 // Arguments for generated stub: 2252 // from: O0 2253 // to: O1 2254 // count: O2 treated as signed 2255 // 2256 address generate_conjoint_int_copy(bool aligned, address nooverlap_target, 2257 address *entry, const char *name) { 2258 __ align(CodeEntryAlignment); 2259 StubCodeMark mark(this, "StubRoutines", name); 2260 address start = __ pc(); 2261 2262 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2263 2264 if (entry != NULL) { 2265 *entry = __ pc(); 2266 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2267 BLOCK_COMMENT("Entry:"); 2268 } 2269 2270 array_overlap_test(nooverlap_target, 2); 2271 2272 generate_conjoint_int_copy_core(aligned); 2273 2274 // O3, O4 are used as temp registers 2275 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2276 __ retl(); 2277 __ delayed()->mov(G0, O0); // return 0 2278 return start; 2279 } 2280 2281 // 2282 // Helper methods for generate_disjoint_long_copy_core() 2283 // 2284 void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec, 2285 Label& L_loop, bool use_prefetch, bool use_bis) { 2286 __ align(OptoLoopAlignment); 2287 __ BIND(L_loop); 2288 for (int off = 0; off < 64; off += 16) { 2289 if (use_prefetch && (off & 31) == 0) { 2290 if (ArraycopySrcPrefetchDistance > 0) { 2291 __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads); 2292 } 2293 if (ArraycopyDstPrefetchDistance > 0) { 2294 __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads); 2295 } 2296 } 2297 __ ldx(from, off+0, O4); 2298 __ ldx(from, off+8, O5); 2299 if (use_bis) { 2300 __ stxa(O4, to, off+0); 2301 __ stxa(O5, to, off+8); 2302 } else { 2303 __ stx(O4, to, off+0); 2304 __ stx(O5, to, off+8); 2305 } 2306 } 2307 __ deccc(count, 8); 2308 __ inc(from, 64); 2309 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 2310 __ delayed()->inc(to, 64); 2311 } 2312 2313 // 2314 // Generate core code for disjoint long copy (and oop copy on 64-bit). 2315 // "aligned" is ignored, because we must make the stronger 2316 // assumption that both addresses are always 64-bit aligned. 2317 // 2318 // Arguments: 2319 // from: O0 2320 // to: O1 2321 // count: O2 treated as signed 2322 // 2323 // count -= 2; 2324 // if ( count >= 0 ) { // >= 2 elements 2325 // if ( count > 6) { // >= 8 elements 2326 // count -= 6; // original count - 8 2327 // do { 2328 // copy_8_elements; 2329 // count -= 8; 2330 // } while ( count >= 0 ); 2331 // count += 6; 2332 // } 2333 // if ( count >= 0 ) { // >= 2 elements 2334 // do { 2335 // copy_2_elements; 2336 // } while ( (count=count-2) >= 0 ); 2337 // } 2338 // } 2339 // count += 2; 2340 // if ( count != 0 ) { // 1 element left 2341 // copy_1_element; 2342 // } 2343 // 2344 void generate_disjoint_long_copy_core(bool aligned) { 2345 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2346 const Register from = O0; // source array address 2347 const Register to = O1; // destination array address 2348 const Register count = O2; // elements count 2349 const Register offset0 = O4; // element offset 2350 const Register offset8 = O5; // next element offset 2351 2352 __ deccc(count, 2); 2353 __ mov(G0, offset0); // offset from start of arrays (0) 2354 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2355 __ delayed()->add(offset0, 8, offset8); 2356 2357 // Copy by 64 bytes chunks 2358 2359 const Register from64 = O3; // source address 2360 const Register to64 = G3; // destination address 2361 __ subcc(count, 6, O3); 2362 __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes ); 2363 __ delayed()->mov(to, to64); 2364 // Now we can use O4(offset0), O5(offset8) as temps 2365 __ mov(O3, count); 2366 // count >= 0 (original count - 8) 2367 __ mov(from, from64); 2368 2369 disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop); 2370 2371 // Restore O4(offset0), O5(offset8) 2372 __ sub(from64, from, offset0); 2373 __ inccc(count, 6); // restore count 2374 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2375 __ delayed()->add(offset0, 8, offset8); 2376 2377 // Copy by 16 bytes chunks 2378 __ align(OptoLoopAlignment); 2379 __ BIND(L_copy_16_bytes); 2380 __ ldx(from, offset0, O3); 2381 __ ldx(from, offset8, G3); 2382 __ deccc(count, 2); 2383 __ stx(O3, to, offset0); 2384 __ inc(offset0, 16); 2385 __ stx(G3, to, offset8); 2386 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2387 __ delayed()->inc(offset8, 16); 2388 2389 // Copy last 8 bytes 2390 __ BIND(L_copy_8_bytes); 2391 __ inccc(count, 2); 2392 __ brx(Assembler::zero, true, Assembler::pn, L_exit ); 2393 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs 2394 __ ldx(from, offset0, O3); 2395 __ stx(O3, to, offset0); 2396 __ BIND(L_exit); 2397 } 2398 2399 // 2400 // Generate stub for disjoint long copy. 2401 // "aligned" is ignored, because we must make the stronger 2402 // assumption that both addresses are always 64-bit aligned. 2403 // 2404 // Arguments for generated stub: 2405 // from: O0 2406 // to: O1 2407 // count: O2 treated as signed 2408 // 2409 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { 2410 __ align(CodeEntryAlignment); 2411 StubCodeMark mark(this, "StubRoutines", name); 2412 address start = __ pc(); 2413 2414 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2415 2416 if (entry != NULL) { 2417 *entry = __ pc(); 2418 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2419 BLOCK_COMMENT("Entry:"); 2420 } 2421 2422 generate_disjoint_long_copy_core(aligned); 2423 2424 // O3, O4 are used as temp registers 2425 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2426 __ retl(); 2427 __ delayed()->mov(G0, O0); // return 0 2428 return start; 2429 } 2430 2431 // 2432 // Generate core code for conjoint long copy (and oop copy on 64-bit). 2433 // "aligned" is ignored, because we must make the stronger 2434 // assumption that both addresses are always 64-bit aligned. 2435 // 2436 // Arguments: 2437 // from: O0 2438 // to: O1 2439 // count: O2 treated as signed 2440 // 2441 void generate_conjoint_long_copy_core(bool aligned) { 2442 // Do reverse copy. 2443 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2444 const Register from = O0; // source array address 2445 const Register to = O1; // destination array address 2446 const Register count = O2; // elements count 2447 const Register offset8 = O4; // element offset 2448 const Register offset0 = O5; // previous element offset 2449 2450 __ subcc(count, 1, count); 2451 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes ); 2452 __ delayed()->sllx(count, LogBytesPerLong, offset8); 2453 __ sub(offset8, 8, offset0); 2454 __ align(OptoLoopAlignment); 2455 __ BIND(L_copy_16_bytes); 2456 __ ldx(from, offset8, O2); 2457 __ ldx(from, offset0, O3); 2458 __ stx(O2, to, offset8); 2459 __ deccc(offset8, 16); // use offset8 as counter 2460 __ stx(O3, to, offset0); 2461 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes); 2462 __ delayed()->dec(offset0, 16); 2463 2464 __ BIND(L_copy_8_bytes); 2465 __ brx(Assembler::negative, false, Assembler::pn, L_exit ); 2466 __ delayed()->nop(); 2467 __ ldx(from, 0, O3); 2468 __ stx(O3, to, 0); 2469 __ BIND(L_exit); 2470 } 2471 2472 // Generate stub for conjoint long copy. 2473 // "aligned" is ignored, because we must make the stronger 2474 // assumption that both addresses are always 64-bit aligned. 2475 // 2476 // Arguments for generated stub: 2477 // from: O0 2478 // to: O1 2479 // count: O2 treated as signed 2480 // 2481 address generate_conjoint_long_copy(bool aligned, address nooverlap_target, 2482 address *entry, const char *name) { 2483 __ align(CodeEntryAlignment); 2484 StubCodeMark mark(this, "StubRoutines", name); 2485 address start = __ pc(); 2486 2487 assert(aligned, "Should always be aligned"); 2488 2489 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2490 2491 if (entry != NULL) { 2492 *entry = __ pc(); 2493 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2494 BLOCK_COMMENT("Entry:"); 2495 } 2496 2497 array_overlap_test(nooverlap_target, 3); 2498 2499 generate_conjoint_long_copy_core(aligned); 2500 2501 // O3, O4 are used as temp registers 2502 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2503 __ retl(); 2504 __ delayed()->mov(G0, O0); // return 0 2505 return start; 2506 } 2507 2508 // Generate stub for disjoint oop copy. If "aligned" is true, the 2509 // "from" and "to" addresses are assumed to be heapword aligned. 2510 // 2511 // Arguments for generated stub: 2512 // from: O0 2513 // to: O1 2514 // count: O2 treated as signed 2515 // 2516 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, 2517 bool dest_uninitialized = false) { 2518 2519 const Register from = O0; // source array address 2520 const Register to = O1; // destination array address 2521 const Register count = O2; // elements count 2522 2523 __ align(CodeEntryAlignment); 2524 StubCodeMark mark(this, "StubRoutines", name); 2525 address start = __ pc(); 2526 2527 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2528 2529 if (entry != NULL) { 2530 *entry = __ pc(); 2531 // caller can pass a 64-bit byte count here 2532 BLOCK_COMMENT("Entry:"); 2533 } 2534 2535 // save arguments for barrier generation 2536 __ mov(to, G1); 2537 __ mov(count, G5); 2538 gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); 2539 #ifdef _LP64 2540 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2541 if (UseCompressedOops) { 2542 generate_disjoint_int_copy_core(aligned); 2543 } else { 2544 generate_disjoint_long_copy_core(aligned); 2545 } 2546 #else 2547 generate_disjoint_int_copy_core(aligned); 2548 #endif 2549 // O0 is used as temp register 2550 gen_write_ref_array_post_barrier(G1, G5, O0); 2551 2552 // O3, O4 are used as temp registers 2553 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2554 __ retl(); 2555 __ delayed()->mov(G0, O0); // return 0 2556 return start; 2557 } 2558 2559 // Generate stub for conjoint oop copy. If "aligned" is true, the 2560 // "from" and "to" addresses are assumed to be heapword aligned. 2561 // 2562 // Arguments for generated stub: 2563 // from: O0 2564 // to: O1 2565 // count: O2 treated as signed 2566 // 2567 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, 2568 address *entry, const char *name, 2569 bool dest_uninitialized = false) { 2570 2571 const Register from = O0; // source array address 2572 const Register to = O1; // destination array address 2573 const Register count = O2; // elements count 2574 2575 __ align(CodeEntryAlignment); 2576 StubCodeMark mark(this, "StubRoutines", name); 2577 address start = __ pc(); 2578 2579 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2580 2581 if (entry != NULL) { 2582 *entry = __ pc(); 2583 // caller can pass a 64-bit byte count here 2584 BLOCK_COMMENT("Entry:"); 2585 } 2586 2587 array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 2588 2589 // save arguments for barrier generation 2590 __ mov(to, G1); 2591 __ mov(count, G5); 2592 gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); 2593 2594 #ifdef _LP64 2595 if (UseCompressedOops) { 2596 generate_conjoint_int_copy_core(aligned); 2597 } else { 2598 generate_conjoint_long_copy_core(aligned); 2599 } 2600 #else 2601 generate_conjoint_int_copy_core(aligned); 2602 #endif 2603 2604 // O0 is used as temp register 2605 gen_write_ref_array_post_barrier(G1, G5, O0); 2606 2607 // O3, O4 are used as temp registers 2608 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2609 __ retl(); 2610 __ delayed()->mov(G0, O0); // return 0 2611 return start; 2612 } 2613 2614 2615 // Helper for generating a dynamic type check. 2616 // Smashes only the given temp registers. 2617 void generate_type_check(Register sub_klass, 2618 Register super_check_offset, 2619 Register super_klass, 2620 Register temp, 2621 Label& L_success) { 2622 assert_different_registers(sub_klass, super_check_offset, super_klass, temp); 2623 2624 BLOCK_COMMENT("type_check:"); 2625 2626 Label L_miss, L_pop_to_miss; 2627 2628 assert_clean_int(super_check_offset, temp); 2629 2630 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg, 2631 &L_success, &L_miss, NULL, 2632 super_check_offset); 2633 2634 BLOCK_COMMENT("type_check_slow_path:"); 2635 __ save_frame(0); 2636 __ check_klass_subtype_slow_path(sub_klass->after_save(), 2637 super_klass->after_save(), 2638 L0, L1, L2, L4, 2639 NULL, &L_pop_to_miss); 2640 __ ba(L_success); 2641 __ delayed()->restore(); 2642 2643 __ bind(L_pop_to_miss); 2644 __ restore(); 2645 2646 // Fall through on failure! 2647 __ BIND(L_miss); 2648 } 2649 2650 2651 // Generate stub for checked oop copy. 2652 // 2653 // Arguments for generated stub: 2654 // from: O0 2655 // to: O1 2656 // count: O2 treated as signed 2657 // ckoff: O3 (super_check_offset) 2658 // ckval: O4 (super_klass) 2659 // ret: O0 zero for success; (-1^K) where K is partial transfer count 2660 // 2661 address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) { 2662 2663 const Register O0_from = O0; // source array address 2664 const Register O1_to = O1; // destination array address 2665 const Register O2_count = O2; // elements count 2666 const Register O3_ckoff = O3; // super_check_offset 2667 const Register O4_ckval = O4; // super_klass 2668 2669 const Register O5_offset = O5; // loop var, with stride wordSize 2670 const Register G1_remain = G1; // loop var, with stride -1 2671 const Register G3_oop = G3; // actual oop copied 2672 const Register G4_klass = G4; // oop._klass 2673 const Register G5_super = G5; // oop._klass._primary_supers[ckval] 2674 2675 __ align(CodeEntryAlignment); 2676 StubCodeMark mark(this, "StubRoutines", name); 2677 address start = __ pc(); 2678 2679 #ifdef ASSERT 2680 // We sometimes save a frame (see generate_type_check below). 2681 // If this will cause trouble, let's fail now instead of later. 2682 __ save_frame(0); 2683 __ restore(); 2684 #endif 2685 2686 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int. 2687 2688 #ifdef ASSERT 2689 // caller guarantees that the arrays really are different 2690 // otherwise, we would have to make conjoint checks 2691 { Label L; 2692 __ mov(O3, G1); // spill: overlap test smashes O3 2693 __ mov(O4, G4); // spill: overlap test smashes O4 2694 array_overlap_test(L, LogBytesPerHeapOop); 2695 __ stop("checkcast_copy within a single array"); 2696 __ bind(L); 2697 __ mov(G1, O3); 2698 __ mov(G4, O4); 2699 } 2700 #endif //ASSERT 2701 2702 if (entry != NULL) { 2703 *entry = __ pc(); 2704 // caller can pass a 64-bit byte count here (from generic stub) 2705 BLOCK_COMMENT("Entry:"); 2706 } 2707 gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized); 2708 2709 Label load_element, store_element, do_card_marks, fail, done; 2710 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it 2711 __ brx(Assembler::notZero, false, Assembler::pt, load_element); 2712 __ delayed()->mov(G0, O5_offset); // offset from start of arrays 2713 2714 // Empty array: Nothing to do. 2715 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2716 __ retl(); 2717 __ delayed()->set(0, O0); // return 0 on (trivial) success 2718 2719 // ======== begin loop ======== 2720 // (Loop is rotated; its entry is load_element.) 2721 // Loop variables: 2722 // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays 2723 // (O2 = len; O2 != 0; O2--) --- number of oops *remaining* 2724 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super 2725 __ align(OptoLoopAlignment); 2726 2727 __ BIND(store_element); 2728 __ deccc(G1_remain); // decrement the count 2729 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop 2730 __ inc(O5_offset, heapOopSize); // step to next offset 2731 __ brx(Assembler::zero, true, Assembler::pt, do_card_marks); 2732 __ delayed()->set(0, O0); // return -1 on success 2733 2734 // ======== loop entry is here ======== 2735 __ BIND(load_element); 2736 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop 2737 __ br_null_short(G3_oop, Assembler::pt, store_element); 2738 2739 __ load_klass(G3_oop, G4_klass); // query the object klass 2740 2741 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super, 2742 // branch to this on success: 2743 store_element); 2744 // ======== end loop ======== 2745 2746 // It was a real error; we must depend on the caller to finish the job. 2747 // Register G1 has number of *remaining* oops, O2 number of *total* oops. 2748 // Emit GC store barriers for the oops we have copied (O2 minus G1), 2749 // and report their number to the caller. 2750 __ BIND(fail); 2751 __ subcc(O2_count, G1_remain, O2_count); 2752 __ brx(Assembler::zero, false, Assembler::pt, done); 2753 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller 2754 2755 __ BIND(do_card_marks); 2756 gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2] 2757 2758 __ BIND(done); 2759 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2760 __ retl(); 2761 __ delayed()->nop(); // return value in 00 2762 2763 return start; 2764 } 2765 2766 2767 // Generate 'unsafe' array copy stub 2768 // Though just as safe as the other stubs, it takes an unscaled 2769 // size_t argument instead of an element count. 2770 // 2771 // Arguments for generated stub: 2772 // from: O0 2773 // to: O1 2774 // count: O2 byte count, treated as ssize_t, can be zero 2775 // 2776 // Examines the alignment of the operands and dispatches 2777 // to a long, int, short, or byte copy loop. 2778 // 2779 address generate_unsafe_copy(const char* name, 2780 address byte_copy_entry, 2781 address short_copy_entry, 2782 address int_copy_entry, 2783 address long_copy_entry) { 2784 2785 const Register O0_from = O0; // source array address 2786 const Register O1_to = O1; // destination array address 2787 const Register O2_count = O2; // elements count 2788 2789 const Register G1_bits = G1; // test copy of low bits 2790 2791 __ align(CodeEntryAlignment); 2792 StubCodeMark mark(this, "StubRoutines", name); 2793 address start = __ pc(); 2794 2795 // bump this on entry, not on exit: 2796 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3); 2797 2798 __ or3(O0_from, O1_to, G1_bits); 2799 __ or3(O2_count, G1_bits, G1_bits); 2800 2801 __ btst(BytesPerLong-1, G1_bits); 2802 __ br(Assembler::zero, true, Assembler::pt, 2803 long_copy_entry, relocInfo::runtime_call_type); 2804 // scale the count on the way out: 2805 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count); 2806 2807 __ btst(BytesPerInt-1, G1_bits); 2808 __ br(Assembler::zero, true, Assembler::pt, 2809 int_copy_entry, relocInfo::runtime_call_type); 2810 // scale the count on the way out: 2811 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count); 2812 2813 __ btst(BytesPerShort-1, G1_bits); 2814 __ br(Assembler::zero, true, Assembler::pt, 2815 short_copy_entry, relocInfo::runtime_call_type); 2816 // scale the count on the way out: 2817 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count); 2818 2819 __ br(Assembler::always, false, Assembler::pt, 2820 byte_copy_entry, relocInfo::runtime_call_type); 2821 __ delayed()->nop(); 2822 2823 return start; 2824 } 2825 2826 2827 // Perform range checks on the proposed arraycopy. 2828 // Kills the two temps, but nothing else. 2829 // Also, clean the sign bits of src_pos and dst_pos. 2830 void arraycopy_range_checks(Register src, // source array oop (O0) 2831 Register src_pos, // source position (O1) 2832 Register dst, // destination array oo (O2) 2833 Register dst_pos, // destination position (O3) 2834 Register length, // length of copy (O4) 2835 Register temp1, Register temp2, 2836 Label& L_failed) { 2837 BLOCK_COMMENT("arraycopy_range_checks:"); 2838 2839 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2840 2841 const Register array_length = temp1; // scratch 2842 const Register end_pos = temp2; // scratch 2843 2844 // Note: This next instruction may be in the delay slot of a branch: 2845 __ add(length, src_pos, end_pos); // src_pos + length 2846 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); 2847 __ cmp(end_pos, array_length); 2848 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2849 2850 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2851 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length 2852 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); 2853 __ cmp(end_pos, array_length); 2854 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2855 2856 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2857 // Move with sign extension can be used since they are positive. 2858 __ delayed()->signx(src_pos, src_pos); 2859 __ signx(dst_pos, dst_pos); 2860 2861 BLOCK_COMMENT("arraycopy_range_checks done"); 2862 } 2863 2864 2865 // 2866 // Generate generic array copy stubs 2867 // 2868 // Input: 2869 // O0 - src oop 2870 // O1 - src_pos 2871 // O2 - dst oop 2872 // O3 - dst_pos 2873 // O4 - element count 2874 // 2875 // Output: 2876 // O0 == 0 - success 2877 // O0 == -1 - need to call System.arraycopy 2878 // 2879 address generate_generic_copy(const char *name, 2880 address entry_jbyte_arraycopy, 2881 address entry_jshort_arraycopy, 2882 address entry_jint_arraycopy, 2883 address entry_oop_arraycopy, 2884 address entry_jlong_arraycopy, 2885 address entry_checkcast_arraycopy) { 2886 Label L_failed, L_objArray; 2887 2888 // Input registers 2889 const Register src = O0; // source array oop 2890 const Register src_pos = O1; // source position 2891 const Register dst = O2; // destination array oop 2892 const Register dst_pos = O3; // destination position 2893 const Register length = O4; // elements count 2894 2895 // registers used as temp 2896 const Register G3_src_klass = G3; // source array klass 2897 const Register G4_dst_klass = G4; // destination array klass 2898 const Register G5_lh = G5; // layout handler 2899 const Register O5_temp = O5; 2900 2901 __ align(CodeEntryAlignment); 2902 StubCodeMark mark(this, "StubRoutines", name); 2903 address start = __ pc(); 2904 2905 // bump this on entry, not on exit: 2906 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3); 2907 2908 // In principle, the int arguments could be dirty. 2909 //assert_clean_int(src_pos, G1); 2910 //assert_clean_int(dst_pos, G1); 2911 //assert_clean_int(length, G1); 2912 2913 //----------------------------------------------------------------------- 2914 // Assembler stubs will be used for this call to arraycopy 2915 // if the following conditions are met: 2916 // 2917 // (1) src and dst must not be null. 2918 // (2) src_pos must not be negative. 2919 // (3) dst_pos must not be negative. 2920 // (4) length must not be negative. 2921 // (5) src klass and dst klass should be the same and not NULL. 2922 // (6) src and dst should be arrays. 2923 // (7) src_pos + length must not exceed length of src. 2924 // (8) dst_pos + length must not exceed length of dst. 2925 BLOCK_COMMENT("arraycopy initial argument checks"); 2926 2927 // if (src == NULL) return -1; 2928 __ br_null(src, false, Assembler::pn, L_failed); 2929 2930 // if (src_pos < 0) return -1; 2931 __ delayed()->tst(src_pos); 2932 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2933 __ delayed()->nop(); 2934 2935 // if (dst == NULL) return -1; 2936 __ br_null(dst, false, Assembler::pn, L_failed); 2937 2938 // if (dst_pos < 0) return -1; 2939 __ delayed()->tst(dst_pos); 2940 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2941 2942 // if (length < 0) return -1; 2943 __ delayed()->tst(length); 2944 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2945 2946 BLOCK_COMMENT("arraycopy argument klass checks"); 2947 // get src->klass() 2948 if (UseCompressedClassPointers) { 2949 __ delayed()->nop(); // ??? not good 2950 __ load_klass(src, G3_src_klass); 2951 } else { 2952 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass); 2953 } 2954 2955 #ifdef ASSERT 2956 // assert(src->klass() != NULL); 2957 BLOCK_COMMENT("assert klasses not null"); 2958 { Label L_a, L_b; 2959 __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL 2960 __ bind(L_a); 2961 __ stop("broken null klass"); 2962 __ bind(L_b); 2963 __ load_klass(dst, G4_dst_klass); 2964 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also 2965 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp 2966 BLOCK_COMMENT("assert done"); 2967 } 2968 #endif 2969 2970 // Load layout helper 2971 // 2972 // |array_tag| | header_size | element_type | |log2_element_size| 2973 // 32 30 24 16 8 2 0 2974 // 2975 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2976 // 2977 2978 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2979 2980 // Load 32-bits signed value. Use br() instruction with it to check icc. 2981 __ lduw(G3_src_klass, lh_offset, G5_lh); 2982 2983 if (UseCompressedClassPointers) { 2984 __ load_klass(dst, G4_dst_klass); 2985 } 2986 // Handle objArrays completely differently... 2987 juint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2988 __ set(objArray_lh, O5_temp); 2989 __ cmp(G5_lh, O5_temp); 2990 __ br(Assembler::equal, false, Assembler::pt, L_objArray); 2991 if (UseCompressedClassPointers) { 2992 __ delayed()->nop(); 2993 } else { 2994 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); 2995 } 2996 2997 // if (src->klass() != dst->klass()) return -1; 2998 __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed); 2999 3000 // if (!src->is_Array()) return -1; 3001 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0 3002 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed); 3003 3004 // At this point, it is known to be a typeArray (array_tag 0x3). 3005 #ifdef ASSERT 3006 __ delayed()->nop(); 3007 { Label L; 3008 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift); 3009 __ set(lh_prim_tag_in_place, O5_temp); 3010 __ cmp(G5_lh, O5_temp); 3011 __ br(Assembler::greaterEqual, false, Assembler::pt, L); 3012 __ delayed()->nop(); 3013 __ stop("must be a primitive array"); 3014 __ bind(L); 3015 } 3016 #else 3017 __ delayed(); // match next insn to prev branch 3018 #endif 3019 3020 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 3021 O5_temp, G4_dst_klass, L_failed); 3022 3023 // TypeArrayKlass 3024 // 3025 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 3026 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 3027 // 3028 3029 const Register G4_offset = G4_dst_klass; // array offset 3030 const Register G3_elsize = G3_src_klass; // log2 element size 3031 3032 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset); 3033 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset 3034 __ add(src, G4_offset, src); // src array offset 3035 __ add(dst, G4_offset, dst); // dst array offset 3036 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size 3037 3038 // next registers should be set before the jump to corresponding stub 3039 const Register from = O0; // source array address 3040 const Register to = O1; // destination array address 3041 const Register count = O2; // elements count 3042 3043 // 'from', 'to', 'count' registers should be set in this order 3044 // since they are the same as 'src', 'src_pos', 'dst'. 3045 3046 BLOCK_COMMENT("scale indexes to element size"); 3047 __ sll_ptr(src_pos, G3_elsize, src_pos); 3048 __ sll_ptr(dst_pos, G3_elsize, dst_pos); 3049 __ add(src, src_pos, from); // src_addr 3050 __ add(dst, dst_pos, to); // dst_addr 3051 3052 BLOCK_COMMENT("choose copy loop based on element size"); 3053 __ cmp(G3_elsize, 0); 3054 __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy); 3055 __ delayed()->signx(length, count); // length 3056 3057 __ cmp(G3_elsize, LogBytesPerShort); 3058 __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy); 3059 __ delayed()->signx(length, count); // length 3060 3061 __ cmp(G3_elsize, LogBytesPerInt); 3062 __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy); 3063 __ delayed()->signx(length, count); // length 3064 #ifdef ASSERT 3065 { Label L; 3066 __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L); 3067 __ stop("must be long copy, but elsize is wrong"); 3068 __ bind(L); 3069 } 3070 #endif 3071 __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy); 3072 __ delayed()->signx(length, count); // length 3073 3074 // ObjArrayKlass 3075 __ BIND(L_objArray); 3076 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length 3077 3078 Label L_plain_copy, L_checkcast_copy; 3079 // test array classes for subtyping 3080 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality 3081 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy); 3082 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below 3083 3084 // Identically typed arrays can be copied without element-wise checks. 3085 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 3086 O5_temp, G5_lh, L_failed); 3087 3088 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 3089 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 3090 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 3091 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 3092 __ add(src, src_pos, from); // src_addr 3093 __ add(dst, dst_pos, to); // dst_addr 3094 __ BIND(L_plain_copy); 3095 __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy); 3096 __ delayed()->signx(length, count); // length 3097 3098 __ BIND(L_checkcast_copy); 3099 // live at this point: G3_src_klass, G4_dst_klass 3100 { 3101 // Before looking at dst.length, make sure dst is also an objArray. 3102 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot 3103 __ cmp(G5_lh, O5_temp); 3104 __ br(Assembler::notEqual, false, Assembler::pn, L_failed); 3105 3106 // It is safe to examine both src.length and dst.length. 3107 __ delayed(); // match next insn to prev branch 3108 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 3109 O5_temp, G5_lh, L_failed); 3110 3111 // Marshal the base address arguments now, freeing registers. 3112 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 3113 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 3114 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 3115 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 3116 __ add(src, src_pos, from); // src_addr 3117 __ add(dst, dst_pos, to); // dst_addr 3118 __ signx(length, count); // length (reloaded) 3119 3120 Register sco_temp = O3; // this register is free now 3121 assert_different_registers(from, to, count, sco_temp, 3122 G4_dst_klass, G3_src_klass); 3123 3124 // Generate the type check. 3125 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 3126 __ lduw(G4_dst_klass, sco_offset, sco_temp); 3127 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass, 3128 O5_temp, L_plain_copy); 3129 3130 // Fetch destination element klass from the ObjArrayKlass header. 3131 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 3132 3133 // the checkcast_copy loop needs two extra arguments: 3134 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass 3135 // lduw(O4, sco_offset, O3); // sco of elem klass 3136 3137 __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy); 3138 __ delayed()->lduw(O4, sco_offset, O3); 3139 } 3140 3141 __ BIND(L_failed); 3142 __ retl(); 3143 __ delayed()->sub(G0, 1, O0); // return -1 3144 return start; 3145 } 3146 3147 // 3148 // Generate stub for heap zeroing. 3149 // "to" address is aligned to jlong (8 bytes). 3150 // 3151 // Arguments for generated stub: 3152 // to: O0 3153 // count: O1 treated as signed (count of HeapWord) 3154 // count could be 0 3155 // 3156 address generate_zero_aligned_words(const char* name) { 3157 __ align(CodeEntryAlignment); 3158 StubCodeMark mark(this, "StubRoutines", name); 3159 address start = __ pc(); 3160 3161 const Register to = O0; // source array address 3162 const Register count = O1; // HeapWords count 3163 const Register temp = O2; // scratch 3164 3165 Label Ldone; 3166 __ sllx(count, LogHeapWordSize, count); // to bytes count 3167 // Use BIS for zeroing 3168 __ bis_zeroing(to, count, temp, Ldone); 3169 __ bind(Ldone); 3170 __ retl(); 3171 __ delayed()->nop(); 3172 return start; 3173 } 3174 3175 void generate_arraycopy_stubs() { 3176 address entry; 3177 address entry_jbyte_arraycopy; 3178 address entry_jshort_arraycopy; 3179 address entry_jint_arraycopy; 3180 address entry_oop_arraycopy; 3181 address entry_jlong_arraycopy; 3182 address entry_checkcast_arraycopy; 3183 3184 //*** jbyte 3185 // Always need aligned and unaligned versions 3186 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3187 "jbyte_disjoint_arraycopy"); 3188 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, 3189 &entry_jbyte_arraycopy, 3190 "jbyte_arraycopy"); 3191 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, 3192 "arrayof_jbyte_disjoint_arraycopy"); 3193 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, 3194 "arrayof_jbyte_arraycopy"); 3195 3196 //*** jshort 3197 // Always need aligned and unaligned versions 3198 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3199 "jshort_disjoint_arraycopy"); 3200 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, 3201 &entry_jshort_arraycopy, 3202 "jshort_arraycopy"); 3203 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, 3204 "arrayof_jshort_disjoint_arraycopy"); 3205 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, 3206 "arrayof_jshort_arraycopy"); 3207 3208 //*** jint 3209 // Aligned versions 3210 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, 3211 "arrayof_jint_disjoint_arraycopy"); 3212 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, 3213 "arrayof_jint_arraycopy"); 3214 #ifdef _LP64 3215 // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 3216 // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). 3217 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, 3218 "jint_disjoint_arraycopy"); 3219 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, 3220 &entry_jint_arraycopy, 3221 "jint_arraycopy"); 3222 #else 3223 // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version 3224 // (in fact in 32bit we always have a pre-loop part even in the aligned version, 3225 // because it uses 64-bit loads/stores, so the aligned flag is actually ignored). 3226 StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy; 3227 StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy; 3228 #endif 3229 3230 3231 //*** jlong 3232 // It is always aligned 3233 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, 3234 "arrayof_jlong_disjoint_arraycopy"); 3235 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, 3236 "arrayof_jlong_arraycopy"); 3237 StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; 3238 StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; 3239 3240 3241 //*** oops 3242 // Aligned versions 3243 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, 3244 "arrayof_oop_disjoint_arraycopy"); 3245 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, 3246 "arrayof_oop_arraycopy"); 3247 // Aligned versions without pre-barriers 3248 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry, 3249 "arrayof_oop_disjoint_arraycopy_uninit", 3250 /*dest_uninitialized*/true); 3251 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, 3252 "arrayof_oop_arraycopy_uninit", 3253 /*dest_uninitialized*/true); 3254 #ifdef _LP64 3255 if (UseCompressedOops) { 3256 // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. 3257 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, 3258 "oop_disjoint_arraycopy"); 3259 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, 3260 "oop_arraycopy"); 3261 // Unaligned versions without pre-barriers 3262 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry, 3263 "oop_disjoint_arraycopy_uninit", 3264 /*dest_uninitialized*/true); 3265 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL, 3266 "oop_arraycopy_uninit", 3267 /*dest_uninitialized*/true); 3268 } else 3269 #endif 3270 { 3271 // oop arraycopy is always aligned on 32bit and 64bit without compressed oops 3272 StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; 3273 StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; 3274 StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; 3275 StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; 3276 } 3277 3278 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3279 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3280 /*dest_uninitialized*/true); 3281 3282 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3283 entry_jbyte_arraycopy, 3284 entry_jshort_arraycopy, 3285 entry_jint_arraycopy, 3286 entry_jlong_arraycopy); 3287 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3288 entry_jbyte_arraycopy, 3289 entry_jshort_arraycopy, 3290 entry_jint_arraycopy, 3291 entry_oop_arraycopy, 3292 entry_jlong_arraycopy, 3293 entry_checkcast_arraycopy); 3294 3295 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3296 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3297 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3298 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3299 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3300 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3301 3302 if (UseBlockZeroing) { 3303 StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words"); 3304 } 3305 } 3306 3307 void generate_initial() { 3308 // Generates all stubs and initializes the entry points 3309 3310 //------------------------------------------------------------------------------------------------------------------------ 3311 // entry points that exist in all platforms 3312 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than 3313 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp. 3314 StubRoutines::_forward_exception_entry = generate_forward_exception(); 3315 3316 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 3317 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3318 3319 //------------------------------------------------------------------------------------------------------------------------ 3320 // entry points that are platform specific 3321 StubRoutines::Sparc::_test_stop_entry = generate_test_stop(); 3322 3323 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine(); 3324 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows(); 3325 3326 #if !defined(COMPILER2) && !defined(_LP64) 3327 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 3328 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 3329 StubRoutines::_atomic_add_entry = generate_atomic_add(); 3330 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry; 3331 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry; 3332 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 3333 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; 3334 #endif // COMPILER2 !=> _LP64 3335 3336 // Build this early so it's available for the interpreter. 3337 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); 3338 } 3339 3340 3341 void generate_all() { 3342 // Generates all stubs and initializes the entry points 3343 3344 // Generate partial_subtype_check first here since its code depends on 3345 // UseZeroBaseCompressedOops which is defined after heap initialization. 3346 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check(); 3347 // These entry points require SharedInfo::stack0 to be set up in non-core builds 3348 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); 3349 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); 3350 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); 3351 3352 StubRoutines::_handler_for_unsafe_access_entry = 3353 generate_handler_for_unsafe_access(); 3354 3355 // support for verify_oop (must happen after universe_init) 3356 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); 3357 3358 // arraycopy stubs used by compilers 3359 generate_arraycopy_stubs(); 3360 3361 // Don't initialize the platform math functions since sparc 3362 // doesn't have intrinsics for these operations. 3363 3364 // Safefetch stubs. 3365 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 3366 &StubRoutines::_safefetch32_fault_pc, 3367 &StubRoutines::_safefetch32_continuation_pc); 3368 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 3369 &StubRoutines::_safefetchN_fault_pc, 3370 &StubRoutines::_safefetchN_continuation_pc); 3371 } 3372 3373 3374 public: 3375 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 3376 // replace the standard masm with a special one: 3377 _masm = new MacroAssembler(code); 3378 3379 _stub_count = !all ? 0x100 : 0x200; 3380 if (all) { 3381 generate_all(); 3382 } else { 3383 generate_initial(); 3384 } 3385 3386 // make sure this stub is available for all local calls 3387 if (_atomic_add_stub.is_unbound()) { 3388 // generate a second time, if necessary 3389 (void) generate_atomic_add(); 3390 } 3391 } 3392 3393 3394 private: 3395 int _stub_count; 3396 void stub_prolog(StubCodeDesc* cdesc) { 3397 # ifdef ASSERT 3398 // put extra information in the stub code, to make it more readable 3399 #ifdef _LP64 3400 // Write the high part of the address 3401 // [RGV] Check if there is a dependency on the size of this prolog 3402 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none); 3403 #endif 3404 __ emit_data((intptr_t)cdesc, relocInfo::none); 3405 __ emit_data(++_stub_count, relocInfo::none); 3406 # endif 3407 align(true); 3408 } 3409 3410 void align(bool at_header = false) { 3411 // %%%%% move this constant somewhere else 3412 // UltraSPARC cache line size is 8 instructions: 3413 const unsigned int icache_line_size = 32; 3414 const unsigned int icache_half_line_size = 16; 3415 3416 if (at_header) { 3417 while ((intptr_t)(__ pc()) % icache_line_size != 0) { 3418 __ emit_data(0, relocInfo::none); 3419 } 3420 } else { 3421 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { 3422 __ nop(); 3423 } 3424 } 3425 } 3426 3427 }; // end class declaration 3428 3429 void StubGenerator_generate(CodeBuffer* code, bool all) { 3430 StubGenerator g(code, all); 3431 }