/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Note: The register L7 is used as L7_thread_cache, and may not be used
//       any other way within this module.
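
// For orientation: with the "__" macro above, a pseudo-instruction such as
//
//   __ mov(G0, O0);
//
// expands to _masm->mov(G0, O0) and emits code at the assembler's current
// pc().  (Illustrative example only; the instruction shown is not part of
// any particular stub.)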


static const Register& Lstub_temp = L2;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc  = thread->saved_exception_pc();
  address npc = thread->saved_exception_npc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(a,b,c) (0)
#else
#define inc_counter_np(counter, t1, t2) \
  BLOCK_COMMENT("inc_counter " #counter); \
  __ inc_counter(&counter, t1, t2);
#endif

  //----------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C

  address generate_call_stub(address& return_pc) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // Incoming arguments:
    //
    // o0         : call wrapper address
    // o1         : result (address)
    // o2         : result type
    // o3         : method
    // o4         : (interpreter) entry point
    // o5         : parameters (address)
    // [sp + 0x5c]: parameter size (in words)
    // [sp + 0x60]: thread
    //
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  param. size  |
    // +---------------+ <--- sp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // note: if the link argument position changes, adjust
    //       the code in frame::entry_frame_call_wrapper()

    const Argument link           = Argument(0, false); // used only for GC
    const Argument result         = Argument(1, false);
    const Argument result_type    = Argument(2, false);
    const Argument method         = Argument(3, false);
    const Argument entry_point    = Argument(4, false);
    const Argument parameters     = Argument(5, false);
    const Argument parameter_size = Argument(6, false);
    const Argument thread         = Argument(7, false);

    // setup thread register
    __ ld_ptr(thread.as_address(), G2_thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    { const Register t = G3_scratch;
      Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
      __ br_null(t, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // create activation frame & allocate space for parameters
    { const Register t = G3_scratch;
      __ ld_ptr(parameter_size.as_address(), t);            // get parameter size (in words)
      __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
      __ round_to(t, WordsPerLong);                         // make sure it is multiple of 2 (in words)
      __ sll(t, Interpreter::logStackElementSize, t);       // compute number of bytes
      __ neg(t);                                            // negate so it can be used with save
      __ save(SP, t, SP);                                   // setup new frame
    }

    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  empty slot   |    (only if parameter size is even)
    // +---------------+
    // |               |
    // .  parameters   .
    // |               |
    // +---------------+ <--- fp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- fp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- fp + 0x5c
    // |  param. size  |
    // +---------------+ <--- fp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    { const Register src = parameters.as_in().as_register();
      const Register dst = Lentry_args;
      const Register tmp = G3_scratch;
      const Register cnt = G4_scratch;

      // test if any parameters & setup of Lentry_args
      Label exit;
      __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter
      __ add( FP, STACK_BIAS, dst );
      __ tst(cnt);
      __ br(Assembler::zero, false, Assembler::pn, exit);
      __ delayed()->sub(dst, BytesPerWord, dst);           // setup Lentry_args

      // copy parameters if any
      Label loop;
      __ BIND(loop);
      // Store parameter value
      __ ld_ptr(src, 0, tmp);
      __ add(src, BytesPerWord, src);
      __ st_ptr(tmp, dst, 0);
      __ deccc(cnt);
      __ br(Assembler::greater, false, Assembler::pt, loop);
      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);

      // done
      __ BIND(exit);
    }

    // setup parameters, method & call Java function
#ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
    // this register, so if this changes update it as well.
    const Register saved_SP = Lscratch;
    __ mov(SP, saved_SP);                                 // keep track of SP before call
#endif

    // setup parameters
    const Register t = G3_scratch;
    __ ld_ptr(parameter_size.as_in().as_address(), t);    // get parameter size (in words)
    __ sll(t, Interpreter::logStackElementSize, t);       // compute number of bytes
    __ sub(FP, t, Gargs);                                 // setup parameter pointer
#ifdef _LP64
    __ add( Gargs, STACK_BIAS, Gargs );                   // Account for LP64 stack bias
#endif
    __ mov(SP, O5_savedSP);


    // do the call
    //
    // the following registers must be set up:
    //
    // G2_thread
    // G5_method
    // Gargs
    BLOCK_COMMENT("call Java function");
    __ jmpl(entry_point.as_in().as_register(), G0, O7);
    __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method

    BLOCK_COMMENT("call_stub_return_address:");
    return_pc = __ pc();

    // The callee, if it wasn't interpreted, can return with SP changed so
    // we can no longer assert on the change of SP.
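
    // The branch ladder below dispatches on the Java result type; in C terms
    // it behaves roughly like:
    //
    //   switch (type) {
    //     case T_OBJECT: *(oop*)addr     = O0; break;
    //     case T_FLOAT:  *(jfloat*)addr  = F0; break;
    //     case T_DOUBLE: *(jdouble*)addr = F0; break;  // F0:F1 on 32-bit
    //     case T_LONG:   *(jlong*)addr   = O0; break;  // G1 or O0:O1 on 32-bit
    //     default:       *(jint*)addr    = O0; break;  // everything else as T_INT
    //   }
    //
    // (Illustrative paraphrase, not generated code.)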

    // store result depending on type
    // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
    //  is treated as T_INT)
    { const Register addr = result     .as_in().as_register();
      const Register type = result_type.as_in().as_register();
      Label is_long, is_float, is_double, is_object, exit;
      __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
      __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
      __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
      __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
      __ delayed()->nop();

      // store int result
      __ st(O0, addr, G0);

      __ BIND(exit);
      __ ret();
      __ delayed()->restore();

      __ BIND(is_object);
      __ ba(false, exit);
      __ delayed()->st_ptr(O0, addr, G0);

      __ BIND(is_float);
      __ ba(false, exit);
      __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);

      __ BIND(is_double);
      __ ba(false, exit);
      __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);

      __ BIND(is_long);
#ifdef _LP64
      __ ba(false, exit);
      __ delayed()->st_long(O0, addr, G0); // store entire long
#else
#if defined(COMPILER2)
      // All return values are where we want them, except for Longs.  C2 returns
      // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
      // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
      // build we simply always use G1.
      // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
      // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
      // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

      __ ba(false, exit);
      __ delayed()->stx(G1, addr, G0); // store entire long
#else
      __ st(O1, addr, BytesPerInt);
      __ ba(false, exit);
      __ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
    }
    return start;
  }

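  // For reference, the eight incoming arguments above mirror the C-side
  // CallStub function-pointer type declared in stubRoutines.hpp, roughly:
  //
  //   typedef void (*CallStub)(address link, intptr_t* result,
  //                            BasicType result_type, methodOopDesc* method,
  //                            address entry_point, intptr_t* parameters,
  //                            int size_of_parameters, TRAPS);
  //
  // (Sketch from memory of that declaration; consult stubRoutines.hpp for the
  // authoritative prototype.)
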
  //----------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Oexception: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();
    // verify that thread corresponds
    __ verify_thread();

    const Register& temp_reg = Gtemp;
    Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
    Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
    Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());

    // set pending exception
    __ verify_oop(Oexception);
    __ st_ptr(Oexception, pending_exception_addr);
    __ set((intptr_t)__FILE__, temp_reg);
    __ st_ptr(temp_reg, exception_file_offset_addr);
    __ set((intptr_t)__LINE__, temp_reg);
    __ st(temp_reg, exception_line_offset_addr);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
    __ jump_to(stub_ret, temp_reg);
    __ delayed()->nop();

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception
  // The pending exception check happened in the runtime or native call stub
  // The pending exception in Thread is converted into a Java-level exception
  //
  // Contract with Java-level exception handler: O0 = exception
  //                                             O1 = throwing pc

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

    // Upon entry, O7 has the return address returning into Java
    // (interpreted or compiled) code; i.e. the return address
    // becomes the throwing pc.

    const Register& handler_reg = Gtemp;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());

#ifdef ASSERT
    // make sure that this code is only executed if there is a pending exception
    { Label L;
      __ ld_ptr(exception_addr, Gtemp);
      __ br_notnull(Gtemp, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into handler_reg
    __ get_thread();
    __ ld_ptr(exception_addr, Oexception);
    __ verify_oop(Oexception);
    __ save_frame(0);             // compensates for compiler weakness
    __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
    __ mov(O0, handler_reg);
    __ restore();                 // compensates for compiler weakness

    __ ld_ptr(exception_addr, Oexception);
    __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ br_notnull(Oexception, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif
    // jump to exception handler
    __ jmp(handler_reg, 0);
    // clear pending exception
    __ delayed()->st_ptr(G0, exception_addr);

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because it needs to be
  // properly traversed and ignored during GC, so we change the meaning of the "__"
  // macro within this method.
#undef __
#define __ masm->

  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
#ifdef ASSERT
    int insts_size = VerifyThread ? 1 * K : 600;
#else
    int insts_size = VerifyThread ? 1 * K : 256;
#endif /* ASSERT */
    int locs_size = 32;

    CodeBuffer code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);

    __ verify_thread();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of thread-local storage
    __ assert_not_delayed();

    // Note that we always push a frame because on the SPARC
    // architecture, for all of our implicit exception kinds at call
    // sites, the implicit exception is taken before the callee frame
    // is pushed.
    __ save_frame(0);

    int frame_complete = __ offset();

    if (restore_saved_exception_pc) {
      __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
      __ sub(I7, frame::pc_return_offset, I7);
    }

    // Note that we always have a runtime stub frame on the top of stack by this point
    Register last_java_sp = SP;
    // 64-bit last_java_sp is biased!
    __ set_last_Java_frame(last_java_sp, G0);
    if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
    __ save_thread(noreg);
    // do the call
    BLOCK_COMMENT("call runtime_entry");
    __ call(runtime_entry, relocInfo::runtime_call_type);
    if (!VerifyThread)
      __ delayed()->mov(G2_thread, O0); // pass thread as first argument
    else
      __ delayed()->nop();              // (thread already passed)
    __ restore_thread(noreg);
    __ reset_last_Java_frame();

    // check for pending exceptions. use Gtemp as scratch register.
#ifdef ASSERT
    Label L;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    Register scratch_reg = Gtemp;
    __ ld_ptr(exception_addr, scratch_reg);
    __ br_notnull(scratch_reg, false, Assembler::pt, L);
    __ delayed()->nop();
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    BLOCK_COMMENT("call forward_exception_entry");
    __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    // we use O7 linkage so that forward_exception_entry has the issuing PC
    __ delayed()->restore();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
    return stub->entry_point();
  }

#undef __
#define __ _masm->

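  // The throw stubs produced by generate_throw_exception() are installed into
  // StubRoutines elsewhere in this file; a typical registration looks roughly
  // like the following (illustrative sketch, not the definitive list):
  //
  //   StubRoutines::_throw_StackOverflowError_entry =
  //       generate_throw_exception("StackOverflowError throw_exception",
  //                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),
  //                                false);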

  // Generate a routine that sets all the registers so we
  // can tell if the stop routine prints them correctly.
  address generate_test_stop() {
    StubCodeMark mark(this, "StubRoutines", "test_stop");
    address start = __ pc();

    int i;

    __ save_frame(0);

    static jfloat zero = 0.0, one = 1.0;

    // put addr in L0, then load through L0 to F0
    __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
    __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1

    // use add to put 2..18 in F2..F18
    for ( i = 2;  i <= 18;  ++i ) {
      __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i));
    }

    // Now put double 2 in F16, double 18 in F18
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2,  F16 );
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );

    // use add to put 20..32 in F20..F32
    for (i = 20; i < 32; i += 2) {
      __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i));
    }

    // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
    for ( i = 0; i < 8; ++i ) {
      if (i < 6) {
        __ set(     i, as_iRegister(i));
        __ set(16 + i, as_oRegister(i));
        __ set(24 + i, as_gRegister(i));
      }
      __ set( 8 + i, as_lRegister(i));
    }

    __ stop("testing stop");


    __ ret();
    __ delayed()->restore();

    return start;
  }


  address generate_stop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
    address start = __ pc();

    __ stop_subroutine();

    return start;
  }

  address generate_flush_callers_register_windows() {
    StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    address start = __ pc();

    __ flush_windows();
    __ retl(false);
    __ delayed()->add( FP, STACK_BIAS, O0 );
    // The returned value must be a stack pointer whose register save area
    // is flushed, and will stay flushed while the caller executes.

    return start;
  }

  // Helper functions for v8 atomic operations.
  //
  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
    if (mark_oop_reg == noreg) {
      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
    } else {
      assert(scratch_reg != noreg, "just checking");
      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
    }
  }

  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {

    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
    __ set(StubRoutines::Sparc::locked, lock_reg);
    // Initialize yield counter
    __ mov(G0, yield_reg);

    __ BIND(retry);
    __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
    __ br(Assembler::less, false, Assembler::pt, dontyield);
    __ delayed()->nop();

    // This code can only be called from inside the VM, this
    // stub is only invoked from Atomic::add().  We do not
    // want to use call_VM, because _last_java_sp and such
    // must already be set.
    //
    // Save the regs and make space for a C call
    __ save(SP, -96, SP);
    __ save_all_globals_into_locals();
    BLOCK_COMMENT("call os::naked_sleep");
    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
    __ delayed()->nop();
    __ restore_globals_from_locals();
    __ restore();
    // reset the counter
    __ mov(G0, yield_reg);

    __ BIND(dontyield);

    // try to get lock
    __ swap(lock_ptr_reg, 0, lock_reg);

    // did we get the lock?
    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
    __ br(Assembler::notEqual, true, Assembler::pn, retry);
    __ delayed()->add(yield_reg, 1, yield_reg);

    // yes, got lock. do the operation here.
  }

  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
    __ st(lock_reg, lock_ptr_reg, 0); // unlock
  }

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //
  //      O0: the value previously stored in dest
  //
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    if (UseCASForSwap) {
      // Use CAS instead of swap, just in case the MP hardware
      // prefers to work with just one kind of synch. instruction.
      Label retry;
      __ BIND(retry);
      __ mov(O0, O3);   // scratch copy of exchange value
      __ ld(O1, 0, O2); // observe the previous value
      // try to replace O2 with O3
      __ cas_under_lock(O1, O2, O3,
                        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(), false);
      __ cmp(O2, O3);
      __ br(Assembler::notEqual, false, Assembler::pn, retry);
      __ delayed()->nop();

      __ retl(false);
      __ delayed()->mov(O2, O0); // report previous value to caller

    } else {
      if (VM_Version::v9_instructions_work()) {
        __ retl(false);
        __ delayed()->swap(O1, 0, O0);
      } else {
        const Register& lock_reg     = O2;
        const Register& lock_ptr_reg = O3;
        const Register& yield_reg    = O4;

        Label retry;
        Label dontyield;

        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        // got the lock, do the swap
        __ swap(O1, 0, O0);

        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        __ retl(false);
        __ delayed()->nop();
      }
    }

    return start;
  }


  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //
  //      O0: the value previously stored in dest
  //
  // Overwrites (v8): O3,O4,O5
  //
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    // cmpxchg(dest, compare_value, exchange_value)
    __ cas_under_lock(O1, O2, O0,
                      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(), false);
    __ retl(false);
    __ delayed()->nop();

    return start;
  }

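  // These atomic stubs are handed to the rest of the VM through StubRoutines;
  // in this generator's setup phase the registration is roughly
  //
  //   StubRoutines::_atomic_xchg_entry    = generate_atomic_xchg();
  //   StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
  //
  // (Illustrative sketch; the actual assignments live in the generator's
  // initialization code, not at this point in the file.)
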
  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O1:O0
  //      dest:           O2
  //      compare_value:  O4:O3
  //
  // Results:
  //
  //      O1:O0: the value previously stored in dest
  //
  // This only works on V9, on V8 we don't generate any
  // code and just return NULL.
  //
  // Overwrites: G1,G2,G3
  //
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    if (!VM_Version::supports_cx8())
      return NULL;
    __ sllx(O0, 32, O0);
    __ srl(O1, 0, O1);
    __ or3(O0, O1, O0);  // O0 holds the 64-bit exchange_value
    __ sllx(O3, 32, O3);
    __ srl(O4, 0, O4);
    __ or3(O3, O4, O3);  // O3 holds the 64-bit compare_value
    __ casx(O2, O3, O0);
    __ srl(O0, 0, O1);   // unpacked return value in O1:O0
    __ retl(false);
    __ delayed()->srlx(O0, 32, O0);

    return start;
  }


  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //
  //      O0: the new value stored in dest
  //
  // Overwrites (v9): O3
  // Overwrites (v8): O3,O4,O5
  //
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();
    __ BIND(_atomic_add_stub);

    if (VM_Version::v9_instructions_work()) {
      Label(retry);
      __ BIND(retry);

      __ lduw(O1, 0, O2);
      __ add(O0, O2, O3);
      __ cas(O1, O2, O3);
      __ cmp( O2, O3);
      __ br(Assembler::notEqual, false, Assembler::pn, retry);
      __ delayed()->nop();
      __ retl(false);
      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
    } else {
      const Register& lock_reg     = O2;
      const Register& lock_ptr_reg = O3;
      const Register& value_reg    = O4;
      const Register& yield_reg    = O5;

      Label(retry);
      Label(dontyield);

      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
      // got lock, do the increment
      __ ld(O1, 0, value_reg);
      __ add(O0, value_reg, value_reg);
      __ st(value_reg, O1, 0);

      // %%% only for RMO and PSO
      __ membar(Assembler::StoreStore);

      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);

      __ retl(false);
      __ delayed()->mov(value_reg, O0);
    }

    return start;
  }
  Label _atomic_add_stub; // called from other stubs


  //------------------------------------------------------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  // Arguments:
  //
  //      trapping PC: O7
  //
  // Results:
  //      posts an asynchronous exception, skips the trapping instruction
  //

  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    const int preserve_register_words = (64 * 2);
    Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);

    Register Lthread = L7_thread_cache;
    int i;

    __ save_frame(0);
    __ mov(G1, L1);
    __ mov(G2, L2);
    __ mov(G3, L3);
    __ mov(G4, L4);
    __ mov(G5, L5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
    }

    address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(entry_point, relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ mov(L1, G1);
    __ mov(L2, G2);
    __ mov(L3, G3);
    __ mov(L4, G4);
    __ mov(L5, G5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
    }

    __ verify_thread();

    __ jmp(O0, 0);
    __ delayed()->restore();

    return start;
  }


  // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
  // Arguments:
  //
  //      ret    : O0, returned
  //      icc/xcc: set as O0 (depending on wordSize)
  //      sub    : O1, argument, not changed
  //      super  : O2, argument, not changed
  //      raddr  : O7, blown by call
  address generate_partial_subtype_check() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    address start = __ pc();
    Label miss;

#if defined(COMPILER2) && !defined(_LP64)
    // Do not use a 'save' because it blows the 64-bit O registers.
    __ add(SP, -4*wordSize, SP); // Make space for 4 temps (stack must be 2 words aligned)
    __ st_ptr(L0, SP, (frame::register_save_words+0)*wordSize);
    __ st_ptr(L1, SP, (frame::register_save_words+1)*wordSize);
    __ st_ptr(L2, SP, (frame::register_save_words+2)*wordSize);
    __ st_ptr(L3, SP, (frame::register_save_words+3)*wordSize);
    Register Rret   = O0;
    Register Rsub   = O1;
    Register Rsuper = O2;
#else
    __ save_frame(0);
    Register Rret   = I0;
    Register Rsub   = I1;
    Register Rsuper = I2;
#endif

    Register L0_ary_len = L0;
    Register L1_ary_ptr = L1;
    Register L2_super   = L2;
    Register L3_index   = L3;

    __ check_klass_subtype_slow_path(Rsub, Rsuper,
                                     L0, L1, L2, L3,
                                     NULL, &miss);

    // Match falls through here.
    __ addcc(G0, 0, Rret);  // set Z flags, Z result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP, (frame::register_save_words+0)*wordSize, L0);
    __ ld_ptr(SP, (frame::register_save_words+1)*wordSize, L1);
    __ ld_ptr(SP, (frame::register_save_words+2)*wordSize, L2);
    __ ld_ptr(SP, (frame::register_save_words+3)*wordSize, L3);
    __ retl();              // Result in Rret is zero; flags set to Z
    __ delayed()->add(SP, 4*wordSize, SP);
#else
    __ ret();               // Result in Rret is zero; flags set to Z
    __ delayed()->restore();
#endif

    __ BIND(miss);
    __ addcc(G0, 1, Rret);  // set NZ flags, NZ result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP, (frame::register_save_words+0)*wordSize, L0);
    __ ld_ptr(SP, (frame::register_save_words+1)*wordSize, L1);
    __ ld_ptr(SP, (frame::register_save_words+2)*wordSize, L2);
    __ ld_ptr(SP, (frame::register_save_words+3)*wordSize, L3);
    __ retl();              // Result in Rret is != 0; flags set to NZ
    __ delayed()->add(SP, 4*wordSize, SP);
#else
    __ ret();               // Result in Rret is != 0; flags set to NZ
    __ delayed()->restore();
#endif

    return start;
  }


  // Called from MacroAssembler::verify_oop
  //
  address generate_verify_oop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");

    address start = __ pc();

    __ verify_oop_subroutine();

    return start;
  }


  //
  // Verify that a register contains a clean 32-bit positive value
  // (the high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64)
    __ signx(Rint, Rtmp);
    __ cmp(Rint, Rtmp);
    __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif
  }

  //
  // Generate overlap test for array copy stubs
  //
  //  Input:
  //    O0    -  array1
  //    O1    -  array2
  //    O2    -  element count
  //
  //  Kills temps:  O3, O4
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, log2_elem_size);
  }
  void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
    array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
    const Register from       = O0;
    const Register to         = O1;
    const Register count      = O2;
    const Register to_from    = O3; // to - from
    const Register byte_count = O4; // count << log2_elem_size

    __ subcc(to, from, to_from);
    __ sll_ptr(count, log2_elem_size, byte_count);
    if (NOLp == NULL)
      __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
    else
      __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
    __ delayed()->cmp(to_from, byte_count);
    if (NOLp == NULL)
      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
    else
      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
    __ delayed()->nop();
  }
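
  // In C terms, the overlap test above takes the no-overlap branch when a
  // forward (disjoint-style) copy is safe, i.e. roughly:
  //
  //   if ((uintptr_t)to <= (uintptr_t)from ||
  //       (uintptr_t)(to - from) >= ((uintptr_t)count << log2_elem_size))
  //     goto no_overlap_target;   // or branch to *NOLp
  //
  // (Illustrative paraphrase of the two branches above, not generated code.)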

  //
  // Generate pre-write barrier for array.
  //
  //  Input:
  //     addr  -  register containing starting address
  //     count -  register containing element count
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    if (bs->has_write_ref_pre_barrier()) {
      assert(bs->has_write_ref_array_pre_opt(),
             "Else unsupported barrier set.");

      __ save_frame(0);
      // Save the necessary global regs... will be used after.
      if (addr->is_global()) {
        __ mov(addr, L0);
      }
      if (count->is_global()) {
        __ mov(count, L1);
      }
      __ mov(addr->after_save(), O0);
      // Get the count into O1
      __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
      __ delayed()->mov(count->after_save(), O1);
      if (addr->is_global()) {
        __ mov(L0, addr);
      }
      if (count->is_global()) {
        __ mov(L1, count);
      }
      __ restore();
    }
  }
  //
  // Generate post-write barrier for array.
  //
  //  Input:
  //     addr  -  register containing starting address
  //     count -  register containing element count
  //     tmp   -  scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count,
                                        Register tmp) {
    BarrierSet* bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          // Get some new fresh output registers.
          __ save_frame(0);
          __ mov(addr->after_save(), O0);
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
          __ delayed()->mov(count->after_save(), O1);
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          Label L_loop;

          __ sll_ptr(count, LogBytesPerHeapOop, count);
          __ sub(count, BytesPerHeapOop, count);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
          __ srl_ptr(count, CardTableModRefBS::card_shift, count);
          __ sub(count, addr, count);
          AddressLiteral rs(ct->byte_map_base);
          __ set(rs, tmp);
          __ BIND(L_loop);
          __ stb(G0, tmp, addr);
          __ subcc(count, 1, count);
          __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
          __ delayed()->add(addr, 1, addr);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }


  // Copy big chunks forward with shift
  //
  // Inputs:
  //   from      - source arrays
  //   to        - destination array aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_copy_bytes - copy exit label
  //
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                     Register count, int count_dec, Label& L_copy_bytes) {
    Label L_loop, L_aligned_copy, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->nop();

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
    //
    __ deccc(count, count_dec); // Pre-decrement 'count'
    __ andn(from, 7, from);     // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(from, 0, O4);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ stx(O3, to, -16);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    __ stx(O4, to, -8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

    __ BIND(L_copy_last_bytes);
    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->sub(from, right_shift, from);       // restore address

    __ BIND(L_aligned_copy);
  }

  // Copy big chunks backward with shift
  //
  // Inputs:
  //   end_from  - source arrays end address
  //   end_to    - destination array end address aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_aligned_copy - aligned copy exit label
  //   L_copy_bytes   - copy exit label
  //
  void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
                     Register count, int count_dec,
                     Label& L_aligned_copy, Label& L_copy_bytes) {
    Label L_loop, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(end_from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
    //
    __ andn(end_from, 7, end_from); // Align address
    __ ldx(end_from, 0, O3);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(end_from, -8, O4);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ ldx(end_from, -16, G4);
    __ dec(end_to, 16);
    __ dec(end_from, 16);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift,  G3);
    __ bset(G3, O3);
    __ stx(O3, end_to, 8);
    __ srlx(O4, right_shift, O4);
    __ sllx(G4, left_shift,  G3);
    __ bset(G3, O4);
    __ stx(O4, end_to, 0);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(end_from, -8, O4);
    __ dec(end_to, 8);
    __ dec(end_from, 8);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift,  G3);
    __ bset(O3, G3);
    __ stx(G3, end_to, 0);

    __ BIND(L_copy_last_bytes);
    __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->add(end_from, left_shift, end_from); // restore address
  }

  //
  //  Generate stub for disjoint byte copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from   = O0; // source array address
    const Register to     = O1; // destination array address
    const Register count  = O2; // elements count
    const Register offset = O5; // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4-byte alignment in the 32-bit VM and 8-byte
      // alignment in the 64-bit VM, so this step is only needed in the 32-bit VM.
      //
#ifndef _LP64
      // copy a 4-bytes word if necessary to align 'to' to 8 bytes
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
      __ delayed()->ld(from, 0, O3);
      __ inc(from, 4);
      __ inc(to, 4);
      __ dec(count, 4);
      __ st(O3, to, -4);
      __ BIND(L_skip_alignment);
#endif
    } else {
      // copy bytes to align 'to' on 8 byte boundary
      __ andcc(to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->neg(G1);
      __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ ldub(from, 0, O3);
      __ deccc(G1);
      __ inc(from);
      __ stb(O3, to, 0);
      __ br(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->inc(to);
      __ BIND(L_skip_alignment);
    }
#ifdef _LP64
    if (!aligned)
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
    }

    // Both arrays are 8 bytes aligned, copy 16 bytes at a time
    __ and3(count, 7, G4); // Save count
    __ srl(count, 3, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // Restore count

    // copy trailing bytes
    __ BIND(L_copy_byte);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ ldub(from, offset, O3);
    __ deccc(count);
    __ stb(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->inc(offset);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  //  Generate stub for conjoint byte copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address *entry, const char *name) {
    // Do reverse copy.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align, L_aligned_copy;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from     = O0;   // source array address
    const Register to       = O1;   // destination array address
    const Register count    = O2;   // elements count
    const Register end_from = from; // source array end address
    const Register end_to   = to;   // destination array end address

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, 0);

    __ add(to, count, end_to); // offset after last copied element

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->add(from, count, end_from);

    {
      // Align end of arrays since they could be not aligned even
      // when the arrays themselves are aligned.

      // copy bytes to align 'end_to' on 8 byte boundary
      __ andcc(end_to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->nop();
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ dec(end_from);
      __ dec(end_to);
      __ ldub(end_from, 0, O3);
      __ deccc(G1);
      __ brx(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->stb(O3, end_to, 0);
      __ BIND(L_skip_alignment);
    }
#ifdef _LP64
    if (aligned) {
      // Both arrays are aligned to 8 bytes in the 64-bit VM.
      // The 'count' is decremented in copy_16_bytes_backward_with_shift()
      // only in the unaligned case.
      __ dec(count, 16);
    } else
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise jump to the next
      // code for aligned copy (and subtracting 16 from 'count' before the jump).
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
                                        L_aligned_copy, L_copy_byte);
    }
    // copy 16 bytes (16 elements) at a time
    __ align(OptoLoopAlignment);
    __ BIND(L_aligned_copy);
    __ dec(end_from, 16);
    __ ldx(end_from, 8, O3);
    __ ldx(end_from, 0, O4);
    __ dec(end_to, 16);
    __ deccc(count, 16);
    __ stx(O3, end_to, 8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
    __ delayed()->stx(O4, end_to, 0);
    __ inc(count, 16);

    // copy 1 element (1 byte) at a time
    __ BIND(L_copy_byte);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ dec(end_from);
    __ dec(end_to);
    __ ldub(end_from, 0, O4);
    __ deccc(count);
    __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->stb(O4, end_to, 0);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

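  // The disjoint/conjoint pairs generated above are wired together elsewhere
  // in this file (generate_arraycopy_stubs()); the registration looks roughly
  // like:
  //
  //   StubRoutines::_jbyte_disjoint_arraycopy =
  //       generate_disjoint_byte_copy(false, &entry, "jbyte_disjoint_arraycopy");
  //   StubRoutines::_jbyte_arraycopy =
  //       generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, "jbyte_arraycopy");
  //
  // where 'entry' lets the conjoint stub branch into its disjoint partner once
  // the overlap test passes.  (Illustrative sketch; see generate_arraycopy_stubs()
  // for the authoritative wiring.)
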
  //
  //  Generate stub for disjoint short copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_skip_alignment2;
    Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;

    const Register from   = O0; // source array address
    const Register to     = O1; // destination array address
    const Register count  = O2; // elements count
    const Register offset = O5; // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 11); // 8 + 3  (22 bytes)
    __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4-byte alignment in the 32-bit VM
      // and 8-byte alignment in the 64-bit VM.
      //
#ifndef _LP64
      // copy a 2-elements word if necessary to align 'to' to 8 bytes
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->ld(from, 0, O3);
      __ inc(from, 4);
      __ inc(to, 4);
      __ dec(count, 2);
      __ st(O3, to, -4);
      __ BIND(L_skip_alignment);
#endif
    } else {
      // copy 1 element if necessary to align 'to' on a 4-byte boundary
      __ andcc(to, 3, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->lduh(from, 0, O3);
      __ inc(from, 2);
      __ inc(to, 2);
      __ dec(count);
      __ sth(O3, to, -2);
      __ BIND(L_skip_alignment);

      // copy 2 elements to align 'to' on an 8-byte boundary
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
      __ delayed()->lduh(from, 0, O3);
      __ dec(count, 2);
      __ lduh(from, 2, O4);
      __ inc(from, 4);
      __ inc(to, 4);
      __ sth(O3, to, -4);
      __ sth(O4, to, -2);
      __ BIND(L_skip_alignment2);
    }
#ifdef _LP64
    if (!aligned)
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
    }

    // Both arrays are 8 bytes aligned, copy 16 bytes at a time
    __ and3(count, 3, G4); // Save
    __ srl(count, 2, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // restore

    // copy 1 element at a time
    __ BIND(L_copy_2_bytes);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_2_bytes_loop);
    __ lduh(from, offset, O3);
    __ deccc(count);
    __ sth(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
    __ delayed()->inc(offset, 2);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  //  Generate stub for array fill.  If "aligned" is true, the
  //  "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      to:    O0
  //      value: O1
  //      count: O2 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register to    = O0; // destination array address
    const Register value = O1; // fill value
    const Register count = O2; // elements count
    // O3 is used as a temp register

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        break;
      case T_SHORT:
        shift = 1;
        break;
      case T_INT:
        shift = 0;
        break;
      default: ShouldNotReachHere();
    }

    BLOCK_COMMENT("Entry:");

    if (t == T_BYTE) {
      // Zero extend value
      __ and3(value, 0xff, value);
      __ sllx(value, 8, O3);
      __ or3(value, O3, value);
    }
    if (t == T_SHORT) {
      // Zero extend value
      __ sllx(value, 48, value);
      __ srlx(value, 48, value);
    }
    if (t == T_BYTE || t == T_SHORT) {
      __ sllx(value, 16, O3);
      __ or3(value, O3, value);
    }

    __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
    __ delayed()->andcc(count, 1, G0);

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // align source address at 4 bytes address boundary
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ andcc(to, 1, G0);
        __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
        __ delayed()->nop();
        __ stb(value, to, 0);
        __ inc(to, 1);
        __ dec(count, 1);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ andcc(to, 2, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
      __ delayed()->nop();
      __ sth(value, to, 0);
      __ inc(to, 2);
      __ dec(count, 1 << (shift - 1));
      __ BIND(L_skip_align2);
    }
#ifdef _LP64
    if (!aligned) {
#endif
    // align to 8 bytes, we know we are 4 byte aligned to start
    __ andcc(to, 7, G0);
    __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes); 1691 __ delayed()->nop(); 1692 __ stw(value, to, 0); 1693 __ inc(to, 4); 1694 __ dec(count, 1 << shift); 1695 __ BIND(L_fill_32_bytes); 1696 #ifdef _LP64 1697 } 1698 #endif 1699 1700 if (t == T_INT) { 1701 // Zero extend value 1702 __ srl(value, 0, value); 1703 } 1704 if (t == T_BYTE || t == T_SHORT || t == T_INT) { 1705 __ sllx(value, 32, O3); 1706 __ or3(value, O3, value); 1707 } 1708 1709 Label L_check_fill_8_bytes; 1710 // Fill 32-byte chunks 1711 __ subcc(count, 8 << shift, count); 1712 __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes); 1713 __ delayed()->nop(); 1714 1715 Label L_fill_32_bytes_loop, L_fill_4_bytes; 1716 __ align(16); 1717 __ BIND(L_fill_32_bytes_loop); 1718 1719 __ stx(value, to, 0); 1720 __ stx(value, to, 8); 1721 __ stx(value, to, 16); 1722 __ stx(value, to, 24); 1723 1724 __ subcc(count, 8 << shift, count); 1725 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop); 1726 __ delayed()->add(to, 32, to); 1727 1728 __ BIND(L_check_fill_8_bytes); 1729 __ addcc(count, 8 << shift, count); 1730 __ brx(Assembler::zero, false, Assembler::pn, L_exit); 1731 __ delayed()->subcc(count, 1 << (shift + 1), count); 1732 __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes); 1733 __ delayed()->andcc(count, 1<<shift, G0); 1734 1735 // 1736 // length is too short, just fill 8 bytes at a time 1737 // 1738 Label L_fill_8_bytes_loop; 1739 __ BIND(L_fill_8_bytes_loop); 1740 __ stx(value, to, 0); 1741 __ subcc(count, 1 << (shift + 1), count); 1742 __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop); 1743 __ delayed()->add(to, 8, to); 1744 1745 // fill trailing 4 bytes 1746 __ andcc(count, 1<<shift, G0); // in delay slot of branches 1747 if (t == T_INT) { 1748 __ BIND(L_fill_elements); 1749 } 1750 __ BIND(L_fill_4_bytes); 1751 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes); 1752 if (t == T_BYTE || t == T_SHORT) { 1753 __ delayed()->andcc(count, 1<<(shift-1), G0); 1754 } else { 1755 __ delayed()->nop(); 1756 } 1757 __ stw(value, to, 0); 1758 if (t == T_BYTE || t == T_SHORT) { 1759 __ inc(to, 4); 1760 // fill trailing 2 bytes 1761 __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches 1762 __ BIND(L_fill_2_bytes); 1763 __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte); 1764 __ delayed()->andcc(count, 1, count); 1765 __ sth(value, to, 0); 1766 if (t == T_BYTE) { 1767 __ inc(to, 2); 1768 // fill trailing byte 1769 __ andcc(count, 1, count); // in delay slot of branches 1770 __ BIND(L_fill_byte); 1771 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1772 __ delayed()->nop(); 1773 __ stb(value, to, 0); 1774 } else { 1775 __ BIND(L_fill_byte); 1776 } 1777 } else { 1778 __ BIND(L_fill_2_bytes); 1779 } 1780 __ BIND(L_exit); 1781 __ retl(); 1782 __ delayed()->nop(); 1783 1784 // Handle copies less than 8 bytes. Int is handled elsewhere. 
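  // (Illustrative note: this small-count path is driven by the low bits of
  //  'count' -- the andcc/brx pairs below test bit 0, then bit 1, then bit 2,
  //  so e.g. a byte fill with count == 7 emits a 1-byte, a 2-byte and a
  //  4-byte store in turn.)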
1785 if (t == T_BYTE) { 1786 __ BIND(L_fill_elements); 1787 Label L_fill_2, L_fill_4; 1788 // in delay slot __ andcc(count, 1, G0); 1789 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1790 __ delayed()->andcc(count, 2, G0); 1791 __ stb(value, to, 0); 1792 __ inc(to, 1); 1793 __ BIND(L_fill_2); 1794 __ brx(Assembler::zero, false, Assembler::pt, L_fill_4); 1795 __ delayed()->andcc(count, 4, G0); 1796 __ stb(value, to, 0); 1797 __ stb(value, to, 1); 1798 __ inc(to, 2); 1799 __ BIND(L_fill_4); 1800 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1801 __ delayed()->nop(); 1802 __ stb(value, to, 0); 1803 __ stb(value, to, 1); 1804 __ stb(value, to, 2); 1805 __ retl(); 1806 __ delayed()->stb(value, to, 3); 1807 } 1808 1809 if (t == T_SHORT) { 1810 Label L_fill_2; 1811 __ BIND(L_fill_elements); 1812 // in delay slot __ andcc(count, 1, G0); 1813 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1814 __ delayed()->andcc(count, 2, G0); 1815 __ sth(value, to, 0); 1816 __ inc(to, 2); 1817 __ BIND(L_fill_2); 1818 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1819 __ delayed()->nop(); 1820 __ sth(value, to, 0); 1821 __ retl(); 1822 __ delayed()->sth(value, to, 2); 1823 } 1824 return start; 1825 } 1826 1827 // 1828 // Generate stub for conjoint short copy. If "aligned" is true, the 1829 // "from" and "to" addresses are assumed to be heapword aligned. 1830 // 1831 // Arguments for generated stub: 1832 // from: O0 1833 // to: O1 1834 // count: O2 treated as signed 1835 // 1836 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1837 address *entry, const char *name) { 1838 // Do reverse copy. 1839 1840 __ align(CodeEntryAlignment); 1841 StubCodeMark mark(this, "StubRoutines", name); 1842 address start = __ pc(); 1843 1844 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy; 1845 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 1846 1847 const Register from = O0; // source array address 1848 const Register to = O1; // destination array address 1849 const Register count = O2; // elements count 1850 const Register end_from = from; // source array end address 1851 const Register end_to = to; // destination array end address 1852 1853 const Register byte_count = O3; // bytes count to copy 1854 1855 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1856 1857 if (entry != NULL) { 1858 *entry = __ pc(); 1859 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1860 BLOCK_COMMENT("Entry:"); 1861 } 1862 1863 array_overlap_test(nooverlap_target, 1); 1864 1865 __ sllx(count, LogBytesPerShort, byte_count); 1866 __ add(to, byte_count, end_to); // offset after last copied element 1867 1868 // for short arrays, just do single element copy 1869 __ cmp(count, 11); // 8 + 3 (22 bytes) 1870 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes); 1871 __ delayed()->add(from, byte_count, end_from); 1872 1873 { 1874 // Align end of arrays since they could be not aligned even 1875 // when arrays itself are aligned. 
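  // (Since this is a backward copy, it is the end addresses that get aligned:
  //  'end_to' is brought to a 4-byte and then to an 8-byte boundary below.)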
1876 1877 // copy 1 element if necessary to align 'end_to' on an 4 bytes 1878 __ andcc(end_to, 3, G0); 1879 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1880 __ delayed()->lduh(end_from, -2, O3); 1881 __ dec(end_from, 2); 1882 __ dec(end_to, 2); 1883 __ dec(count); 1884 __ sth(O3, end_to, 0); 1885 __ BIND(L_skip_alignment); 1886 1887 // copy 2 elements to align 'end_to' on an 8 byte boundary 1888 __ andcc(end_to, 7, G0); 1889 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2); 1890 __ delayed()->lduh(end_from, -2, O3); 1891 __ dec(count, 2); 1892 __ lduh(end_from, -4, O4); 1893 __ dec(end_from, 4); 1894 __ dec(end_to, 4); 1895 __ sth(O3, end_to, 2); 1896 __ sth(O4, end_to, 0); 1897 __ BIND(L_skip_alignment2); 1898 } 1899 #ifdef _LP64 1900 if (aligned) { 1901 // Both arrays are aligned to 8-bytes in 64-bits VM. 1902 // The 'count' is decremented in copy_16_bytes_backward_with_shift() 1903 // in unaligned case. 1904 __ dec(count, 8); 1905 } else 1906 #endif 1907 { 1908 // Copy with shift 16 bytes per iteration if arrays do not have 1909 // the same alignment mod 8, otherwise jump to the next 1910 // code for aligned copy (and substracting 8 from 'count' before jump). 1911 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1912 // Also jump over aligned copy after the copy with shift completed. 1913 1914 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8, 1915 L_aligned_copy, L_copy_2_bytes); 1916 } 1917 // copy 4 elements (16 bytes) at a time 1918 __ align(OptoLoopAlignment); 1919 __ BIND(L_aligned_copy); 1920 __ dec(end_from, 16); 1921 __ ldx(end_from, 8, O3); 1922 __ ldx(end_from, 0, O4); 1923 __ dec(end_to, 16); 1924 __ deccc(count, 8); 1925 __ stx(O3, end_to, 8); 1926 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1927 __ delayed()->stx(O4, end_to, 0); 1928 __ inc(count, 8); 1929 1930 // copy 1 element (2 bytes) at a time 1931 __ BIND(L_copy_2_bytes); 1932 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit); 1933 __ delayed()->nop(); 1934 __ BIND(L_copy_2_bytes_loop); 1935 __ dec(end_from, 2); 1936 __ dec(end_to, 2); 1937 __ lduh(end_from, 0, O4); 1938 __ deccc(count); 1939 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop); 1940 __ delayed()->sth(O4, end_to, 0); 1941 1942 __ BIND(L_exit); 1943 // O3, O4 are used as temp registers 1944 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4); 1945 __ retl(); 1946 __ delayed()->mov(G0, O0); // return 0 1947 return start; 1948 } 1949 1950 // 1951 // Generate core code for disjoint int copy (and oop copy on 32-bit). 1952 // If "aligned" is true, the "from" and "to" addresses are assumed 1953 // to be heapword aligned. 1954 // 1955 // Arguments: 1956 // from: O0 1957 // to: O1 1958 // count: O2 treated as signed 1959 // 1960 void generate_disjoint_int_copy_core(bool aligned) { 1961 1962 Label L_skip_alignment, L_aligned_copy; 1963 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 1964 1965 const Register from = O0; // source array address 1966 const Register to = O1; // destination array address 1967 const Register count = O2; // elements count 1968 const Register offset = O5; // offset from start of arrays 1969 // O3, O4, G3, G4 are used as temp registers 1970 1971 // 'aligned' == true when it is known statically during compilation 1972 // of this arraycopy call site that both 'from' and 'to' addresses 1973 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 
1974 // 1975 // Aligned arrays have 4 bytes alignment in 32-bits VM 1976 // and 8 bytes - in 64-bits VM. 1977 // 1978 #ifdef _LP64 1979 if (!aligned) 1980 #endif 1981 { 1982 // The next check could be put under 'ifndef' since the code in 1983 // generate_disjoint_long_copy_core() has own checks and set 'offset'. 1984 1985 // for short arrays, just do single element copy 1986 __ cmp(count, 5); // 4 + 1 (20 bytes) 1987 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 1988 __ delayed()->mov(G0, offset); 1989 1990 // copy 1 element to align 'to' on an 8 byte boundary 1991 __ andcc(to, 7, G0); 1992 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1993 __ delayed()->ld(from, 0, O3); 1994 __ inc(from, 4); 1995 __ inc(to, 4); 1996 __ dec(count); 1997 __ st(O3, to, -4); 1998 __ BIND(L_skip_alignment); 1999 2000 // if arrays have same alignment mod 8, do 4 elements copy 2001 __ andcc(from, 7, G0); 2002 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 2003 __ delayed()->ld(from, 0, O3); 2004 2005 // 2006 // Load 2 aligned 8-bytes chunks and use one from previous iteration 2007 // to form 2 aligned 8-bytes chunks to store. 2008 // 2009 // copy_16_bytes_forward_with_shift() is not used here since this 2010 // code is more optimal. 2011 2012 // copy with shift 4 elements (16 bytes) at a time 2013 __ dec(count, 4); // The cmp at the beginning guaranty count >= 4 2014 2015 __ align(OptoLoopAlignment); 2016 __ BIND(L_copy_16_bytes); 2017 __ ldx(from, 4, O4); 2018 __ deccc(count, 4); // Can we do next iteration after this one? 2019 __ ldx(from, 12, G4); 2020 __ inc(to, 16); 2021 __ inc(from, 16); 2022 __ sllx(O3, 32, O3); 2023 __ srlx(O4, 32, G3); 2024 __ bset(G3, O3); 2025 __ stx(O3, to, -16); 2026 __ sllx(O4, 32, O4); 2027 __ srlx(G4, 32, G3); 2028 __ bset(G3, O4); 2029 __ stx(O4, to, -8); 2030 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2031 __ delayed()->mov(G4, O3); 2032 2033 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 2034 __ delayed()->inc(count, 4); // restore 'count' 2035 2036 __ BIND(L_aligned_copy); 2037 } 2038 // copy 4 elements (16 bytes) at a time 2039 __ and3(count, 1, G4); // Save 2040 __ srl(count, 1, count); 2041 generate_disjoint_long_copy_core(aligned); 2042 __ mov(G4, count); // Restore 2043 2044 // copy 1 element at a time 2045 __ BIND(L_copy_4_bytes); 2046 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit); 2047 __ delayed()->nop(); 2048 __ BIND(L_copy_4_bytes_loop); 2049 __ ld(from, offset, O3); 2050 __ deccc(count); 2051 __ st(O3, to, offset); 2052 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop); 2053 __ delayed()->inc(offset, 4); 2054 __ BIND(L_exit); 2055 } 2056 2057 // 2058 // Generate stub for disjoint int copy. If "aligned" is true, the 2059 // "from" and "to" addresses are assumed to be heapword aligned. 2060 // 2061 // Arguments for generated stub: 2062 // from: O0 2063 // to: O1 2064 // count: O2 treated as signed 2065 // 2066 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { 2067 __ align(CodeEntryAlignment); 2068 StubCodeMark mark(this, "StubRoutines", name); 2069 address start = __ pc(); 2070 2071 const Register count = O2; 2072 assert_clean_int(count, O3); // Make sure 'count' is clean int. 
2073 2074 if (entry != NULL) { 2075 *entry = __ pc(); 2076 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2077 BLOCK_COMMENT("Entry:"); 2078 } 2079 2080 generate_disjoint_int_copy_core(aligned); 2081 2082 // O3, O4 are used as temp registers 2083 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2084 __ retl(); 2085 __ delayed()->mov(G0, O0); // return 0 2086 return start; 2087 } 2088 2089 // 2090 // Generate core code for conjoint int copy (and oop copy on 32-bit). 2091 // If "aligned" is true, the "from" and "to" addresses are assumed 2092 // to be heapword aligned. 2093 // 2094 // Arguments: 2095 // from: O0 2096 // to: O1 2097 // count: O2 treated as signed 2098 // 2099 void generate_conjoint_int_copy_core(bool aligned) { 2100 // Do reverse copy. 2101 2102 Label L_skip_alignment, L_aligned_copy; 2103 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 2104 2105 const Register from = O0; // source array address 2106 const Register to = O1; // destination array address 2107 const Register count = O2; // elements count 2108 const Register end_from = from; // source array end address 2109 const Register end_to = to; // destination array end address 2110 // O3, O4, O5, G3 are used as temp registers 2111 2112 const Register byte_count = O3; // bytes count to copy 2113 2114 __ sllx(count, LogBytesPerInt, byte_count); 2115 __ add(to, byte_count, end_to); // offset after last copied element 2116 2117 __ cmp(count, 5); // for short arrays, just do single element copy 2118 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 2119 __ delayed()->add(from, byte_count, end_from); 2120 2121 // copy 1 element to align 'to' on an 8-byte boundary 2122 __ andcc(end_to, 7, G0); 2123 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 2124 __ delayed()->nop(); 2125 __ dec(count); 2126 __ dec(end_from, 4); 2127 __ dec(end_to, 4); 2128 __ ld(end_from, 0, O4); 2129 __ st(O4, end_to, 0); 2130 __ BIND(L_skip_alignment); 2131 2132 // Check if 'end_from' and 'end_to' have the same alignment. 2133 __ andcc(end_from, 7, G0); 2134 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 2135 __ delayed()->dec(count, 4); // The cmp at the start guarantees cnt >= 4 2136 2137 // copy with shift 4 elements (16 bytes) at a time 2138 // 2139 // Load 2 aligned 8-byte chunks and use one from previous iteration 2140 // to form 2 aligned 8-byte chunks to store.
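  // (Rough picture: 'end_from' is 4 bytes off from the 8-byte aligned 'end_to'
  //  here, so each aligned ldx straddles two int elements; the srlx/sllx/bset
  //  sequence below splices halves of adjacent 8-byte loads back into aligned
  //  8-byte stores.)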
2141 // 2142 __ ldx(end_from, -4, O3); 2143 __ align(OptoLoopAlignment); 2144 __ BIND(L_copy_16_bytes); 2145 __ ldx(end_from, -12, O4); 2146 __ deccc(count, 4); 2147 __ ldx(end_from, -20, O5); 2148 __ dec(end_to, 16); 2149 __ dec(end_from, 16); 2150 __ srlx(O3, 32, O3); 2151 __ sllx(O4, 32, G3); 2152 __ bset(G3, O3); 2153 __ stx(O3, end_to, 8); 2154 __ srlx(O4, 32, O4); 2155 __ sllx(O5, 32, G3); 2156 __ bset(O4, G3); 2157 __ stx(G3, end_to, 0); 2158 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2159 __ delayed()->mov(O5, O3); 2160 2161 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 2162 __ delayed()->inc(count, 4); 2163 2164 // copy 4 elements (16 bytes) at a time 2165 __ align(OptoLoopAlignment); 2166 __ BIND(L_aligned_copy); 2167 __ dec(end_from, 16); 2168 __ ldx(end_from, 8, O3); 2169 __ ldx(end_from, 0, O4); 2170 __ dec(end_to, 16); 2171 __ deccc(count, 4); 2172 __ stx(O3, end_to, 8); 2173 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 2174 __ delayed()->stx(O4, end_to, 0); 2175 __ inc(count, 4); 2176 2177 // copy 1 element (4 bytes) at a time 2178 __ BIND(L_copy_4_bytes); 2179 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit); 2180 __ delayed()->nop(); 2181 __ BIND(L_copy_4_bytes_loop); 2182 __ dec(end_from, 4); 2183 __ dec(end_to, 4); 2184 __ ld(end_from, 0, O4); 2185 __ deccc(count); 2186 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop); 2187 __ delayed()->st(O4, end_to, 0); 2188 __ BIND(L_exit); 2189 } 2190 2191 // 2192 // Generate stub for conjoint int copy. If "aligned" is true, the 2193 // "from" and "to" addresses are assumed to be heapword aligned. 2194 // 2195 // Arguments for generated stub: 2196 // from: O0 2197 // to: O1 2198 // count: O2 treated as signed 2199 // 2200 address generate_conjoint_int_copy(bool aligned, address nooverlap_target, 2201 address *entry, const char *name) { 2202 __ align(CodeEntryAlignment); 2203 StubCodeMark mark(this, "StubRoutines", name); 2204 address start = __ pc(); 2205 2206 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2207 2208 if (entry != NULL) { 2209 *entry = __ pc(); 2210 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2211 BLOCK_COMMENT("Entry:"); 2212 } 2213 2214 array_overlap_test(nooverlap_target, 2); 2215 2216 generate_conjoint_int_copy_core(aligned); 2217 2218 // O3, O4 are used as temp registers 2219 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2220 __ retl(); 2221 __ delayed()->mov(G0, O0); // return 0 2222 return start; 2223 } 2224 2225 // 2226 // Generate core code for disjoint long copy (and oop copy on 64-bit). 2227 // "aligned" is ignored, because we must make the stronger 2228 // assumption that both addresses are always 64-bit aligned. 
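  // (Worked example for the pseudo-code below: count == 13 copies 8 elements
  //  in the 64-byte loop, then 4 in the 16-byte loop, then 1 in the trailing
  //  8-byte step.)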
2229 // 2230 // Arguments: 2231 // from: O0 2232 // to: O1 2233 // count: O2 treated as signed 2234 // 2235 // count -= 2; 2236 // if ( count >= 0 ) { // >= 2 elements 2237 // if ( count >= 6) { // >= 8 elements 2238 // count -= 6; // original count - 8 2239 // do { 2240 // copy_8_elements; 2241 // count -= 8; 2242 // } while ( count >= 0 ); 2243 // count += 6; 2244 // } 2245 // if ( count >= 0 ) { // >= 2 elements 2246 // do { 2247 // copy_2_elements; 2248 // } while ( (count=count-2) >= 0 ); 2249 // } 2250 // } 2251 // count += 2; 2252 // if ( count != 0 ) { // 1 element left 2253 // copy_1_element; 2254 // } 2255 // 2256 void generate_disjoint_long_copy_core(bool aligned) { 2257 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2258 const Register from = O0; // source array address 2259 const Register to = O1; // destination array address 2260 const Register count = O2; // elements count 2261 const Register offset0 = O4; // element offset 2262 const Register offset8 = O5; // next element offset 2263 2264 __ deccc(count, 2); 2265 __ mov(G0, offset0); // offset from start of arrays (0) 2266 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2267 __ delayed()->add(offset0, 8, offset8); 2268 2269 // Copy by 64-byte chunks 2270 Label L_copy_64_bytes; 2271 const Register from64 = O3; // source address 2272 const Register to64 = G3; // destination address 2273 __ subcc(count, 6, O3); 2274 __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes ); 2275 __ delayed()->mov(to, to64); 2276 // Now we can use O4(offset0), O5(offset8) as temps 2277 __ mov(O3, count); 2278 __ mov(from, from64); 2279 2280 __ align(OptoLoopAlignment); 2281 __ BIND(L_copy_64_bytes); 2282 for( int off = 0; off < 64; off += 16 ) { 2283 __ ldx(from64, off+0, O4); 2284 __ ldx(from64, off+8, O5); 2285 __ stx(O4, to64, off+0); 2286 __ stx(O5, to64, off+8); 2287 } 2288 __ deccc(count, 8); 2289 __ inc(from64, 64); 2290 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes); 2291 __ delayed()->inc(to64, 64); 2292 2293 // Restore O4(offset0), O5(offset8) 2294 __ sub(from64, from, offset0); 2295 __ inccc(count, 6); 2296 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2297 __ delayed()->add(offset0, 8, offset8); 2298 2299 // Copy by 16-byte chunks 2300 __ align(OptoLoopAlignment); 2301 __ BIND(L_copy_16_bytes); 2302 __ ldx(from, offset0, O3); 2303 __ ldx(from, offset8, G3); 2304 __ deccc(count, 2); 2305 __ stx(O3, to, offset0); 2306 __ inc(offset0, 16); 2307 __ stx(G3, to, offset8); 2308 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2309 __ delayed()->inc(offset8, 16); 2310 2311 // Copy last 8 bytes 2312 __ BIND(L_copy_8_bytes); 2313 __ inccc(count, 2); 2314 __ brx(Assembler::zero, true, Assembler::pn, L_exit ); 2315 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs 2316 __ ldx(from, offset0, O3); 2317 __ stx(O3, to, offset0); 2318 __ BIND(L_exit); 2319 } 2320 2321 // 2322 // Generate stub for disjoint long copy. 2323 // "aligned" is ignored, because we must make the stronger 2324 // assumption that both addresses are always 64-bit aligned.
2325 // 2326 // Arguments for generated stub: 2327 // from: O0 2328 // to: O1 2329 // count: O2 treated as signed 2330 // 2331 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { 2332 __ align(CodeEntryAlignment); 2333 StubCodeMark mark(this, "StubRoutines", name); 2334 address start = __ pc(); 2335 2336 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2337 2338 if (entry != NULL) { 2339 *entry = __ pc(); 2340 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2341 BLOCK_COMMENT("Entry:"); 2342 } 2343 2344 generate_disjoint_long_copy_core(aligned); 2345 2346 // O3, O4 are used as temp registers 2347 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2348 __ retl(); 2349 __ delayed()->mov(G0, O0); // return 0 2350 return start; 2351 } 2352 2353 // 2354 // Generate core code for conjoint long copy (and oop copy on 64-bit). 2355 // "aligned" is ignored, because we must make the stronger 2356 // assumption that both addresses are always 64-bit aligned. 2357 // 2358 // Arguments: 2359 // from: O0 2360 // to: O1 2361 // count: O2 treated as signed 2362 // 2363 void generate_conjoint_long_copy_core(bool aligned) { 2364 // Do reverse copy. 2365 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2366 const Register from = O0; // source array address 2367 const Register to = O1; // destination array address 2368 const Register count = O2; // elements count 2369 const Register offset8 = O4; // element offset 2370 const Register offset0 = O5; // previous element offset 2371 2372 __ subcc(count, 1, count); 2373 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes ); 2374 __ delayed()->sllx(count, LogBytesPerLong, offset8); 2375 __ sub(offset8, 8, offset0); 2376 __ align(OptoLoopAlignment); 2377 __ BIND(L_copy_16_bytes); 2378 __ ldx(from, offset8, O2); 2379 __ ldx(from, offset0, O3); 2380 __ stx(O2, to, offset8); 2381 __ deccc(offset8, 16); // use offset8 as counter 2382 __ stx(O3, to, offset0); 2383 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes); 2384 __ delayed()->dec(offset0, 16); 2385 2386 __ BIND(L_copy_8_bytes); 2387 __ brx(Assembler::negative, false, Assembler::pn, L_exit ); 2388 __ delayed()->nop(); 2389 __ ldx(from, 0, O3); 2390 __ stx(O3, to, 0); 2391 __ BIND(L_exit); 2392 } 2393 2394 // Generate stub for conjoint long copy. 2395 // "aligned" is ignored, because we must make the stronger 2396 // assumption that both addresses are always 64-bit aligned. 2397 // 2398 // Arguments for generated stub: 2399 // from: O0 2400 // to: O1 2401 // count: O2 treated as signed 2402 // 2403 address generate_conjoint_long_copy(bool aligned, address nooverlap_target, 2404 address *entry, const char *name) { 2405 __ align(CodeEntryAlignment); 2406 StubCodeMark mark(this, "StubRoutines", name); 2407 address start = __ pc(); 2408 2409 assert(aligned, "Should always be aligned"); 2410 2411 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2412 2413 if (entry != NULL) { 2414 *entry = __ pc(); 2415 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2416 BLOCK_COMMENT("Entry:"); 2417 } 2418 2419 array_overlap_test(nooverlap_target, 3); 2420 2421 generate_conjoint_long_copy_core(aligned); 2422 2423 // O3, O4 are used as temp registers 2424 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2425 __ retl(); 2426 __ delayed()->mov(G0, O0); // return 0 2427 return start; 2428 } 2429 2430 // Generate stub for disjoint oop copy. 
If "aligned" is true, the 2431 // "from" and "to" addresses are assumed to be heapword aligned. 2432 // 2433 // Arguments for generated stub: 2434 // from: O0 2435 // to: O1 2436 // count: O2 treated as signed 2437 // 2438 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, 2439 bool need_pre_barrier = true) { 2440 2441 const Register from = O0; // source array address 2442 const Register to = O1; // destination array address 2443 const Register count = O2; // elements count 2444 2445 __ align(CodeEntryAlignment); 2446 StubCodeMark mark(this, "StubRoutines", name); 2447 address start = __ pc(); 2448 2449 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2450 2451 if (entry != NULL) { 2452 *entry = __ pc(); 2453 // caller can pass a 64-bit byte count here 2454 BLOCK_COMMENT("Entry:"); 2455 } 2456 2457 // save arguments for barrier generation 2458 __ mov(to, G1); 2459 __ mov(count, G5); 2460 if (need_pre_barrier) { 2461 gen_write_ref_array_pre_barrier(G1, G5); 2462 } 2463 #ifdef _LP64 2464 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2465 if (UseCompressedOops) { 2466 generate_disjoint_int_copy_core(aligned); 2467 } else { 2468 generate_disjoint_long_copy_core(aligned); 2469 } 2470 #else 2471 generate_disjoint_int_copy_core(aligned); 2472 #endif 2473 // O0 is used as temp register 2474 gen_write_ref_array_post_barrier(G1, G5, O0); 2475 2476 // O3, O4 are used as temp registers 2477 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2478 __ retl(); 2479 __ delayed()->mov(G0, O0); // return 0 2480 return start; 2481 } 2482 2483 // Generate stub for conjoint oop copy. If "aligned" is true, the 2484 // "from" and "to" addresses are assumed to be heapword aligned. 2485 // 2486 // Arguments for generated stub: 2487 // from: O0 2488 // to: O1 2489 // count: O2 treated as signed 2490 // 2491 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, 2492 address *entry, const char *name, 2493 bool need_pre_barrier = true) { 2494 2495 const Register from = O0; // source array address 2496 const Register to = O1; // destination array address 2497 const Register count = O2; // elements count 2498 2499 __ align(CodeEntryAlignment); 2500 StubCodeMark mark(this, "StubRoutines", name); 2501 address start = __ pc(); 2502 2503 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2504 2505 if (entry != NULL) { 2506 *entry = __ pc(); 2507 // caller can pass a 64-bit byte count here 2508 BLOCK_COMMENT("Entry:"); 2509 } 2510 2511 array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 2512 2513 // save arguments for barrier generation 2514 __ mov(to, G1); 2515 __ mov(count, G5); 2516 if (need_pre_barrier) { 2517 gen_write_ref_array_pre_barrier(G1, G5); 2518 } 2519 2520 #ifdef _LP64 2521 if (UseCompressedOops) { 2522 generate_conjoint_int_copy_core(aligned); 2523 } else { 2524 generate_conjoint_long_copy_core(aligned); 2525 } 2526 #else 2527 generate_conjoint_int_copy_core(aligned); 2528 #endif 2529 2530 // O0 is used as temp register 2531 gen_write_ref_array_post_barrier(G1, G5, O0); 2532 2533 // O3, O4 are used as temp registers 2534 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2535 __ retl(); 2536 __ delayed()->mov(G0, O0); // return 0 2537 return start; 2538 } 2539 2540 2541 // Helper for generating a dynamic type check. 2542 // Smashes only the given temp registers. 
2543 void generate_type_check(Register sub_klass, 2544 Register super_check_offset, 2545 Register super_klass, 2546 Register temp, 2547 Label& L_success) { 2548 assert_different_registers(sub_klass, super_check_offset, super_klass, temp); 2549 2550 BLOCK_COMMENT("type_check:"); 2551 2552 Label L_miss, L_pop_to_miss; 2553 2554 assert_clean_int(super_check_offset, temp); 2555 2556 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg, 2557 &L_success, &L_miss, NULL, 2558 super_check_offset); 2559 2560 BLOCK_COMMENT("type_check_slow_path:"); 2561 __ save_frame(0); 2562 __ check_klass_subtype_slow_path(sub_klass->after_save(), 2563 super_klass->after_save(), 2564 L0, L1, L2, L4, 2565 NULL, &L_pop_to_miss); 2566 __ ba(false, L_success); 2567 __ delayed()->restore(); 2568 2569 __ bind(L_pop_to_miss); 2570 __ restore(); 2571 2572 // Fall through on failure! 2573 __ BIND(L_miss); 2574 } 2575 2576 2577 // Generate stub for checked oop copy. 2578 // 2579 // Arguments for generated stub: 2580 // from: O0 2581 // to: O1 2582 // count: O2 treated as signed 2583 // ckoff: O3 (super_check_offset) 2584 // ckval: O4 (super_klass) 2585 // ret: O0 zero for success; (-1^K) where K is partial transfer count 2586 // 2587 address generate_checkcast_copy(const char *name, address *entry, bool need_pre_barrier = true) { 2588 2589 const Register O0_from = O0; // source array address 2590 const Register O1_to = O1; // destination array address 2591 const Register O2_count = O2; // elements count 2592 const Register O3_ckoff = O3; // super_check_offset 2593 const Register O4_ckval = O4; // super_klass 2594 2595 const Register O5_offset = O5; // loop var, with stride wordSize 2596 const Register G1_remain = G1; // loop var, with stride -1 2597 const Register G3_oop = G3; // actual oop copied 2598 const Register G4_klass = G4; // oop._klass 2599 const Register G5_super = G5; // oop._klass._primary_supers[ckval] 2600 2601 __ align(CodeEntryAlignment); 2602 StubCodeMark mark(this, "StubRoutines", name); 2603 address start = __ pc(); 2604 2605 #ifdef ASSERT 2606 // We sometimes save a frame (see generate_type_check below). 2607 // If this will cause trouble, let's fail now instead of later. 2608 __ save_frame(0); 2609 __ restore(); 2610 #endif 2611 2612 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int. 2613 2614 #ifdef ASSERT 2615 // caller guarantees that the arrays really are different 2616 // otherwise, we would have to make conjoint checks 2617 { Label L; 2618 __ mov(O3, G1); // spill: overlap test smashes O3 2619 __ mov(O4, G4); // spill: overlap test smashes O4 2620 array_overlap_test(L, LogBytesPerHeapOop); 2621 __ stop("checkcast_copy within a single array"); 2622 __ bind(L); 2623 __ mov(G1, O3); 2624 __ mov(G4, O4); 2625 } 2626 #endif //ASSERT 2627 2628 if (entry != NULL) { 2629 *entry = __ pc(); 2630 // caller can pass a 64-bit byte count here (from generic stub) 2631 BLOCK_COMMENT("Entry:"); 2632 } 2633 2634 gen_write_ref_array_pre_barrier(O1_to, O2_count); 2635 2636 Label load_element, store_element, do_card_marks, fail, done; 2637 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it 2638 __ brx(Assembler::notZero, false, Assembler::pt, load_element); 2639 __ delayed()->mov(G0, O5_offset); // offset from start of arrays 2640 2641 // Empty array: Nothing to do. 
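  // (We only fall through to here when the count tested above was zero, so we
  //  just bump the counter and return 0.)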
2642 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2643 __ retl(); 2644 __ delayed()->set(0, O0); // return 0 on (trivial) success 2645 2646 // ======== begin loop ======== 2647 // (Loop is rotated; its entry is load_element.) 2648 // Loop variables: 2649 // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays 2650 // (O2 = len; O2 != 0; O2--) --- number of oops *remaining* 2651 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super 2652 __ align(OptoLoopAlignment); 2653 2654 __ BIND(store_element); 2655 __ deccc(G1_remain); // decrement the count 2656 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop 2657 __ inc(O5_offset, heapOopSize); // step to next offset 2658 __ brx(Assembler::zero, true, Assembler::pt, do_card_marks); 2659 __ delayed()->set(0, O0); // return 0 on success 2660 2661 // ======== loop entry is here ======== 2662 __ BIND(load_element); 2663 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop 2664 __ br_null(G3_oop, true, Assembler::pt, store_element); 2665 __ delayed()->nop(); 2666 2667 __ load_klass(G3_oop, G4_klass); // query the object klass 2668 2669 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super, 2670 // branch to this on success: 2671 store_element); 2672 // ======== end loop ======== 2673 2674 // It was a real error; we must depend on the caller to finish the job. 2675 // Register G1 has number of *remaining* oops, O2 number of *total* oops. 2676 // Emit GC store barriers for the oops we have copied (O2 minus G1), 2677 // and report their number to the caller. 2678 __ BIND(fail); 2679 __ subcc(O2_count, G1_remain, O2_count); 2680 __ brx(Assembler::zero, false, Assembler::pt, done); 2681 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller 2682 2683 __ BIND(do_card_marks); 2684 gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2] 2685 2686 __ BIND(done); 2687 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2688 __ retl(); 2689 __ delayed()->nop(); // return value in O0 2690 2691 return start; 2692 } 2693 2694 2695 // Generate 'unsafe' array copy stub 2696 // Though just as safe as the other stubs, it takes an unscaled 2697 // size_t argument instead of an element count. 2698 // 2699 // Arguments for generated stub: 2700 // from: O0 2701 // to: O1 2702 // count: O2 byte count, treated as ssize_t, can be zero 2703 // 2704 // Examines the alignment of the operands and dispatches 2705 // to a long, int, short, or byte copy loop.
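  // (Example: if 'from', 'to' and the byte count are all multiples of 8, their
  //  OR has no low bits set, so the jlong entry is taken with the count scaled
  //  down by LogBytesPerLong; otherwise the tests fall through to int, short
  //  and finally byte granularity.)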
2706 // 2707 address generate_unsafe_copy(const char* name, 2708 address byte_copy_entry, 2709 address short_copy_entry, 2710 address int_copy_entry, 2711 address long_copy_entry) { 2712 2713 const Register O0_from = O0; // source array address 2714 const Register O1_to = O1; // destination array address 2715 const Register O2_count = O2; // elements count 2716 2717 const Register G1_bits = G1; // test copy of low bits 2718 2719 __ align(CodeEntryAlignment); 2720 StubCodeMark mark(this, "StubRoutines", name); 2721 address start = __ pc(); 2722 2723 // bump this on entry, not on exit: 2724 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3); 2725 2726 __ or3(O0_from, O1_to, G1_bits); 2727 __ or3(O2_count, G1_bits, G1_bits); 2728 2729 __ btst(BytesPerLong-1, G1_bits); 2730 __ br(Assembler::zero, true, Assembler::pt, 2731 long_copy_entry, relocInfo::runtime_call_type); 2732 // scale the count on the way out: 2733 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count); 2734 2735 __ btst(BytesPerInt-1, G1_bits); 2736 __ br(Assembler::zero, true, Assembler::pt, 2737 int_copy_entry, relocInfo::runtime_call_type); 2738 // scale the count on the way out: 2739 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count); 2740 2741 __ btst(BytesPerShort-1, G1_bits); 2742 __ br(Assembler::zero, true, Assembler::pt, 2743 short_copy_entry, relocInfo::runtime_call_type); 2744 // scale the count on the way out: 2745 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count); 2746 2747 __ br(Assembler::always, false, Assembler::pt, 2748 byte_copy_entry, relocInfo::runtime_call_type); 2749 __ delayed()->nop(); 2750 2751 return start; 2752 } 2753 2754 2755 // Perform range checks on the proposed arraycopy. 2756 // Kills the two temps, but nothing else. 2757 // Also, clean the sign bits of src_pos and dst_pos. 2758 void arraycopy_range_checks(Register src, // source array oop (O0) 2759 Register src_pos, // source position (O1) 2760 Register dst, // destination array oo (O2) 2761 Register dst_pos, // destination position (O3) 2762 Register length, // length of copy (O4) 2763 Register temp1, Register temp2, 2764 Label& L_failed) { 2765 BLOCK_COMMENT("arraycopy_range_checks:"); 2766 2767 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2768 2769 const Register array_length = temp1; // scratch 2770 const Register end_pos = temp2; // scratch 2771 2772 // Note: This next instruction may be in the delay slot of a branch: 2773 __ add(length, src_pos, end_pos); // src_pos + length 2774 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); 2775 __ cmp(end_pos, array_length); 2776 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2777 2778 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2779 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length 2780 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); 2781 __ cmp(end_pos, array_length); 2782 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2783 2784 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2785 // Move with sign extension can be used since they are positive. 
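  // (signx sign-extends from 32 to 64 bits; the caller has already rejected
  //  negative positions, so this effectively just clears the high word.)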
2786 __ delayed()->signx(src_pos, src_pos); 2787 __ signx(dst_pos, dst_pos); 2788 2789 BLOCK_COMMENT("arraycopy_range_checks done"); 2790 } 2791 2792 2793 // 2794 // Generate generic array copy stubs 2795 // 2796 // Input: 2797 // O0 - src oop 2798 // O1 - src_pos 2799 // O2 - dst oop 2800 // O3 - dst_pos 2801 // O4 - element count 2802 // 2803 // Output: 2804 // O0 == 0 - success 2805 // O0 == -1 - need to call System.arraycopy 2806 // 2807 address generate_generic_copy(const char *name, 2808 address entry_jbyte_arraycopy, 2809 address entry_jshort_arraycopy, 2810 address entry_jint_arraycopy, 2811 address entry_oop_arraycopy, 2812 address entry_jlong_arraycopy, 2813 address entry_checkcast_arraycopy) { 2814 Label L_failed, L_objArray; 2815 2816 // Input registers 2817 const Register src = O0; // source array oop 2818 const Register src_pos = O1; // source position 2819 const Register dst = O2; // destination array oop 2820 const Register dst_pos = O3; // destination position 2821 const Register length = O4; // elements count 2822 2823 // registers used as temp 2824 const Register G3_src_klass = G3; // source array klass 2825 const Register G4_dst_klass = G4; // destination array klass 2826 const Register G5_lh = G5; // layout handler 2827 const Register O5_temp = O5; 2828 2829 __ align(CodeEntryAlignment); 2830 StubCodeMark mark(this, "StubRoutines", name); 2831 address start = __ pc(); 2832 2833 // bump this on entry, not on exit: 2834 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3); 2835 2836 // In principle, the int arguments could be dirty. 2837 //assert_clean_int(src_pos, G1); 2838 //assert_clean_int(dst_pos, G1); 2839 //assert_clean_int(length, G1); 2840 2841 //----------------------------------------------------------------------- 2842 // Assembler stubs will be used for this call to arraycopy 2843 // if the following conditions are met: 2844 // 2845 // (1) src and dst must not be null. 2846 // (2) src_pos must not be negative. 2847 // (3) dst_pos must not be negative. 2848 // (4) length must not be negative. 2849 // (5) src klass and dst klass should be the same and not NULL. 2850 // (6) src and dst should be arrays. 2851 // (7) src_pos + length must not exceed length of src. 2852 // (8) dst_pos + length must not exceed length of dst. 2853 BLOCK_COMMENT("arraycopy initial argument checks"); 2854 2855 // if (src == NULL) return -1; 2856 __ br_null(src, false, Assembler::pn, L_failed); 2857 2858 // if (src_pos < 0) return -1; 2859 __ delayed()->tst(src_pos); 2860 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2861 __ delayed()->nop(); 2862 2863 // if (dst == NULL) return -1; 2864 __ br_null(dst, false, Assembler::pn, L_failed); 2865 2866 // if (dst_pos < 0) return -1; 2867 __ delayed()->tst(dst_pos); 2868 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2869 2870 // if (length < 0) return -1; 2871 __ delayed()->tst(length); 2872 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2873 2874 BLOCK_COMMENT("arraycopy argument klass checks"); 2875 // get src->klass() 2876 if (UseCompressedOops) { 2877 __ delayed()->nop(); // ??? 
not good 2878 __ load_klass(src, G3_src_klass); 2879 } else { 2880 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass); 2881 } 2882 2883 #ifdef ASSERT 2884 // assert(src->klass() != NULL); 2885 BLOCK_COMMENT("assert klasses not null"); 2886 { Label L_a, L_b; 2887 __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL 2888 __ delayed()->nop(); 2889 __ bind(L_a); 2890 __ stop("broken null klass"); 2891 __ bind(L_b); 2892 __ load_klass(dst, G4_dst_klass); 2893 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also 2894 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp 2895 BLOCK_COMMENT("assert done"); 2896 } 2897 #endif 2898 2899 // Load layout helper 2900 // 2901 // |array_tag| | header_size | element_type | |log2_element_size| 2902 // 32 30 24 16 8 2 0 2903 // 2904 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2905 // 2906 2907 int lh_offset = klassOopDesc::header_size() * HeapWordSize + 2908 Klass::layout_helper_offset_in_bytes(); 2909 2910 // Load 32-bits signed value. Use br() instruction with it to check icc. 2911 __ lduw(G3_src_klass, lh_offset, G5_lh); 2912 2913 if (UseCompressedOops) { 2914 __ load_klass(dst, G4_dst_klass); 2915 } 2916 // Handle objArrays completely differently... 2917 juint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2918 __ set(objArray_lh, O5_temp); 2919 __ cmp(G5_lh, O5_temp); 2920 __ br(Assembler::equal, false, Assembler::pt, L_objArray); 2921 if (UseCompressedOops) { 2922 __ delayed()->nop(); 2923 } else { 2924 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); 2925 } 2926 2927 // if (src->klass() != dst->klass()) return -1; 2928 __ cmp(G3_src_klass, G4_dst_klass); 2929 __ brx(Assembler::notEqual, false, Assembler::pn, L_failed); 2930 __ delayed()->nop(); 2931 2932 // if (!src->is_Array()) return -1; 2933 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0 2934 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed); 2935 2936 // At this point, it is known to be a typeArray (array_tag 0x3). 
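  // (Layout helper example, per the diagram above: for a jint[] the array_tag
  //  is 0x3, the element type is T_INT and log2_element_size is 2, so the
  //  shifts below recover the header size used as the array base offset and
  //  the log2 element size used to scale src_pos and dst_pos.)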
2937 #ifdef ASSERT 2938 __ delayed()->nop(); 2939 { Label L; 2940 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift); 2941 __ set(lh_prim_tag_in_place, O5_temp); 2942 __ cmp(G5_lh, O5_temp); 2943 __ br(Assembler::greaterEqual, false, Assembler::pt, L); 2944 __ delayed()->nop(); 2945 __ stop("must be a primitive array"); 2946 __ bind(L); 2947 } 2948 #else 2949 __ delayed(); // match next insn to prev branch 2950 #endif 2951 2952 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2953 O5_temp, G4_dst_klass, L_failed); 2954 2955 // typeArrayKlass 2956 // 2957 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2958 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2959 // 2960 2961 const Register G4_offset = G4_dst_klass; // array offset 2962 const Register G3_elsize = G3_src_klass; // log2 element size 2963 2964 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset); 2965 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset 2966 __ add(src, G4_offset, src); // src array offset 2967 __ add(dst, G4_offset, dst); // dst array offset 2968 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size 2969 2970 // next registers should be set before the jump to corresponding stub 2971 const Register from = O0; // source array address 2972 const Register to = O1; // destination array address 2973 const Register count = O2; // elements count 2974 2975 // 'from', 'to', 'count' registers should be set in this order 2976 // since they are the same as 'src', 'src_pos', 'dst'. 2977 2978 BLOCK_COMMENT("scale indexes to element size"); 2979 __ sll_ptr(src_pos, G3_elsize, src_pos); 2980 __ sll_ptr(dst_pos, G3_elsize, dst_pos); 2981 __ add(src, src_pos, from); // src_addr 2982 __ add(dst, dst_pos, to); // dst_addr 2983 2984 BLOCK_COMMENT("choose copy loop based on element size"); 2985 __ cmp(G3_elsize, 0); 2986 __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy); 2987 __ delayed()->signx(length, count); // length 2988 2989 __ cmp(G3_elsize, LogBytesPerShort); 2990 __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy); 2991 __ delayed()->signx(length, count); // length 2992 2993 __ cmp(G3_elsize, LogBytesPerInt); 2994 __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy); 2995 __ delayed()->signx(length, count); // length 2996 #ifdef ASSERT 2997 { Label L; 2998 __ cmp(G3_elsize, LogBytesPerLong); 2999 __ br(Assembler::equal, false, Assembler::pt, L); 3000 __ delayed()->nop(); 3001 __ stop("must be long copy, but elsize is wrong"); 3002 __ bind(L); 3003 } 3004 #endif 3005 __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy); 3006 __ delayed()->signx(length, count); // length 3007 3008 // objArrayKlass 3009 __ BIND(L_objArray); 3010 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length 3011 3012 Label L_plain_copy, L_checkcast_copy; 3013 // test array classes for subtyping 3014 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality 3015 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy); 3016 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below 3017 3018 // Identically typed arrays can be copied without element-wise checks. 
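  // (After these range checks, the plain-copy path just rebases 'src'/'dst' to
  //  the first element, scales the positions by LogBytesPerHeapOop and jumps
  //  to the oop arraycopy stub entry.)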
3019 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 3020 O5_temp, G5_lh, L_failed); 3021 3022 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 3023 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 3024 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 3025 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 3026 __ add(src, src_pos, from); // src_addr 3027 __ add(dst, dst_pos, to); // dst_addr 3028 __ BIND(L_plain_copy); 3029 __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy); 3030 __ delayed()->signx(length, count); // length 3031 3032 __ BIND(L_checkcast_copy); 3033 // live at this point: G3_src_klass, G4_dst_klass 3034 { 3035 // Before looking at dst.length, make sure dst is also an objArray. 3036 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot 3037 __ cmp(G5_lh, O5_temp); 3038 __ br(Assembler::notEqual, false, Assembler::pn, L_failed); 3039 3040 // It is safe to examine both src.length and dst.length. 3041 __ delayed(); // match next insn to prev branch 3042 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 3043 O5_temp, G5_lh, L_failed); 3044 3045 // Marshal the base address arguments now, freeing registers. 3046 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 3047 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 3048 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 3049 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 3050 __ add(src, src_pos, from); // src_addr 3051 __ add(dst, dst_pos, to); // dst_addr 3052 __ signx(length, count); // length (reloaded) 3053 3054 Register sco_temp = O3; // this register is free now 3055 assert_different_registers(from, to, count, sco_temp, 3056 G4_dst_klass, G3_src_klass); 3057 3058 // Generate the type check. 3059 int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 3060 Klass::super_check_offset_offset_in_bytes()); 3061 __ lduw(G4_dst_klass, sco_offset, sco_temp); 3062 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass, 3063 O5_temp, L_plain_copy); 3064 3065 // Fetch destination element klass from the objArrayKlass header. 
3066 int ek_offset = (klassOopDesc::header_size() * HeapWordSize + 3067 objArrayKlass::element_klass_offset_in_bytes()); 3068 3069 // the checkcast_copy loop needs two extra arguments: 3070 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass 3071 // lduw(O4, sco_offset, O3); // sco of elem klass 3072 3073 __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy); 3074 __ delayed()->lduw(O4, sco_offset, O3); 3075 } 3076 3077 __ BIND(L_failed); 3078 __ retl(); 3079 __ delayed()->sub(G0, 1, O0); // return -1 3080 return start; 3081 } 3082 3083 void generate_arraycopy_stubs() { 3084 address entry; 3085 address entry_jbyte_arraycopy; 3086 address entry_jshort_arraycopy; 3087 address entry_jint_arraycopy; 3088 address entry_oop_arraycopy; 3089 address entry_jlong_arraycopy; 3090 address entry_checkcast_arraycopy; 3091 3092 //*** jbyte 3093 // Always need alinged and unaligned versions 3094 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3095 "jbyte_disjoint_arraycopy"); 3096 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, 3097 &entry_jbyte_arraycopy, 3098 "jbyte_arraycopy"); 3099 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, 3100 "arrayof_jbyte_disjoint_arraycopy"); 3101 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, 3102 "arrayof_jbyte_arraycopy"); 3103 3104 //*** jshort 3105 // Always need alinged and unaligned versions 3106 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3107 "jshort_disjoint_arraycopy"); 3108 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, 3109 &entry_jshort_arraycopy, 3110 "jshort_arraycopy"); 3111 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, 3112 "arrayof_jshort_disjoint_arraycopy"); 3113 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, 3114 "arrayof_jshort_arraycopy"); 3115 3116 //*** jint 3117 // Aligned versions 3118 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, 3119 "arrayof_jint_disjoint_arraycopy"); 3120 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, 3121 "arrayof_jint_arraycopy"); 3122 #ifdef _LP64 3123 // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 3124 // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). 3125 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, 3126 "jint_disjoint_arraycopy"); 3127 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, 3128 &entry_jint_arraycopy, 3129 "jint_arraycopy"); 3130 #else 3131 // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version 3132 // (in fact in 32bit we always have a pre-loop part even in the aligned version, 3133 // because it uses 64-bit loads/stores, so the aligned flag is actually ignored). 
3134 StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy; 3135 StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy; 3136 #endif 3137 3138 3139 //*** jlong 3140 // It is always aligned 3141 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, 3142 "arrayof_jlong_disjoint_arraycopy"); 3143 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, 3144 "arrayof_jlong_arraycopy"); 3145 StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; 3146 StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; 3147 3148 3149 //*** oops 3150 // Aligned versions 3151 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, 3152 "arrayof_oop_disjoint_arraycopy"); 3153 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, 3154 "arrayof_oop_arraycopy"); 3155 // Aligned versions without pre-barriers 3156 StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(true, &entry, 3157 "arrayof_oop_disjoint_arraycopy_no_pre", false); 3158 StubRoutines::_arrayof_oop_arraycopy_no_pre = generate_conjoint_oop_copy(true, entry, NULL, 3159 "arrayof_oop_arraycopy_no_pre", false); 3160 #ifdef _LP64 3161 if (UseCompressedOops) { 3162 // With compressed oops we need unalinged versions, notice that we overwrite entry_oop_arraycopy. 3163 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, 3164 "oop_disjoint_arraycopy"); 3165 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, 3166 "oop_arraycopy"); 3167 // Unaligned versions without pre-barriers 3168 StubRoutines::_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(false, &entry, 3169 "oop_disjoint_arraycopy_no_pre", false); 3170 StubRoutines::_oop_arraycopy_no_pre = generate_conjoint_oop_copy(false, entry, NULL, 3171 "oop_arraycopy_no_pre", false); 3172 } else 3173 #endif 3174 { 3175 // oop arraycopy is always aligned on 32bit and 64bit without compressed oops 3176 StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; 3177 StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; 3178 StubRoutines::_oop_disjoint_arraycopy_no_pre = StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre; 3179 StubRoutines::_oop_arraycopy_no_pre = StubRoutines::_arrayof_oop_arraycopy_no_pre; 3180 } 3181 3182 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3183 StubRoutines::_checkcast_arraycopy_no_pre = generate_checkcast_copy("checkcast_arraycopy_no_pre", NULL, false); 3184 3185 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3186 entry_jbyte_arraycopy, 3187 entry_jshort_arraycopy, 3188 entry_jint_arraycopy, 3189 entry_jlong_arraycopy); 3190 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3191 entry_jbyte_arraycopy, 3192 entry_jshort_arraycopy, 3193 entry_jint_arraycopy, 3194 entry_oop_arraycopy, 3195 entry_jlong_arraycopy, 3196 entry_checkcast_arraycopy); 3197 3198 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3199 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3200 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3201 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, 
"arrayof_jbyte_fill"); 3202 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3203 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3204 } 3205 3206 void generate_initial() { 3207 // Generates all stubs and initializes the entry points 3208 3209 //------------------------------------------------------------------------------------------------------------------------ 3210 // entry points that exist in all platforms 3211 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than 3212 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp. 3213 StubRoutines::_forward_exception_entry = generate_forward_exception(); 3214 3215 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 3216 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3217 3218 //------------------------------------------------------------------------------------------------------------------------ 3219 // entry points that are platform specific 3220 StubRoutines::Sparc::_test_stop_entry = generate_test_stop(); 3221 3222 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine(); 3223 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows(); 3224 3225 #if !defined(COMPILER2) && !defined(_LP64) 3226 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 3227 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 3228 StubRoutines::_atomic_add_entry = generate_atomic_add(); 3229 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry; 3230 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry; 3231 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 3232 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; 3233 #endif // COMPILER2 !=> _LP64 3234 } 3235 3236 3237 void generate_all() { 3238 // Generates all stubs and initializes the entry points 3239 3240 // Generate partial_subtype_check first here since its code depends on 3241 // UseZeroBaseCompressedOops which is defined after heap initialization. 
3242 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check(); 3243 // These entry points require SharedInfo::stack0 to be set up in non-core builds 3244 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); 3245 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false); 3246 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true); 3247 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true); 3248 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); 3249 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); 3250 3251 StubRoutines::_handler_for_unsafe_access_entry = 3252 generate_handler_for_unsafe_access(); 3253 3254 // support for verify_oop (must happen after universe_init) 3255 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); 3256 3257 // arraycopy stubs used by compilers 3258 generate_arraycopy_stubs(); 3259 3260 // Don't initialize the platform math functions since sparc 3261 // doesn't have intrinsics for these operations. 3262 } 3263 3264 3265 public: 3266 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 3267 // replace the standard masm with a special one: 3268 _masm = new MacroAssembler(code); 3269 3270 _stub_count = !all ? 0x100 : 0x200; 3271 if (all) { 3272 generate_all(); 3273 } else { 3274 generate_initial(); 3275 } 3276 3277 // make sure this stub is available for all local calls 3278 if (_atomic_add_stub.is_unbound()) { 3279 // generate a second time, if necessary 3280 (void) generate_atomic_add(); 3281 } 3282 } 3283 3284 3285 private: 3286 int _stub_count; 3287 void stub_prolog(StubCodeDesc* cdesc) { 3288 # ifdef ASSERT 3289 // put extra information in the stub code, to make it more readable 3290 #ifdef _LP64 3291 // Write the high part of the address 3292 // [RGV] Check if there is a dependency on the size of this prolog 3293 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none); 3294 #endif 3295 __ emit_data((intptr_t)cdesc, relocInfo::none); 3296 __ emit_data(++_stub_count, relocInfo::none); 3297 # endif 3298 align(true); 3299 } 3300 3301 void align(bool at_header = false) { 3302 // %%%%% move this constant somewhere else 3303 // UltraSPARC cache line size is 8 instructions: 3304 const unsigned int icache_line_size = 32; 3305 const unsigned int icache_half_line_size = 16; 3306 3307 if (at_header) { 3308 while ((intptr_t)(__ pc()) % icache_line_size != 0) { 3309 __ emit_data(0, relocInfo::none); 3310 } 3311 } else { 3312 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { 3313 __ nop(); 3314 } 3315 } 3316 } 3317 3318 }; // end class declaration 3319 3320 void StubGenerator_generate(CodeBuffer* code, bool all) { 3321 StubGenerator g(code, all); 3322 }