1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.inline.hpp" 27 #include "gc/shared/barrierSet.hpp" 28 #include "gc/shared/barrierSetCodeGen.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "nativeInst_sparc.hpp" 31 #include "oops/instanceOop.hpp" 32 #include "oops/method.hpp" 33 #include "oops/objArrayKlass.hpp" 34 #include "oops/oop.inline.hpp" 35 #include "prims/methodHandles.hpp" 36 #include "runtime/frame.inline.hpp" 37 #include "runtime/handles.inline.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/stubCodeGenerator.hpp" 40 #include "runtime/stubRoutines.hpp" 41 #include "runtime/thread.inline.hpp" 42 #ifdef COMPILER2 43 #include "opto/runtime.hpp" 44 #endif 45 46 // Declaration and definition of StubGenerator (no .hpp file). 47 // For a more detailed description of the stub routine structure 48 // see the comment in stubRoutines.hpp. 49 50 #define __ _masm-> 51 52 #ifdef PRODUCT 53 #define BLOCK_COMMENT(str) /* nothing */ 54 #else 55 #define BLOCK_COMMENT(str) __ block_comment(str) 56 #endif 57 58 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 59 60 // Note: The register L7 is used as L7_thread_cache, and may not be used 61 // any other way within this module. 62 63 static const Register& Lstub_temp = L2; 64 65 // ------------------------------------------------------------------------------------------------------------------------- 66 // Stub Code definitions 67 68 class StubGenerator: public StubCodeGenerator { 69 private: 70 71 #ifdef PRODUCT 72 #define inc_counter_np(a,b,c) 73 #else 74 #define inc_counter_np(counter, t1, t2) \ 75 BLOCK_COMMENT("inc_counter " #counter); \ 76 __ inc_counter(&counter, t1, t2); 77 #endif 78 79 //---------------------------------------------------------------------------------------------------- 80 // Call stubs are used to call Java from C 81 82 address generate_call_stub(address& return_pc) { 83 StubCodeMark mark(this, "StubRoutines", "call_stub"); 84 address start = __ pc(); 85 86 // Incoming arguments: 87 // 88 // o0 : call wrapper address 89 // o1 : result (address) 90 // o2 : result type 91 // o3 : method 92 // o4 : (interpreter) entry point 93 // o5 : parameters (address) 94 // [sp + 0x5c]: parameter size (in words) 95 // [sp + 0x60]: thread 96 // 97 // +---------------+ <--- sp + 0 98 // | | 99 // . reg save area . 100 // | | 101 // +---------------+ <--- sp + 0x40 102 // | | 103 // . extra 7 slots . 104 // | | 105 // +---------------+ <--- sp + 0x5c 106 // | param. size | 107 // +---------------+ <--- sp + 0x60 108 // | thread | 109 // +---------------+ 110 // | | 111 112 // note: if the link argument position changes, adjust 113 // the code in frame::entry_frame_call_wrapper() 114 115 const Argument link = Argument(0, false); // used only for GC 116 const Argument result = Argument(1, false); 117 const Argument result_type = Argument(2, false); 118 const Argument method = Argument(3, false); 119 const Argument entry_point = Argument(4, false); 120 const Argument parameters = Argument(5, false); 121 const Argument parameter_size = Argument(6, false); 122 const Argument thread = Argument(7, false); 123 124 // setup thread register 125 __ ld_ptr(thread.as_address(), G2_thread); 126 __ reinit_heapbase(); 127 128 #ifdef ASSERT 129 // make sure we have no pending exceptions 130 { const Register t = G3_scratch; 131 Label L; 132 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t); 133 __ br_null_short(t, Assembler::pt, L); 134 __ stop("StubRoutines::call_stub: entered with pending exception"); 135 __ bind(L); 136 } 137 #endif 138 139 // create activation frame & allocate space for parameters 140 { const Register t = G3_scratch; 141 __ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words) 142 __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words) 143 __ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words) 144 __ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes 145 __ neg(t); // negate so it can be used with save 146 __ save(SP, t, SP); // setup new frame 147 } 148 149 // +---------------+ <--- sp + 0 150 // | | 151 // . reg save area . 152 // | | 153 // +---------------+ <--- sp + 0x40 154 // | | 155 // . extra 7 slots . 156 // | | 157 // +---------------+ <--- sp + 0x5c 158 // | empty slot | (only if parameter size is even) 159 // +---------------+ 160 // | | 161 // . parameters . 162 // | | 163 // +---------------+ <--- fp + 0 164 // | | 165 // . reg save area . 166 // | | 167 // +---------------+ <--- fp + 0x40 168 // | | 169 // . extra 7 slots . 170 // | | 171 // +---------------+ <--- fp + 0x5c 172 // | param. size | 173 // +---------------+ <--- fp + 0x60 174 // | thread | 175 // +---------------+ 176 // | | 177 178 // pass parameters if any 179 BLOCK_COMMENT("pass parameters if any"); 180 { const Register src = parameters.as_in().as_register(); 181 const Register dst = Lentry_args; 182 const Register tmp = G3_scratch; 183 const Register cnt = G4_scratch; 184 185 // test if any parameters & setup of Lentry_args 186 Label exit; 187 __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter 188 __ add( FP, STACK_BIAS, dst ); 189 __ cmp_zero_and_br(Assembler::zero, cnt, exit); 190 __ delayed()->sub(dst, BytesPerWord, dst); // setup Lentry_args 191 192 // copy parameters if any 193 Label loop; 194 __ BIND(loop); 195 // Store parameter value 196 __ ld_ptr(src, 0, tmp); 197 __ add(src, BytesPerWord, src); 198 __ st_ptr(tmp, dst, 0); 199 __ deccc(cnt); 200 __ br(Assembler::greater, false, Assembler::pt, loop); 201 __ delayed()->sub(dst, Interpreter::stackElementSize, dst); 202 203 // done 204 __ BIND(exit); 205 } 206 207 // setup parameters, method & call Java function 208 #ifdef ASSERT 209 // layout_activation_impl checks it's notion of saved SP against 210 // this register, so if this changes update it as well. 211 const Register saved_SP = Lscratch; 212 __ mov(SP, saved_SP); // keep track of SP before call 213 #endif 214 215 // setup parameters 216 const Register t = G3_scratch; 217 __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words) 218 __ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes 219 __ sub(FP, t, Gargs); // setup parameter pointer 220 __ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias 221 __ mov(SP, O5_savedSP); 222 223 224 // do the call 225 // 226 // the following register must be setup: 227 // 228 // G2_thread 229 // G5_method 230 // Gargs 231 BLOCK_COMMENT("call Java function"); 232 __ jmpl(entry_point.as_in().as_register(), G0, O7); 233 __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method 234 235 BLOCK_COMMENT("call_stub_return_address:"); 236 return_pc = __ pc(); 237 238 // The callee, if it wasn't interpreted, can return with SP changed so 239 // we can no longer assert of change of SP. 240 241 // store result depending on type 242 // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE 243 // is treated as T_INT) 244 { const Register addr = result .as_in().as_register(); 245 const Register type = result_type.as_in().as_register(); 246 Label is_long, is_float, is_double, is_object, exit; 247 __ cmp(type, T_OBJECT); __ br(Assembler::equal, false, Assembler::pn, is_object); 248 __ delayed()->cmp(type, T_FLOAT); __ br(Assembler::equal, false, Assembler::pn, is_float); 249 __ delayed()->cmp(type, T_DOUBLE); __ br(Assembler::equal, false, Assembler::pn, is_double); 250 __ delayed()->cmp(type, T_LONG); __ br(Assembler::equal, false, Assembler::pn, is_long); 251 __ delayed()->nop(); 252 253 // store int result 254 __ st(O0, addr, G0); 255 256 __ BIND(exit); 257 __ ret(); 258 __ delayed()->restore(); 259 260 __ BIND(is_object); 261 __ ba(exit); 262 __ delayed()->st_ptr(O0, addr, G0); 263 264 __ BIND(is_float); 265 __ ba(exit); 266 __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0); 267 268 __ BIND(is_double); 269 __ ba(exit); 270 __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0); 271 272 __ BIND(is_long); 273 __ ba(exit); 274 __ delayed()->st_long(O0, addr, G0); // store entire long 275 } 276 return start; 277 } 278 279 280 //---------------------------------------------------------------------------------------------------- 281 // Return point for a Java call if there's an exception thrown in Java code. 282 // The exception is caught and transformed into a pending exception stored in 283 // JavaThread that can be tested from within the VM. 284 // 285 // Oexception: exception oop 286 287 address generate_catch_exception() { 288 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 289 290 address start = __ pc(); 291 // verify that thread corresponds 292 __ verify_thread(); 293 294 const Register& temp_reg = Gtemp; 295 Address pending_exception_addr (G2_thread, Thread::pending_exception_offset()); 296 Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ()); 297 Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ()); 298 299 // set pending exception 300 __ verify_oop(Oexception); 301 __ st_ptr(Oexception, pending_exception_addr); 302 __ set((intptr_t)__FILE__, temp_reg); 303 __ st_ptr(temp_reg, exception_file_offset_addr); 304 __ set((intptr_t)__LINE__, temp_reg); 305 __ st(temp_reg, exception_line_offset_addr); 306 307 // complete return to VM 308 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 309 310 AddressLiteral stub_ret(StubRoutines::_call_stub_return_address); 311 __ jump_to(stub_ret, temp_reg); 312 __ delayed()->nop(); 313 314 return start; 315 } 316 317 318 //---------------------------------------------------------------------------------------------------- 319 // Continuation point for runtime calls returning with a pending exception 320 // The pending exception check happened in the runtime or native call stub 321 // The pending exception in Thread is converted into a Java-level exception 322 // 323 // Contract with Java-level exception handler: O0 = exception 324 // O1 = throwing pc 325 326 address generate_forward_exception() { 327 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 328 address start = __ pc(); 329 330 // Upon entry, O7 has the return address returning into Java 331 // (interpreted or compiled) code; i.e. the return address 332 // becomes the throwing pc. 333 334 const Register& handler_reg = Gtemp; 335 336 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 337 338 #ifdef ASSERT 339 // make sure that this code is only executed if there is a pending exception 340 { Label L; 341 __ ld_ptr(exception_addr, Gtemp); 342 __ br_notnull_short(Gtemp, Assembler::pt, L); 343 __ stop("StubRoutines::forward exception: no pending exception (1)"); 344 __ bind(L); 345 } 346 #endif 347 348 // compute exception handler into handler_reg 349 __ get_thread(); 350 __ ld_ptr(exception_addr, Oexception); 351 __ verify_oop(Oexception); 352 __ save_frame(0); // compensates for compiler weakness 353 __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC 354 BLOCK_COMMENT("call exception_handler_for_return_address"); 355 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch); 356 __ mov(O0, handler_reg); 357 __ restore(); // compensates for compiler weakness 358 359 __ ld_ptr(exception_addr, Oexception); 360 __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC 361 362 #ifdef ASSERT 363 // make sure exception is set 364 { Label L; 365 __ br_notnull_short(Oexception, Assembler::pt, L); 366 __ stop("StubRoutines::forward exception: no pending exception (2)"); 367 __ bind(L); 368 } 369 #endif 370 // jump to exception handler 371 __ jmp(handler_reg, 0); 372 // clear pending exception 373 __ delayed()->st_ptr(G0, exception_addr); 374 375 return start; 376 } 377 378 // Safefetch stubs. 379 void generate_safefetch(const char* name, int size, address* entry, 380 address* fault_pc, address* continuation_pc) { 381 // safefetch signatures: 382 // int SafeFetch32(int* adr, int errValue); 383 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 384 // 385 // arguments: 386 // o0 = adr 387 // o1 = errValue 388 // 389 // result: 390 // o0 = *adr or errValue 391 392 StubCodeMark mark(this, "StubRoutines", name); 393 394 // Entry point, pc or function descriptor. 395 __ align(CodeEntryAlignment); 396 *entry = __ pc(); 397 398 __ mov(O0, G1); // g1 = o0 399 __ mov(O1, O0); // o0 = o1 400 // Load *adr into c_rarg1, may fault. 401 *fault_pc = __ pc(); 402 switch (size) { 403 case 4: 404 // int32_t 405 __ ldsw(G1, 0, O0); // o0 = [g1] 406 break; 407 case 8: 408 // int64_t 409 __ ldx(G1, 0, O0); // o0 = [g1] 410 break; 411 default: 412 ShouldNotReachHere(); 413 } 414 415 // return errValue or *adr 416 *continuation_pc = __ pc(); 417 // By convention with the trap handler we ensure there is a non-CTI 418 // instruction in the trap shadow. 419 __ nop(); 420 __ retl(); 421 __ delayed()->nop(); 422 } 423 424 //------------------------------------------------------------------------------------------------------------------------ 425 // Continuation point for throwing of implicit exceptions that are not handled in 426 // the current activation. Fabricates an exception oop and initiates normal 427 // exception dispatching in this frame. Only callee-saved registers are preserved 428 // (through the normal register window / RegisterMap handling). 429 // If the compiler needs all registers to be preserved between the fault 430 // point and the exception handler then it must assume responsibility for that in 431 // AbstractCompiler::continuation_for_implicit_null_exception or 432 // continuation_for_implicit_division_by_zero_exception. All other implicit 433 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are 434 // either at call sites or otherwise assume that stack unwinding will be initiated, 435 // so caller saved registers were assumed volatile in the compiler. 436 437 // Note that we generate only this stub into a RuntimeStub, because it needs to be 438 // properly traversed and ignored during GC, so we change the meaning of the "__" 439 // macro within this method. 440 #undef __ 441 #define __ masm-> 442 443 address generate_throw_exception(const char* name, address runtime_entry, 444 Register arg1 = noreg, Register arg2 = noreg) { 445 #ifdef ASSERT 446 int insts_size = VerifyThread ? 1 * K : 600; 447 #else 448 int insts_size = VerifyThread ? 1 * K : 256; 449 #endif /* ASSERT */ 450 int locs_size = 32; 451 452 CodeBuffer code(name, insts_size, locs_size); 453 MacroAssembler* masm = new MacroAssembler(&code); 454 455 __ verify_thread(); 456 457 // This is an inlined and slightly modified version of call_VM 458 // which has the ability to fetch the return PC out of thread-local storage 459 __ assert_not_delayed(); 460 461 // Note that we always push a frame because on the SPARC 462 // architecture, for all of our implicit exception kinds at call 463 // sites, the implicit exception is taken before the callee frame 464 // is pushed. 465 __ save_frame(0); 466 467 int frame_complete = __ offset(); 468 469 // Note that we always have a runtime stub frame on the top of stack by this point 470 Register last_java_sp = SP; 471 // 64-bit last_java_sp is biased! 472 __ set_last_Java_frame(last_java_sp, G0); 473 if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early 474 __ save_thread(noreg); 475 if (arg1 != noreg) { 476 assert(arg2 != O1, "clobbered"); 477 __ mov(arg1, O1); 478 } 479 if (arg2 != noreg) { 480 __ mov(arg2, O2); 481 } 482 // do the call 483 BLOCK_COMMENT("call runtime_entry"); 484 __ call(runtime_entry, relocInfo::runtime_call_type); 485 if (!VerifyThread) 486 __ delayed()->mov(G2_thread, O0); // pass thread as first argument 487 else 488 __ delayed()->nop(); // (thread already passed) 489 __ restore_thread(noreg); 490 __ reset_last_Java_frame(); 491 492 // check for pending exceptions. use Gtemp as scratch register. 493 #ifdef ASSERT 494 Label L; 495 496 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 497 Register scratch_reg = Gtemp; 498 __ ld_ptr(exception_addr, scratch_reg); 499 __ br_notnull_short(scratch_reg, Assembler::pt, L); 500 __ should_not_reach_here(); 501 __ bind(L); 502 #endif // ASSERT 503 BLOCK_COMMENT("call forward_exception_entry"); 504 __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 505 // we use O7 linkage so that forward_exception_entry has the issuing PC 506 __ delayed()->restore(); 507 508 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false); 509 return stub->entry_point(); 510 } 511 512 #undef __ 513 #define __ _masm-> 514 515 516 // Generate a routine that sets all the registers so we 517 // can tell if the stop routine prints them correctly. 518 address generate_test_stop() { 519 StubCodeMark mark(this, "StubRoutines", "test_stop"); 520 address start = __ pc(); 521 522 int i; 523 524 __ save_frame(0); 525 526 static jfloat zero = 0.0, one = 1.0; 527 528 // put addr in L0, then load through L0 to F0 529 __ set((intptr_t)&zero, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F0); 530 __ set((intptr_t)&one, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1 531 532 // use add to put 2..18 in F2..F18 533 for ( i = 2; i <= 18; ++i ) { 534 __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i)); 535 } 536 537 // Now put double 2 in F16, double 18 in F18 538 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 ); 539 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 ); 540 541 // use add to put 20..32 in F20..F32 542 for (i = 20; i < 32; i += 2) { 543 __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i)); 544 } 545 546 // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's 547 for ( i = 0; i < 8; ++i ) { 548 if (i < 6) { 549 __ set( i, as_iRegister(i)); 550 __ set(16 + i, as_oRegister(i)); 551 __ set(24 + i, as_gRegister(i)); 552 } 553 __ set( 8 + i, as_lRegister(i)); 554 } 555 556 __ stop("testing stop"); 557 558 559 __ ret(); 560 __ delayed()->restore(); 561 562 return start; 563 } 564 565 566 address generate_stop_subroutine() { 567 StubCodeMark mark(this, "StubRoutines", "stop_subroutine"); 568 address start = __ pc(); 569 570 __ stop_subroutine(); 571 572 return start; 573 } 574 575 address generate_flush_callers_register_windows() { 576 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows"); 577 address start = __ pc(); 578 579 __ flushw(); 580 __ retl(false); 581 __ delayed()->add( FP, STACK_BIAS, O0 ); 582 // The returned value must be a stack pointer whose register save area 583 // is flushed, and will stay flushed while the caller executes. 584 585 return start; 586 } 587 588 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). 589 // 590 // Arguments: 591 // 592 // exchange_value: O0 593 // dest: O1 594 // 595 // Results: 596 // 597 // O0: the value previously stored in dest 598 // 599 address generate_atomic_xchg() { 600 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 601 address start = __ pc(); 602 603 if (UseCASForSwap) { 604 // Use CAS instead of swap, just in case the MP hardware 605 // prefers to work with just one kind of synch. instruction. 606 Label retry; 607 __ BIND(retry); 608 __ mov(O0, O3); // scratch copy of exchange value 609 __ ld(O1, 0, O2); // observe the previous value 610 // try to replace O2 with O3 611 __ cas(O1, O2, O3); 612 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 613 614 __ retl(false); 615 __ delayed()->mov(O2, O0); // report previous value to caller 616 } else { 617 __ retl(false); 618 __ delayed()->swap(O1, 0, O0); 619 } 620 621 return start; 622 } 623 624 625 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) 626 // 627 // Arguments: 628 // 629 // exchange_value: O0 630 // dest: O1 631 // compare_value: O2 632 // 633 // Results: 634 // 635 // O0: the value previously stored in dest 636 // 637 address generate_atomic_cmpxchg() { 638 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 639 address start = __ pc(); 640 641 // cmpxchg(dest, compare_value, exchange_value) 642 __ cas(O1, O2, O0); 643 __ retl(false); 644 __ delayed()->nop(); 645 646 return start; 647 } 648 649 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) 650 // 651 // Arguments: 652 // 653 // exchange_value: O1:O0 654 // dest: O2 655 // compare_value: O4:O3 656 // 657 // Results: 658 // 659 // O1:O0: the value previously stored in dest 660 // 661 // Overwrites: G1,G2,G3 662 // 663 address generate_atomic_cmpxchg_long() { 664 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 665 address start = __ pc(); 666 667 __ sllx(O0, 32, O0); 668 __ srl(O1, 0, O1); 669 __ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value 670 __ sllx(O3, 32, O3); 671 __ srl(O4, 0, O4); 672 __ or3(O3,O4,O3); // O3 holds 64-bit value from exchange_value 673 __ casx(O2, O3, O0); 674 __ srl(O0, 0, O1); // unpacked return value in O1:O0 675 __ retl(false); 676 __ delayed()->srlx(O0, 32, O0); 677 678 return start; 679 } 680 681 682 // Support for jint Atomic::add(jint add_value, volatile jint* dest). 683 // 684 // Arguments: 685 // 686 // add_value: O0 (e.g., +1 or -1) 687 // dest: O1 688 // 689 // Results: 690 // 691 // O0: the new value stored in dest 692 // 693 // Overwrites: O3 694 // 695 address generate_atomic_add() { 696 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 697 address start = __ pc(); 698 __ BIND(_atomic_add_stub); 699 700 Label(retry); 701 __ BIND(retry); 702 703 __ lduw(O1, 0, O2); 704 __ add(O0, O2, O3); 705 __ cas(O1, O2, O3); 706 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 707 __ retl(false); 708 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 709 710 return start; 711 } 712 Label _atomic_add_stub; // called from other stubs 713 714 715 // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super ); 716 // Arguments : 717 // 718 // ret : O0, returned 719 // icc/xcc: set as O0 (depending on wordSize) 720 // sub : O1, argument, not changed 721 // super: O2, argument, not changed 722 // raddr: O7, blown by call 723 address generate_partial_subtype_check() { 724 __ align(CodeEntryAlignment); 725 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 726 address start = __ pc(); 727 Label miss; 728 729 __ save_frame(0); 730 Register Rret = I0; 731 Register Rsub = I1; 732 Register Rsuper = I2; 733 734 Register L0_ary_len = L0; 735 Register L1_ary_ptr = L1; 736 Register L2_super = L2; 737 Register L3_index = L3; 738 739 __ check_klass_subtype_slow_path(Rsub, Rsuper, 740 L0, L1, L2, L3, 741 NULL, &miss); 742 743 // Match falls through here. 744 __ addcc(G0,0,Rret); // set Z flags, Z result 745 746 __ ret(); // Result in Rret is zero; flags set to Z 747 __ delayed()->restore(); 748 749 __ BIND(miss); 750 __ addcc(G0,1,Rret); // set NZ flags, NZ result 751 752 __ ret(); // Result in Rret is != 0; flags set to NZ 753 __ delayed()->restore(); 754 755 return start; 756 } 757 758 759 // Called from MacroAssembler::verify_oop 760 // 761 address generate_verify_oop_subroutine() { 762 StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 763 764 address start = __ pc(); 765 766 __ verify_oop_subroutine(); 767 768 return start; 769 } 770 771 772 // 773 // Verify that a register contains clean 32-bits positive value 774 // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax). 775 // 776 // Input: 777 // Rint - 32-bits value 778 // Rtmp - scratch 779 // 780 void assert_clean_int(Register Rint, Register Rtmp) { 781 #if defined(ASSERT) 782 __ signx(Rint, Rtmp); 783 __ cmp(Rint, Rtmp); 784 __ breakpoint_trap(Assembler::notEqual, Assembler::xcc); 785 #endif 786 } 787 788 // 789 // Generate overlap test for array copy stubs 790 // 791 // Input: 792 // O0 - array1 793 // O1 - array2 794 // O2 - element count 795 // 796 // Kills temps: O3, O4 797 // 798 void array_overlap_test(address no_overlap_target, int log2_elem_size) { 799 assert(no_overlap_target != NULL, "must be generated"); 800 array_overlap_test(no_overlap_target, NULL, log2_elem_size); 801 } 802 void array_overlap_test(Label& L_no_overlap, int log2_elem_size) { 803 array_overlap_test(NULL, &L_no_overlap, log2_elem_size); 804 } 805 void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) { 806 const Register from = O0; 807 const Register to = O1; 808 const Register count = O2; 809 const Register to_from = O3; // to - from 810 const Register byte_count = O4; // count << log2_elem_size 811 812 __ subcc(to, from, to_from); 813 __ sll_ptr(count, log2_elem_size, byte_count); 814 if (NOLp == NULL) 815 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target); 816 else 817 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp)); 818 __ delayed()->cmp(to_from, byte_count); 819 if (NOLp == NULL) 820 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target); 821 else 822 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp)); 823 __ delayed()->nop(); 824 } 825 826 827 // 828 // Generate main code for disjoint arraycopy 829 // 830 typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec, 831 Label& L_loop, bool use_prefetch, bool use_bis); 832 833 void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size, 834 int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) { 835 Label L_copy; 836 837 assert(log2_elem_size <= 3, "the following code should be changed"); 838 int count_dec = 16>>log2_elem_size; 839 840 int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance); 841 assert(prefetch_dist < 4096, "invalid value"); 842 prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size 843 int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count 844 845 if (UseBlockCopy) { 846 Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy; 847 848 // 64 bytes tail + bytes copied in one loop iteration 849 int tail_size = 64 + iter_size; 850 int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size; 851 // Use BIS copy only for big arrays since it requires membar. 852 __ set(block_copy_count, O4); 853 __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy); 854 // This code is for disjoint source and destination: 855 // to <= from || to >= from+count 856 // but BIS will stomp over 'from' if (to > from-tail_size && to <= from) 857 __ sub(from, to, O4); 858 __ srax(O4, 4, O4); // divide by 16 since following short branch have only 5 bits for imm. 859 __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy); 860 861 __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 862 // BIS should not be used to copy tail (64 bytes+iter_size) 863 // to avoid zeroing of following values. 864 __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0 865 866 if (prefetch_count > 0) { // rounded up to one iteration count 867 // Do prefetching only if copy size is bigger 868 // than prefetch distance. 869 __ set(prefetch_count, O4); 870 __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy); 871 __ sub(count, O4, count); 872 873 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true); 874 __ set(prefetch_count, O4); 875 __ add(count, O4, count); 876 877 } // prefetch_count > 0 878 879 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true); 880 __ add(count, (tail_size>>log2_elem_size), count); // restore count 881 882 __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT); 883 // BIS needs membar. 884 __ membar(Assembler::StoreLoad); 885 // Copy tail 886 __ ba_short(L_copy); 887 888 __ BIND(L_skip_block_copy); 889 } // UseBlockCopy 890 891 if (prefetch_count > 0) { // rounded up to one iteration count 892 // Do prefetching only if copy size is bigger 893 // than prefetch distance. 894 __ set(prefetch_count, O4); 895 __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy); 896 __ sub(count, O4, count); 897 898 Label L_copy_prefetch; 899 (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false); 900 __ set(prefetch_count, O4); 901 __ add(count, O4, count); 902 903 } // prefetch_count > 0 904 905 (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false); 906 } 907 908 909 910 // 911 // Helper methods for copy_16_bytes_forward_with_shift() 912 // 913 void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec, 914 Label& L_loop, bool use_prefetch, bool use_bis) { 915 916 const Register left_shift = G1; // left shift bit counter 917 const Register right_shift = G5; // right shift bit counter 918 919 __ align(OptoLoopAlignment); 920 __ BIND(L_loop); 921 if (use_prefetch) { 922 if (ArraycopySrcPrefetchDistance > 0) { 923 __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads); 924 } 925 if (ArraycopyDstPrefetchDistance > 0) { 926 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads); 927 } 928 } 929 __ ldx(from, 0, O4); 930 __ ldx(from, 8, G4); 931 __ inc(to, 16); 932 __ inc(from, 16); 933 __ deccc(count, count_dec); // Can we do next iteration after this one? 934 __ srlx(O4, right_shift, G3); 935 __ bset(G3, O3); 936 __ sllx(O4, left_shift, O4); 937 __ srlx(G4, right_shift, G3); 938 __ bset(G3, O4); 939 if (use_bis) { 940 __ stxa(O3, to, -16); 941 __ stxa(O4, to, -8); 942 } else { 943 __ stx(O3, to, -16); 944 __ stx(O4, to, -8); 945 } 946 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 947 __ delayed()->sllx(G4, left_shift, O3); 948 } 949 950 // Copy big chunks forward with shift 951 // 952 // Inputs: 953 // from - source arrays 954 // to - destination array aligned to 8-bytes 955 // count - elements count to copy >= the count equivalent to 16 bytes 956 // count_dec - elements count's decrement equivalent to 16 bytes 957 // L_copy_bytes - copy exit label 958 // 959 void copy_16_bytes_forward_with_shift(Register from, Register to, 960 Register count, int log2_elem_size, Label& L_copy_bytes) { 961 Label L_aligned_copy, L_copy_last_bytes; 962 assert(log2_elem_size <= 3, "the following code should be changed"); 963 int count_dec = 16>>log2_elem_size; 964 965 // if both arrays have the same alignment mod 8, do 8 bytes aligned copy 966 __ andcc(from, 7, G1); // misaligned bytes 967 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 968 __ delayed()->nop(); 969 970 const Register left_shift = G1; // left shift bit counter 971 const Register right_shift = G5; // right shift bit counter 972 973 __ sll(G1, LogBitsPerByte, left_shift); 974 __ mov(64, right_shift); 975 __ sub(right_shift, left_shift, right_shift); 976 977 // 978 // Load 2 aligned 8-bytes chunks and use one from previous iteration 979 // to form 2 aligned 8-bytes chunks to store. 980 // 981 __ dec(count, count_dec); // Pre-decrement 'count' 982 __ andn(from, 7, from); // Align address 983 __ ldx(from, 0, O3); 984 __ inc(from, 8); 985 __ sllx(O3, left_shift, O3); 986 987 disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop); 988 989 __ inccc(count, count_dec>>1 ); // + 8 bytes 990 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes); 991 __ delayed()->inc(count, count_dec>>1); // restore 'count' 992 993 // copy 8 bytes, part of them already loaded in O3 994 __ ldx(from, 0, O4); 995 __ inc(to, 8); 996 __ inc(from, 8); 997 __ srlx(O4, right_shift, G3); 998 __ bset(O3, G3); 999 __ stx(G3, to, -8); 1000 1001 __ BIND(L_copy_last_bytes); 1002 __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes 1003 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes); 1004 __ delayed()->sub(from, right_shift, from); // restore address 1005 1006 __ BIND(L_aligned_copy); 1007 } 1008 1009 // Copy big chunks backward with shift 1010 // 1011 // Inputs: 1012 // end_from - source arrays end address 1013 // end_to - destination array end address aligned to 8-bytes 1014 // count - elements count to copy >= the count equivalent to 16 bytes 1015 // count_dec - elements count's decrement equivalent to 16 bytes 1016 // L_aligned_copy - aligned copy exit label 1017 // L_copy_bytes - copy exit label 1018 // 1019 void copy_16_bytes_backward_with_shift(Register end_from, Register end_to, 1020 Register count, int count_dec, 1021 Label& L_aligned_copy, Label& L_copy_bytes) { 1022 Label L_loop, L_copy_last_bytes; 1023 1024 // if both arrays have the same alignment mod 8, do 8 bytes aligned copy 1025 __ andcc(end_from, 7, G1); // misaligned bytes 1026 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1027 __ delayed()->deccc(count, count_dec); // Pre-decrement 'count' 1028 1029 const Register left_shift = G1; // left shift bit counter 1030 const Register right_shift = G5; // right shift bit counter 1031 1032 __ sll(G1, LogBitsPerByte, left_shift); 1033 __ mov(64, right_shift); 1034 __ sub(right_shift, left_shift, right_shift); 1035 1036 // 1037 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1038 // to form 2 aligned 8-bytes chunks to store. 1039 // 1040 __ andn(end_from, 7, end_from); // Align address 1041 __ ldx(end_from, 0, O3); 1042 __ align(OptoLoopAlignment); 1043 __ BIND(L_loop); 1044 __ ldx(end_from, -8, O4); 1045 __ deccc(count, count_dec); // Can we do next iteration after this one? 1046 __ ldx(end_from, -16, G4); 1047 __ dec(end_to, 16); 1048 __ dec(end_from, 16); 1049 __ srlx(O3, right_shift, O3); 1050 __ sllx(O4, left_shift, G3); 1051 __ bset(G3, O3); 1052 __ stx(O3, end_to, 8); 1053 __ srlx(O4, right_shift, O4); 1054 __ sllx(G4, left_shift, G3); 1055 __ bset(G3, O4); 1056 __ stx(O4, end_to, 0); 1057 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 1058 __ delayed()->mov(G4, O3); 1059 1060 __ inccc(count, count_dec>>1 ); // + 8 bytes 1061 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes); 1062 __ delayed()->inc(count, count_dec>>1); // restore 'count' 1063 1064 // copy 8 bytes, part of them already loaded in O3 1065 __ ldx(end_from, -8, O4); 1066 __ dec(end_to, 8); 1067 __ dec(end_from, 8); 1068 __ srlx(O3, right_shift, O3); 1069 __ sllx(O4, left_shift, G3); 1070 __ bset(O3, G3); 1071 __ stx(G3, end_to, 0); 1072 1073 __ BIND(L_copy_last_bytes); 1074 __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes 1075 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes); 1076 __ delayed()->add(end_from, left_shift, end_from); // restore address 1077 } 1078 1079 // 1080 // Generate stub for disjoint byte copy. If "aligned" is true, the 1081 // "from" and "to" addresses are assumed to be heapword aligned. 1082 // 1083 // Arguments for generated stub: 1084 // from: O0 1085 // to: O1 1086 // count: O2 treated as signed 1087 // 1088 address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) { 1089 __ align(CodeEntryAlignment); 1090 StubCodeMark mark(this, "StubRoutines", name); 1091 address start = __ pc(); 1092 1093 Label L_skip_alignment, L_align; 1094 Label L_copy_byte, L_copy_byte_loop, L_exit; 1095 1096 const Register from = O0; // source array address 1097 const Register to = O1; // destination array address 1098 const Register count = O2; // elements count 1099 const Register offset = O5; // offset from start of arrays 1100 // O3, O4, G3, G4 are used as temp registers 1101 1102 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1103 1104 if (entry != NULL) { 1105 *entry = __ pc(); 1106 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1107 BLOCK_COMMENT("Entry:"); 1108 } 1109 1110 // for short arrays, just do single element copy 1111 __ cmp(count, 23); // 16 + 7 1112 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte); 1113 __ delayed()->mov(G0, offset); 1114 1115 if (aligned) { 1116 // 'aligned' == true when it is known statically during compilation 1117 // of this arraycopy call site that both 'from' and 'to' addresses 1118 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 1119 // 1120 // Aligned arrays have 4 bytes alignment in 32-bits VM 1121 // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM 1122 // 1123 } else { 1124 // copy bytes to align 'to' on 8 byte boundary 1125 __ andcc(to, 7, G1); // misaligned bytes 1126 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1127 __ delayed()->neg(G1); 1128 __ inc(G1, 8); // bytes need to copy to next 8-bytes alignment 1129 __ sub(count, G1, count); 1130 __ BIND(L_align); 1131 __ ldub(from, 0, O3); 1132 __ deccc(G1); 1133 __ inc(from); 1134 __ stb(O3, to, 0); 1135 __ br(Assembler::notZero, false, Assembler::pt, L_align); 1136 __ delayed()->inc(to); 1137 __ BIND(L_skip_alignment); 1138 } 1139 if (!aligned) { 1140 // Copy with shift 16 bytes per iteration if arrays do not have 1141 // the same alignment mod 8, otherwise fall through to the next 1142 // code for aligned copy. 1143 // The compare above (count >= 23) guarantes 'count' >= 16 bytes. 1144 // Also jump over aligned copy after the copy with shift completed. 1145 1146 copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte); 1147 } 1148 1149 // Both array are 8 bytes aligned, copy 16 bytes at a time 1150 __ and3(count, 7, G4); // Save count 1151 __ srl(count, 3, count); 1152 generate_disjoint_long_copy_core(aligned); 1153 __ mov(G4, count); // Restore count 1154 1155 // copy tailing bytes 1156 __ BIND(L_copy_byte); 1157 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1158 __ align(OptoLoopAlignment); 1159 __ BIND(L_copy_byte_loop); 1160 __ ldub(from, offset, O3); 1161 __ deccc(count); 1162 __ stb(O3, to, offset); 1163 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop); 1164 __ delayed()->inc(offset); 1165 1166 __ BIND(L_exit); 1167 // O3, O4 are used as temp registers 1168 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4); 1169 __ retl(); 1170 __ delayed()->mov(G0, O0); // return 0 1171 return start; 1172 } 1173 1174 // 1175 // Generate stub for conjoint byte copy. If "aligned" is true, the 1176 // "from" and "to" addresses are assumed to be heapword aligned. 1177 // 1178 // Arguments for generated stub: 1179 // from: O0 1180 // to: O1 1181 // count: O2 treated as signed 1182 // 1183 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1184 address *entry, const char *name) { 1185 // Do reverse copy. 1186 1187 __ align(CodeEntryAlignment); 1188 StubCodeMark mark(this, "StubRoutines", name); 1189 address start = __ pc(); 1190 1191 Label L_skip_alignment, L_align, L_aligned_copy; 1192 Label L_copy_byte, L_copy_byte_loop, L_exit; 1193 1194 const Register from = O0; // source array address 1195 const Register to = O1; // destination array address 1196 const Register count = O2; // elements count 1197 const Register end_from = from; // source array end address 1198 const Register end_to = to; // destination array end address 1199 1200 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1201 1202 if (entry != NULL) { 1203 *entry = __ pc(); 1204 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1205 BLOCK_COMMENT("Entry:"); 1206 } 1207 1208 array_overlap_test(nooverlap_target, 0); 1209 1210 __ add(to, count, end_to); // offset after last copied element 1211 1212 // for short arrays, just do single element copy 1213 __ cmp(count, 23); // 16 + 7 1214 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte); 1215 __ delayed()->add(from, count, end_from); 1216 1217 { 1218 // Align end of arrays since they could be not aligned even 1219 // when arrays itself are aligned. 1220 1221 // copy bytes to align 'end_to' on 8 byte boundary 1222 __ andcc(end_to, 7, G1); // misaligned bytes 1223 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1224 __ delayed()->nop(); 1225 __ sub(count, G1, count); 1226 __ BIND(L_align); 1227 __ dec(end_from); 1228 __ dec(end_to); 1229 __ ldub(end_from, 0, O3); 1230 __ deccc(G1); 1231 __ brx(Assembler::notZero, false, Assembler::pt, L_align); 1232 __ delayed()->stb(O3, end_to, 0); 1233 __ BIND(L_skip_alignment); 1234 } 1235 if (aligned) { 1236 // Both arrays are aligned to 8-bytes in 64-bits VM. 1237 // The 'count' is decremented in copy_16_bytes_backward_with_shift() 1238 // in unaligned case. 1239 __ dec(count, 16); 1240 } else { 1241 // Copy with shift 16 bytes per iteration if arrays do not have 1242 // the same alignment mod 8, otherwise jump to the next 1243 // code for aligned copy (and substracting 16 from 'count' before jump). 1244 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1245 // Also jump over aligned copy after the copy with shift completed. 1246 1247 copy_16_bytes_backward_with_shift(end_from, end_to, count, 16, 1248 L_aligned_copy, L_copy_byte); 1249 } 1250 // copy 4 elements (16 bytes) at a time 1251 __ align(OptoLoopAlignment); 1252 __ BIND(L_aligned_copy); 1253 __ dec(end_from, 16); 1254 __ ldx(end_from, 8, O3); 1255 __ ldx(end_from, 0, O4); 1256 __ dec(end_to, 16); 1257 __ deccc(count, 16); 1258 __ stx(O3, end_to, 8); 1259 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1260 __ delayed()->stx(O4, end_to, 0); 1261 __ inc(count, 16); 1262 1263 // copy 1 element (2 bytes) at a time 1264 __ BIND(L_copy_byte); 1265 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1266 __ align(OptoLoopAlignment); 1267 __ BIND(L_copy_byte_loop); 1268 __ dec(end_from); 1269 __ dec(end_to); 1270 __ ldub(end_from, 0, O4); 1271 __ deccc(count); 1272 __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop); 1273 __ delayed()->stb(O4, end_to, 0); 1274 1275 __ BIND(L_exit); 1276 // O3, O4 are used as temp registers 1277 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4); 1278 __ retl(); 1279 __ delayed()->mov(G0, O0); // return 0 1280 return start; 1281 } 1282 1283 // 1284 // Generate stub for disjoint short copy. If "aligned" is true, the 1285 // "from" and "to" addresses are assumed to be heapword aligned. 1286 // 1287 // Arguments for generated stub: 1288 // from: O0 1289 // to: O1 1290 // count: O2 treated as signed 1291 // 1292 address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) { 1293 __ align(CodeEntryAlignment); 1294 StubCodeMark mark(this, "StubRoutines", name); 1295 address start = __ pc(); 1296 1297 Label L_skip_alignment, L_skip_alignment2; 1298 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 1299 1300 const Register from = O0; // source array address 1301 const Register to = O1; // destination array address 1302 const Register count = O2; // elements count 1303 const Register offset = O5; // offset from start of arrays 1304 // O3, O4, G3, G4 are used as temp registers 1305 1306 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1307 1308 if (entry != NULL) { 1309 *entry = __ pc(); 1310 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1311 BLOCK_COMMENT("Entry:"); 1312 } 1313 1314 // for short arrays, just do single element copy 1315 __ cmp(count, 11); // 8 + 3 (22 bytes) 1316 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes); 1317 __ delayed()->mov(G0, offset); 1318 1319 if (aligned) { 1320 // 'aligned' == true when it is known statically during compilation 1321 // of this arraycopy call site that both 'from' and 'to' addresses 1322 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 1323 // 1324 // Aligned arrays have 4 bytes alignment in 32-bits VM 1325 // and 8 bytes - in 64-bits VM. 1326 // 1327 } else { 1328 // copy 1 element if necessary to align 'to' on an 4 bytes 1329 __ andcc(to, 3, G0); 1330 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1331 __ delayed()->lduh(from, 0, O3); 1332 __ inc(from, 2); 1333 __ inc(to, 2); 1334 __ dec(count); 1335 __ sth(O3, to, -2); 1336 __ BIND(L_skip_alignment); 1337 1338 // copy 2 elements to align 'to' on an 8 byte boundary 1339 __ andcc(to, 7, G0); 1340 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2); 1341 __ delayed()->lduh(from, 0, O3); 1342 __ dec(count, 2); 1343 __ lduh(from, 2, O4); 1344 __ inc(from, 4); 1345 __ inc(to, 4); 1346 __ sth(O3, to, -4); 1347 __ sth(O4, to, -2); 1348 __ BIND(L_skip_alignment2); 1349 } 1350 if (!aligned) { 1351 // Copy with shift 16 bytes per iteration if arrays do not have 1352 // the same alignment mod 8, otherwise fall through to the next 1353 // code for aligned copy. 1354 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1355 // Also jump over aligned copy after the copy with shift completed. 1356 1357 copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes); 1358 } 1359 1360 // Both array are 8 bytes aligned, copy 16 bytes at a time 1361 __ and3(count, 3, G4); // Save 1362 __ srl(count, 2, count); 1363 generate_disjoint_long_copy_core(aligned); 1364 __ mov(G4, count); // restore 1365 1366 // copy 1 element at a time 1367 __ BIND(L_copy_2_bytes); 1368 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1369 __ align(OptoLoopAlignment); 1370 __ BIND(L_copy_2_bytes_loop); 1371 __ lduh(from, offset, O3); 1372 __ deccc(count); 1373 __ sth(O3, to, offset); 1374 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop); 1375 __ delayed()->inc(offset, 2); 1376 1377 __ BIND(L_exit); 1378 // O3, O4 are used as temp registers 1379 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4); 1380 __ retl(); 1381 __ delayed()->mov(G0, O0); // return 0 1382 return start; 1383 } 1384 1385 // 1386 // Generate stub for disjoint short fill. If "aligned" is true, the 1387 // "to" address is assumed to be heapword aligned. 1388 // 1389 // Arguments for generated stub: 1390 // to: O0 1391 // value: O1 1392 // count: O2 treated as signed 1393 // 1394 address generate_fill(BasicType t, bool aligned, const char* name) { 1395 __ align(CodeEntryAlignment); 1396 StubCodeMark mark(this, "StubRoutines", name); 1397 address start = __ pc(); 1398 1399 const Register to = O0; // source array address 1400 const Register value = O1; // fill value 1401 const Register count = O2; // elements count 1402 // O3 is used as a temp register 1403 1404 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1405 1406 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte; 1407 Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes; 1408 1409 int shift = -1; 1410 switch (t) { 1411 case T_BYTE: 1412 shift = 2; 1413 break; 1414 case T_SHORT: 1415 shift = 1; 1416 break; 1417 case T_INT: 1418 shift = 0; 1419 break; 1420 default: ShouldNotReachHere(); 1421 } 1422 1423 BLOCK_COMMENT("Entry:"); 1424 1425 if (t == T_BYTE) { 1426 // Zero extend value 1427 __ and3(value, 0xff, value); 1428 __ sllx(value, 8, O3); 1429 __ or3(value, O3, value); 1430 } 1431 if (t == T_SHORT) { 1432 // Zero extend value 1433 __ sllx(value, 48, value); 1434 __ srlx(value, 48, value); 1435 } 1436 if (t == T_BYTE || t == T_SHORT) { 1437 __ sllx(value, 16, O3); 1438 __ or3(value, O3, value); 1439 } 1440 1441 __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 1442 __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp 1443 __ delayed()->andcc(count, 1, G0); 1444 1445 if (!aligned && (t == T_BYTE || t == T_SHORT)) { 1446 // align source address at 4 bytes address boundary 1447 if (t == T_BYTE) { 1448 // One byte misalignment happens only for byte arrays 1449 __ andcc(to, 1, G0); 1450 __ br(Assembler::zero, false, Assembler::pt, L_skip_align1); 1451 __ delayed()->nop(); 1452 __ stb(value, to, 0); 1453 __ inc(to, 1); 1454 __ dec(count, 1); 1455 __ BIND(L_skip_align1); 1456 } 1457 // Two bytes misalignment happens only for byte and short (char) arrays 1458 __ andcc(to, 2, G0); 1459 __ br(Assembler::zero, false, Assembler::pt, L_skip_align2); 1460 __ delayed()->nop(); 1461 __ sth(value, to, 0); 1462 __ inc(to, 2); 1463 __ dec(count, 1 << (shift - 1)); 1464 __ BIND(L_skip_align2); 1465 } 1466 if (!aligned) { 1467 // align to 8 bytes, we know we are 4 byte aligned to start 1468 __ andcc(to, 7, G0); 1469 __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes); 1470 __ delayed()->nop(); 1471 __ stw(value, to, 0); 1472 __ inc(to, 4); 1473 __ dec(count, 1 << shift); 1474 __ BIND(L_fill_32_bytes); 1475 } 1476 1477 if (t == T_INT) { 1478 // Zero extend value 1479 __ srl(value, 0, value); 1480 } 1481 if (t == T_BYTE || t == T_SHORT || t == T_INT) { 1482 __ sllx(value, 32, O3); 1483 __ or3(value, O3, value); 1484 } 1485 1486 Label L_check_fill_8_bytes; 1487 // Fill 32-byte chunks 1488 __ subcc(count, 8 << shift, count); 1489 __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes); 1490 __ delayed()->nop(); 1491 1492 Label L_fill_32_bytes_loop, L_fill_4_bytes; 1493 __ align(16); 1494 __ BIND(L_fill_32_bytes_loop); 1495 1496 __ stx(value, to, 0); 1497 __ stx(value, to, 8); 1498 __ stx(value, to, 16); 1499 __ stx(value, to, 24); 1500 1501 __ subcc(count, 8 << shift, count); 1502 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop); 1503 __ delayed()->add(to, 32, to); 1504 1505 __ BIND(L_check_fill_8_bytes); 1506 __ addcc(count, 8 << shift, count); 1507 __ brx(Assembler::zero, false, Assembler::pn, L_exit); 1508 __ delayed()->subcc(count, 1 << (shift + 1), count); 1509 __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes); 1510 __ delayed()->andcc(count, 1<<shift, G0); 1511 1512 // 1513 // length is too short, just fill 8 bytes at a time 1514 // 1515 Label L_fill_8_bytes_loop; 1516 __ BIND(L_fill_8_bytes_loop); 1517 __ stx(value, to, 0); 1518 __ subcc(count, 1 << (shift + 1), count); 1519 __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop); 1520 __ delayed()->add(to, 8, to); 1521 1522 // fill trailing 4 bytes 1523 __ andcc(count, 1<<shift, G0); // in delay slot of branches 1524 if (t == T_INT) { 1525 __ BIND(L_fill_elements); 1526 } 1527 __ BIND(L_fill_4_bytes); 1528 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes); 1529 if (t == T_BYTE || t == T_SHORT) { 1530 __ delayed()->andcc(count, 1<<(shift-1), G0); 1531 } else { 1532 __ delayed()->nop(); 1533 } 1534 __ stw(value, to, 0); 1535 if (t == T_BYTE || t == T_SHORT) { 1536 __ inc(to, 4); 1537 // fill trailing 2 bytes 1538 __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches 1539 __ BIND(L_fill_2_bytes); 1540 __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte); 1541 __ delayed()->andcc(count, 1, count); 1542 __ sth(value, to, 0); 1543 if (t == T_BYTE) { 1544 __ inc(to, 2); 1545 // fill trailing byte 1546 __ andcc(count, 1, count); // in delay slot of branches 1547 __ BIND(L_fill_byte); 1548 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1549 __ delayed()->nop(); 1550 __ stb(value, to, 0); 1551 } else { 1552 __ BIND(L_fill_byte); 1553 } 1554 } else { 1555 __ BIND(L_fill_2_bytes); 1556 } 1557 __ BIND(L_exit); 1558 __ retl(); 1559 __ delayed()->nop(); 1560 1561 // Handle copies less than 8 bytes. Int is handled elsewhere. 1562 if (t == T_BYTE) { 1563 __ BIND(L_fill_elements); 1564 Label L_fill_2, L_fill_4; 1565 // in delay slot __ andcc(count, 1, G0); 1566 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1567 __ delayed()->andcc(count, 2, G0); 1568 __ stb(value, to, 0); 1569 __ inc(to, 1); 1570 __ BIND(L_fill_2); 1571 __ brx(Assembler::zero, false, Assembler::pt, L_fill_4); 1572 __ delayed()->andcc(count, 4, G0); 1573 __ stb(value, to, 0); 1574 __ stb(value, to, 1); 1575 __ inc(to, 2); 1576 __ BIND(L_fill_4); 1577 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1578 __ delayed()->nop(); 1579 __ stb(value, to, 0); 1580 __ stb(value, to, 1); 1581 __ stb(value, to, 2); 1582 __ retl(); 1583 __ delayed()->stb(value, to, 3); 1584 } 1585 1586 if (t == T_SHORT) { 1587 Label L_fill_2; 1588 __ BIND(L_fill_elements); 1589 // in delay slot __ andcc(count, 1, G0); 1590 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2); 1591 __ delayed()->andcc(count, 2, G0); 1592 __ sth(value, to, 0); 1593 __ inc(to, 2); 1594 __ BIND(L_fill_2); 1595 __ brx(Assembler::zero, false, Assembler::pt, L_exit); 1596 __ delayed()->nop(); 1597 __ sth(value, to, 0); 1598 __ retl(); 1599 __ delayed()->sth(value, to, 2); 1600 } 1601 return start; 1602 } 1603 1604 // 1605 // Generate stub for conjoint short copy. If "aligned" is true, the 1606 // "from" and "to" addresses are assumed to be heapword aligned. 1607 // 1608 // Arguments for generated stub: 1609 // from: O0 1610 // to: O1 1611 // count: O2 treated as signed 1612 // 1613 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1614 address *entry, const char *name) { 1615 // Do reverse copy. 1616 1617 __ align(CodeEntryAlignment); 1618 StubCodeMark mark(this, "StubRoutines", name); 1619 address start = __ pc(); 1620 1621 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy; 1622 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 1623 1624 const Register from = O0; // source array address 1625 const Register to = O1; // destination array address 1626 const Register count = O2; // elements count 1627 const Register end_from = from; // source array end address 1628 const Register end_to = to; // destination array end address 1629 1630 const Register byte_count = O3; // bytes count to copy 1631 1632 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1633 1634 if (entry != NULL) { 1635 *entry = __ pc(); 1636 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1637 BLOCK_COMMENT("Entry:"); 1638 } 1639 1640 array_overlap_test(nooverlap_target, 1); 1641 1642 __ sllx(count, LogBytesPerShort, byte_count); 1643 __ add(to, byte_count, end_to); // offset after last copied element 1644 1645 // for short arrays, just do single element copy 1646 __ cmp(count, 11); // 8 + 3 (22 bytes) 1647 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes); 1648 __ delayed()->add(from, byte_count, end_from); 1649 1650 { 1651 // Align end of arrays since they could be not aligned even 1652 // when arrays itself are aligned. 1653 1654 // copy 1 element if necessary to align 'end_to' on an 4 bytes 1655 __ andcc(end_to, 3, G0); 1656 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1657 __ delayed()->lduh(end_from, -2, O3); 1658 __ dec(end_from, 2); 1659 __ dec(end_to, 2); 1660 __ dec(count); 1661 __ sth(O3, end_to, 0); 1662 __ BIND(L_skip_alignment); 1663 1664 // copy 2 elements to align 'end_to' on an 8 byte boundary 1665 __ andcc(end_to, 7, G0); 1666 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2); 1667 __ delayed()->lduh(end_from, -2, O3); 1668 __ dec(count, 2); 1669 __ lduh(end_from, -4, O4); 1670 __ dec(end_from, 4); 1671 __ dec(end_to, 4); 1672 __ sth(O3, end_to, 2); 1673 __ sth(O4, end_to, 0); 1674 __ BIND(L_skip_alignment2); 1675 } 1676 if (aligned) { 1677 // Both arrays are aligned to 8-bytes in 64-bits VM. 1678 // The 'count' is decremented in copy_16_bytes_backward_with_shift() 1679 // in unaligned case. 1680 __ dec(count, 8); 1681 } else { 1682 // Copy with shift 16 bytes per iteration if arrays do not have 1683 // the same alignment mod 8, otherwise jump to the next 1684 // code for aligned copy (and substracting 8 from 'count' before jump). 1685 // The compare above (count >= 11) guarantes 'count' >= 16 bytes. 1686 // Also jump over aligned copy after the copy with shift completed. 1687 1688 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8, 1689 L_aligned_copy, L_copy_2_bytes); 1690 } 1691 // copy 4 elements (16 bytes) at a time 1692 __ align(OptoLoopAlignment); 1693 __ BIND(L_aligned_copy); 1694 __ dec(end_from, 16); 1695 __ ldx(end_from, 8, O3); 1696 __ ldx(end_from, 0, O4); 1697 __ dec(end_to, 16); 1698 __ deccc(count, 8); 1699 __ stx(O3, end_to, 8); 1700 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1701 __ delayed()->stx(O4, end_to, 0); 1702 __ inc(count, 8); 1703 1704 // copy 1 element (2 bytes) at a time 1705 __ BIND(L_copy_2_bytes); 1706 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1707 __ BIND(L_copy_2_bytes_loop); 1708 __ dec(end_from, 2); 1709 __ dec(end_to, 2); 1710 __ lduh(end_from, 0, O4); 1711 __ deccc(count); 1712 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop); 1713 __ delayed()->sth(O4, end_to, 0); 1714 1715 __ BIND(L_exit); 1716 // O3, O4 are used as temp registers 1717 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4); 1718 __ retl(); 1719 __ delayed()->mov(G0, O0); // return 0 1720 return start; 1721 } 1722 1723 // 1724 // Helper methods for generate_disjoint_int_copy_core() 1725 // 1726 void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec, 1727 Label& L_loop, bool use_prefetch, bool use_bis) { 1728 1729 __ align(OptoLoopAlignment); 1730 __ BIND(L_loop); 1731 if (use_prefetch) { 1732 if (ArraycopySrcPrefetchDistance > 0) { 1733 __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads); 1734 } 1735 if (ArraycopyDstPrefetchDistance > 0) { 1736 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads); 1737 } 1738 } 1739 __ ldx(from, 4, O4); 1740 __ ldx(from, 12, G4); 1741 __ inc(to, 16); 1742 __ inc(from, 16); 1743 __ deccc(count, 4); // Can we do next iteration after this one? 1744 1745 __ srlx(O4, 32, G3); 1746 __ bset(G3, O3); 1747 __ sllx(O4, 32, O4); 1748 __ srlx(G4, 32, G3); 1749 __ bset(G3, O4); 1750 if (use_bis) { 1751 __ stxa(O3, to, -16); 1752 __ stxa(O4, to, -8); 1753 } else { 1754 __ stx(O3, to, -16); 1755 __ stx(O4, to, -8); 1756 } 1757 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 1758 __ delayed()->sllx(G4, 32, O3); 1759 1760 } 1761 1762 // 1763 // Generate core code for disjoint int copy (and oop copy on 32-bit). 1764 // If "aligned" is true, the "from" and "to" addresses are assumed 1765 // to be heapword aligned. 1766 // 1767 // Arguments: 1768 // from: O0 1769 // to: O1 1770 // count: O2 treated as signed 1771 // 1772 void generate_disjoint_int_copy_core(bool aligned) { 1773 1774 Label L_skip_alignment, L_aligned_copy; 1775 Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 1776 1777 const Register from = O0; // source array address 1778 const Register to = O1; // destination array address 1779 const Register count = O2; // elements count 1780 const Register offset = O5; // offset from start of arrays 1781 // O3, O4, G3, G4 are used as temp registers 1782 1783 // 'aligned' == true when it is known statically during compilation 1784 // of this arraycopy call site that both 'from' and 'to' addresses 1785 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()). 1786 // 1787 // Aligned arrays have 4 bytes alignment in 32-bits VM 1788 // and 8 bytes - in 64-bits VM. 1789 // 1790 if (!aligned) { 1791 // The next check could be put under 'ifndef' since the code in 1792 // generate_disjoint_long_copy_core() has own checks and set 'offset'. 1793 1794 // for short arrays, just do single element copy 1795 __ cmp(count, 5); // 4 + 1 (20 bytes) 1796 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 1797 __ delayed()->mov(G0, offset); 1798 1799 // copy 1 element to align 'to' on an 8 byte boundary 1800 __ andcc(to, 7, G0); 1801 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1802 __ delayed()->ld(from, 0, O3); 1803 __ inc(from, 4); 1804 __ inc(to, 4); 1805 __ dec(count); 1806 __ st(O3, to, -4); 1807 __ BIND(L_skip_alignment); 1808 1809 // if arrays have same alignment mod 8, do 4 elements copy 1810 __ andcc(from, 7, G0); 1811 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1812 __ delayed()->ld(from, 0, O3); 1813 1814 // 1815 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1816 // to form 2 aligned 8-bytes chunks to store. 1817 // 1818 // copy_16_bytes_forward_with_shift() is not used here since this 1819 // code is more optimal. 1820 1821 // copy with shift 4 elements (16 bytes) at a time 1822 __ dec(count, 4); // The cmp at the beginning guaranty count >= 4 1823 __ sllx(O3, 32, O3); 1824 1825 disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop); 1826 1827 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 1828 __ delayed()->inc(count, 4); // restore 'count' 1829 1830 __ BIND(L_aligned_copy); 1831 } // !aligned 1832 1833 // copy 4 elements (16 bytes) at a time 1834 __ and3(count, 1, G4); // Save 1835 __ srl(count, 1, count); 1836 generate_disjoint_long_copy_core(aligned); 1837 __ mov(G4, count); // Restore 1838 1839 // copy 1 element at a time 1840 __ BIND(L_copy_4_bytes); 1841 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1842 __ BIND(L_copy_4_bytes_loop); 1843 __ ld(from, offset, O3); 1844 __ deccc(count); 1845 __ st(O3, to, offset); 1846 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop); 1847 __ delayed()->inc(offset, 4); 1848 __ BIND(L_exit); 1849 } 1850 1851 // 1852 // Generate stub for disjoint int copy. If "aligned" is true, the 1853 // "from" and "to" addresses are assumed to be heapword aligned. 1854 // 1855 // Arguments for generated stub: 1856 // from: O0 1857 // to: O1 1858 // count: O2 treated as signed 1859 // 1860 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { 1861 __ align(CodeEntryAlignment); 1862 StubCodeMark mark(this, "StubRoutines", name); 1863 address start = __ pc(); 1864 1865 const Register count = O2; 1866 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1867 1868 if (entry != NULL) { 1869 *entry = __ pc(); 1870 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1871 BLOCK_COMMENT("Entry:"); 1872 } 1873 1874 generate_disjoint_int_copy_core(aligned); 1875 1876 // O3, O4 are used as temp registers 1877 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 1878 __ retl(); 1879 __ delayed()->mov(G0, O0); // return 0 1880 return start; 1881 } 1882 1883 // 1884 // Generate core code for conjoint int copy (and oop copy on 32-bit). 1885 // If "aligned" is true, the "from" and "to" addresses are assumed 1886 // to be heapword aligned. 1887 // 1888 // Arguments: 1889 // from: O0 1890 // to: O1 1891 // count: O2 treated as signed 1892 // 1893 void generate_conjoint_int_copy_core(bool aligned) { 1894 // Do reverse copy. 1895 1896 Label L_skip_alignment, L_aligned_copy; 1897 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 1898 1899 const Register from = O0; // source array address 1900 const Register to = O1; // destination array address 1901 const Register count = O2; // elements count 1902 const Register end_from = from; // source array end address 1903 const Register end_to = to; // destination array end address 1904 // O3, O4, O5, G3 are used as temp registers 1905 1906 const Register byte_count = O3; // bytes count to copy 1907 1908 __ sllx(count, LogBytesPerInt, byte_count); 1909 __ add(to, byte_count, end_to); // offset after last copied element 1910 1911 __ cmp(count, 5); // for short arrays, just do single element copy 1912 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 1913 __ delayed()->add(from, byte_count, end_from); 1914 1915 // copy 1 element to align 'to' on an 8 byte boundary 1916 __ andcc(end_to, 7, G0); 1917 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1918 __ delayed()->nop(); 1919 __ dec(count); 1920 __ dec(end_from, 4); 1921 __ dec(end_to, 4); 1922 __ ld(end_from, 0, O4); 1923 __ st(O4, end_to, 0); 1924 __ BIND(L_skip_alignment); 1925 1926 // Check if 'end_from' and 'end_to' has the same alignment. 1927 __ andcc(end_from, 7, G0); 1928 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1929 __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4 1930 1931 // copy with shift 4 elements (16 bytes) at a time 1932 // 1933 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1934 // to form 2 aligned 8-bytes chunks to store. 1935 // 1936 __ ldx(end_from, -4, O3); 1937 __ align(OptoLoopAlignment); 1938 __ BIND(L_copy_16_bytes); 1939 __ ldx(end_from, -12, O4); 1940 __ deccc(count, 4); 1941 __ ldx(end_from, -20, O5); 1942 __ dec(end_to, 16); 1943 __ dec(end_from, 16); 1944 __ srlx(O3, 32, O3); 1945 __ sllx(O4, 32, G3); 1946 __ bset(G3, O3); 1947 __ stx(O3, end_to, 8); 1948 __ srlx(O4, 32, O4); 1949 __ sllx(O5, 32, G3); 1950 __ bset(O4, G3); 1951 __ stx(G3, end_to, 0); 1952 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 1953 __ delayed()->mov(O5, O3); 1954 1955 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 1956 __ delayed()->inc(count, 4); 1957 1958 // copy 4 elements (16 bytes) at a time 1959 __ align(OptoLoopAlignment); 1960 __ BIND(L_aligned_copy); 1961 __ dec(end_from, 16); 1962 __ ldx(end_from, 8, O3); 1963 __ ldx(end_from, 0, O4); 1964 __ dec(end_to, 16); 1965 __ deccc(count, 4); 1966 __ stx(O3, end_to, 8); 1967 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1968 __ delayed()->stx(O4, end_to, 0); 1969 __ inc(count, 4); 1970 1971 // copy 1 element (4 bytes) at a time 1972 __ BIND(L_copy_4_bytes); 1973 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1974 __ BIND(L_copy_4_bytes_loop); 1975 __ dec(end_from, 4); 1976 __ dec(end_to, 4); 1977 __ ld(end_from, 0, O4); 1978 __ deccc(count); 1979 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop); 1980 __ delayed()->st(O4, end_to, 0); 1981 __ BIND(L_exit); 1982 } 1983 1984 // 1985 // Generate stub for conjoint int copy. If "aligned" is true, the 1986 // "from" and "to" addresses are assumed to be heapword aligned. 1987 // 1988 // Arguments for generated stub: 1989 // from: O0 1990 // to: O1 1991 // count: O2 treated as signed 1992 // 1993 address generate_conjoint_int_copy(bool aligned, address nooverlap_target, 1994 address *entry, const char *name) { 1995 __ align(CodeEntryAlignment); 1996 StubCodeMark mark(this, "StubRoutines", name); 1997 address start = __ pc(); 1998 1999 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2000 2001 if (entry != NULL) { 2002 *entry = __ pc(); 2003 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2004 BLOCK_COMMENT("Entry:"); 2005 } 2006 2007 array_overlap_test(nooverlap_target, 2); 2008 2009 generate_conjoint_int_copy_core(aligned); 2010 2011 // O3, O4 are used as temp registers 2012 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2013 __ retl(); 2014 __ delayed()->mov(G0, O0); // return 0 2015 return start; 2016 } 2017 2018 // 2019 // Helper methods for generate_disjoint_long_copy_core() 2020 // 2021 void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec, 2022 Label& L_loop, bool use_prefetch, bool use_bis) { 2023 __ align(OptoLoopAlignment); 2024 __ BIND(L_loop); 2025 for (int off = 0; off < 64; off += 16) { 2026 if (use_prefetch && (off & 31) == 0) { 2027 if (ArraycopySrcPrefetchDistance > 0) { 2028 __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads); 2029 } 2030 if (ArraycopyDstPrefetchDistance > 0) { 2031 __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads); 2032 } 2033 } 2034 __ ldx(from, off+0, O4); 2035 __ ldx(from, off+8, O5); 2036 if (use_bis) { 2037 __ stxa(O4, to, off+0); 2038 __ stxa(O5, to, off+8); 2039 } else { 2040 __ stx(O4, to, off+0); 2041 __ stx(O5, to, off+8); 2042 } 2043 } 2044 __ deccc(count, 8); 2045 __ inc(from, 64); 2046 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 2047 __ delayed()->inc(to, 64); 2048 } 2049 2050 // 2051 // Generate core code for disjoint long copy (and oop copy on 64-bit). 2052 // "aligned" is ignored, because we must make the stronger 2053 // assumption that both addresses are always 64-bit aligned. 2054 // 2055 // Arguments: 2056 // from: O0 2057 // to: O1 2058 // count: O2 treated as signed 2059 // 2060 // count -= 2; 2061 // if ( count >= 0 ) { // >= 2 elements 2062 // if ( count > 6) { // >= 8 elements 2063 // count -= 6; // original count - 8 2064 // do { 2065 // copy_8_elements; 2066 // count -= 8; 2067 // } while ( count >= 0 ); 2068 // count += 6; 2069 // } 2070 // if ( count >= 0 ) { // >= 2 elements 2071 // do { 2072 // copy_2_elements; 2073 // } while ( (count=count-2) >= 0 ); 2074 // } 2075 // } 2076 // count += 2; 2077 // if ( count != 0 ) { // 1 element left 2078 // copy_1_element; 2079 // } 2080 // 2081 void generate_disjoint_long_copy_core(bool aligned) { 2082 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2083 const Register from = O0; // source array address 2084 const Register to = O1; // destination array address 2085 const Register count = O2; // elements count 2086 const Register offset0 = O4; // element offset 2087 const Register offset8 = O5; // next element offset 2088 2089 __ deccc(count, 2); 2090 __ mov(G0, offset0); // offset from start of arrays (0) 2091 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2092 __ delayed()->add(offset0, 8, offset8); 2093 2094 // Copy by 64 bytes chunks 2095 2096 const Register from64 = O3; // source address 2097 const Register to64 = G3; // destination address 2098 __ subcc(count, 6, O3); 2099 __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes ); 2100 __ delayed()->mov(to, to64); 2101 // Now we can use O4(offset0), O5(offset8) as temps 2102 __ mov(O3, count); 2103 // count >= 0 (original count - 8) 2104 __ mov(from, from64); 2105 2106 disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop); 2107 2108 // Restore O4(offset0), O5(offset8) 2109 __ sub(from64, from, offset0); 2110 __ inccc(count, 6); // restore count 2111 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2112 __ delayed()->add(offset0, 8, offset8); 2113 2114 // Copy by 16 bytes chunks 2115 __ align(OptoLoopAlignment); 2116 __ BIND(L_copy_16_bytes); 2117 __ ldx(from, offset0, O3); 2118 __ ldx(from, offset8, G3); 2119 __ deccc(count, 2); 2120 __ stx(O3, to, offset0); 2121 __ inc(offset0, 16); 2122 __ stx(G3, to, offset8); 2123 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2124 __ delayed()->inc(offset8, 16); 2125 2126 // Copy last 8 bytes 2127 __ BIND(L_copy_8_bytes); 2128 __ inccc(count, 2); 2129 __ brx(Assembler::zero, true, Assembler::pn, L_exit ); 2130 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs 2131 __ ldx(from, offset0, O3); 2132 __ stx(O3, to, offset0); 2133 __ BIND(L_exit); 2134 } 2135 2136 // 2137 // Generate stub for disjoint long copy. 2138 // "aligned" is ignored, because we must make the stronger 2139 // assumption that both addresses are always 64-bit aligned. 2140 // 2141 // Arguments for generated stub: 2142 // from: O0 2143 // to: O1 2144 // count: O2 treated as signed 2145 // 2146 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { 2147 __ align(CodeEntryAlignment); 2148 StubCodeMark mark(this, "StubRoutines", name); 2149 address start = __ pc(); 2150 2151 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2152 2153 if (entry != NULL) { 2154 *entry = __ pc(); 2155 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2156 BLOCK_COMMENT("Entry:"); 2157 } 2158 2159 generate_disjoint_long_copy_core(aligned); 2160 2161 // O3, O4 are used as temp registers 2162 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2163 __ retl(); 2164 __ delayed()->mov(G0, O0); // return 0 2165 return start; 2166 } 2167 2168 // 2169 // Generate core code for conjoint long copy (and oop copy on 64-bit). 2170 // "aligned" is ignored, because we must make the stronger 2171 // assumption that both addresses are always 64-bit aligned. 2172 // 2173 // Arguments: 2174 // from: O0 2175 // to: O1 2176 // count: O2 treated as signed 2177 // 2178 void generate_conjoint_long_copy_core(bool aligned) { 2179 // Do reverse copy. 2180 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2181 const Register from = O0; // source array address 2182 const Register to = O1; // destination array address 2183 const Register count = O2; // elements count 2184 const Register offset8 = O4; // element offset 2185 const Register offset0 = O5; // previous element offset 2186 2187 __ subcc(count, 1, count); 2188 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes ); 2189 __ delayed()->sllx(count, LogBytesPerLong, offset8); 2190 __ sub(offset8, 8, offset0); 2191 __ align(OptoLoopAlignment); 2192 __ BIND(L_copy_16_bytes); 2193 __ ldx(from, offset8, O2); 2194 __ ldx(from, offset0, O3); 2195 __ stx(O2, to, offset8); 2196 __ deccc(offset8, 16); // use offset8 as counter 2197 __ stx(O3, to, offset0); 2198 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes); 2199 __ delayed()->dec(offset0, 16); 2200 2201 __ BIND(L_copy_8_bytes); 2202 __ brx(Assembler::negative, false, Assembler::pn, L_exit ); 2203 __ delayed()->nop(); 2204 __ ldx(from, 0, O3); 2205 __ stx(O3, to, 0); 2206 __ BIND(L_exit); 2207 } 2208 2209 // Generate stub for conjoint long copy. 2210 // "aligned" is ignored, because we must make the stronger 2211 // assumption that both addresses are always 64-bit aligned. 2212 // 2213 // Arguments for generated stub: 2214 // from: O0 2215 // to: O1 2216 // count: O2 treated as signed 2217 // 2218 address generate_conjoint_long_copy(bool aligned, address nooverlap_target, 2219 address *entry, const char *name) { 2220 __ align(CodeEntryAlignment); 2221 StubCodeMark mark(this, "StubRoutines", name); 2222 address start = __ pc(); 2223 2224 assert(aligned, "Should always be aligned"); 2225 2226 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2227 2228 if (entry != NULL) { 2229 *entry = __ pc(); 2230 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2231 BLOCK_COMMENT("Entry:"); 2232 } 2233 2234 array_overlap_test(nooverlap_target, 3); 2235 2236 generate_conjoint_long_copy_core(aligned); 2237 2238 // O3, O4 are used as temp registers 2239 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4); 2240 __ retl(); 2241 __ delayed()->mov(G0, O0); // return 0 2242 return start; 2243 } 2244 2245 // Generate stub for disjoint oop copy. If "aligned" is true, the 2246 // "from" and "to" addresses are assumed to be heapword aligned. 2247 // 2248 // Arguments for generated stub: 2249 // from: O0 2250 // to: O1 2251 // count: O2 treated as signed 2252 // 2253 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, 2254 bool dest_uninitialized = false) { 2255 2256 const Register from = O0; // source array address 2257 const Register to = O1; // destination array address 2258 const Register count = O2; // elements count 2259 2260 __ align(CodeEntryAlignment); 2261 StubCodeMark mark(this, "StubRoutines", name); 2262 address start = __ pc(); 2263 2264 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2265 2266 if (entry != NULL) { 2267 *entry = __ pc(); 2268 // caller can pass a 64-bit byte count here 2269 BLOCK_COMMENT("Entry:"); 2270 } 2271 2272 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2273 DecoratorSet decorators = ARRAYCOPY_DISJOINT; 2274 if (dest_uninitialized) { 2275 decorators |= AS_DEST_NOT_INITIALIZED; 2276 2277 } 2278 if (aligned) { 2279 decorators |= ARRAYCOPY_ALIGNED; 2280 } 2281 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); 2282 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2283 if (UseCompressedOops) { 2284 generate_disjoint_int_copy_core(aligned); 2285 } else { 2286 generate_disjoint_long_copy_core(aligned); 2287 } 2288 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); 2289 2290 // O3, O4 are used as temp registers 2291 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2292 __ retl(); 2293 __ delayed()->mov(G0, O0); // return 0 2294 return start; 2295 } 2296 2297 // Generate stub for conjoint oop copy. If "aligned" is true, the 2298 // "from" and "to" addresses are assumed to be heapword aligned. 2299 // 2300 // Arguments for generated stub: 2301 // from: O0 2302 // to: O1 2303 // count: O2 treated as signed 2304 // 2305 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, 2306 address *entry, const char *name, 2307 bool dest_uninitialized = false) { 2308 2309 const Register from = O0; // source array address 2310 const Register to = O1; // destination array address 2311 const Register count = O2; // elements count 2312 2313 __ align(CodeEntryAlignment); 2314 StubCodeMark mark(this, "StubRoutines", name); 2315 address start = __ pc(); 2316 2317 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2318 2319 if (entry != NULL) { 2320 *entry = __ pc(); 2321 // caller can pass a 64-bit byte count here 2322 BLOCK_COMMENT("Entry:"); 2323 } 2324 2325 array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 2326 2327 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2328 DecoratorSet decorators = 0; 2329 if (dest_uninitialized) { 2330 decorators |= AS_DEST_NOT_INITIALIZED; 2331 } 2332 if (aligned) { 2333 decorators |= ARRAYCOPY_ALIGNED; 2334 } 2335 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); 2336 if (UseCompressedOops) { 2337 generate_conjoint_int_copy_core(aligned); 2338 } else { 2339 generate_conjoint_long_copy_core(aligned); 2340 } 2341 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); 2342 2343 // O3, O4 are used as temp registers 2344 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2345 __ retl(); 2346 __ delayed()->mov(G0, O0); // return 0 2347 return start; 2348 } 2349 2350 2351 // Helper for generating a dynamic type check. 2352 // Smashes only the given temp registers. 2353 void generate_type_check(Register sub_klass, 2354 Register super_check_offset, 2355 Register super_klass, 2356 Register temp, 2357 Label& L_success) { 2358 assert_different_registers(sub_klass, super_check_offset, super_klass, temp); 2359 2360 BLOCK_COMMENT("type_check:"); 2361 2362 Label L_miss, L_pop_to_miss; 2363 2364 assert_clean_int(super_check_offset, temp); 2365 2366 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg, 2367 &L_success, &L_miss, NULL, 2368 super_check_offset); 2369 2370 BLOCK_COMMENT("type_check_slow_path:"); 2371 __ save_frame(0); 2372 __ check_klass_subtype_slow_path(sub_klass->after_save(), 2373 super_klass->after_save(), 2374 L0, L1, L2, L4, 2375 NULL, &L_pop_to_miss); 2376 __ ba(L_success); 2377 __ delayed()->restore(); 2378 2379 __ bind(L_pop_to_miss); 2380 __ restore(); 2381 2382 // Fall through on failure! 2383 __ BIND(L_miss); 2384 } 2385 2386 2387 // Generate stub for checked oop copy. 2388 // 2389 // Arguments for generated stub: 2390 // from: O0 2391 // to: O1 2392 // count: O2 treated as signed 2393 // ckoff: O3 (super_check_offset) 2394 // ckval: O4 (super_klass) 2395 // ret: O0 zero for success; (-1^K) where K is partial transfer count 2396 // 2397 address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) { 2398 2399 const Register O0_from = O0; // source array address 2400 const Register O1_to = O1; // destination array address 2401 const Register O2_count = O2; // elements count 2402 const Register O3_ckoff = O3; // super_check_offset 2403 const Register O4_ckval = O4; // super_klass 2404 2405 const Register O5_offset = O5; // loop var, with stride wordSize 2406 const Register G1_remain = G1; // loop var, with stride -1 2407 const Register G3_oop = G3; // actual oop copied 2408 const Register G4_klass = G4; // oop._klass 2409 const Register G5_super = G5; // oop._klass._primary_supers[ckval] 2410 2411 __ align(CodeEntryAlignment); 2412 StubCodeMark mark(this, "StubRoutines", name); 2413 address start = __ pc(); 2414 2415 #ifdef ASSERT 2416 // We sometimes save a frame (see generate_type_check below). 2417 // If this will cause trouble, let's fail now instead of later. 2418 __ save_frame(0); 2419 __ restore(); 2420 #endif 2421 2422 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int. 2423 2424 #ifdef ASSERT 2425 // caller guarantees that the arrays really are different 2426 // otherwise, we would have to make conjoint checks 2427 { Label L; 2428 __ mov(O3, G1); // spill: overlap test smashes O3 2429 __ mov(O4, G4); // spill: overlap test smashes O4 2430 array_overlap_test(L, LogBytesPerHeapOop); 2431 __ stop("checkcast_copy within a single array"); 2432 __ bind(L); 2433 __ mov(G1, O3); 2434 __ mov(G4, O4); 2435 } 2436 #endif //ASSERT 2437 2438 if (entry != NULL) { 2439 *entry = __ pc(); 2440 // caller can pass a 64-bit byte count here (from generic stub) 2441 BLOCK_COMMENT("Entry:"); 2442 } 2443 2444 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2445 DecoratorSet decorators = ARRAYCOPY_CHECKCAST; 2446 if (dest_uninitialized) { 2447 decorators |= AS_DEST_NOT_INITIALIZED; 2448 } 2449 2450 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count); 2451 2452 Label load_element, store_element, do_epilogue, fail, done; 2453 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it 2454 __ brx(Assembler::notZero, false, Assembler::pt, load_element); 2455 __ delayed()->mov(G0, O5_offset); // offset from start of arrays 2456 2457 // Empty array: Nothing to do. 2458 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2459 __ retl(); 2460 __ delayed()->set(0, O0); // return 0 on (trivial) success 2461 2462 // ======== begin loop ======== 2463 // (Loop is rotated; its entry is load_element.) 2464 // Loop variables: 2465 // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays 2466 // (O2 = len; O2 != 0; O2--) --- number of oops *remaining* 2467 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super 2468 __ align(OptoLoopAlignment); 2469 2470 __ BIND(store_element); 2471 __ deccc(G1_remain); // decrement the count 2472 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop 2473 __ inc(O5_offset, heapOopSize); // step to next offset 2474 __ brx(Assembler::zero, true, Assembler::pt, do_epilogue); 2475 __ delayed()->set(0, O0); // return -1 on success 2476 2477 // ======== loop entry is here ======== 2478 __ BIND(load_element); 2479 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop 2480 __ br_null_short(G3_oop, Assembler::pt, store_element); 2481 2482 __ load_klass(G3_oop, G4_klass); // query the object klass 2483 2484 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super, 2485 // branch to this on success: 2486 store_element); 2487 // ======== end loop ======== 2488 2489 // It was a real error; we must depend on the caller to finish the job. 2490 // Register G1 has number of *remaining* oops, O2 number of *total* oops. 2491 // Emit GC store barriers for the oops we have copied (O2 minus G1), 2492 // and report their number to the caller. 2493 __ BIND(fail); 2494 __ subcc(O2_count, G1_remain, O2_count); 2495 __ brx(Assembler::zero, false, Assembler::pt, done); 2496 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller 2497 2498 __ BIND(do_epilogue); 2499 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count); 2500 2501 __ BIND(done); 2502 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4); 2503 __ retl(); 2504 __ delayed()->nop(); // return value in 00 2505 2506 return start; 2507 } 2508 2509 2510 // Generate 'unsafe' array copy stub 2511 // Though just as safe as the other stubs, it takes an unscaled 2512 // size_t argument instead of an element count. 2513 // 2514 // Arguments for generated stub: 2515 // from: O0 2516 // to: O1 2517 // count: O2 byte count, treated as ssize_t, can be zero 2518 // 2519 // Examines the alignment of the operands and dispatches 2520 // to a long, int, short, or byte copy loop. 2521 // 2522 address generate_unsafe_copy(const char* name, 2523 address byte_copy_entry, 2524 address short_copy_entry, 2525 address int_copy_entry, 2526 address long_copy_entry) { 2527 2528 const Register O0_from = O0; // source array address 2529 const Register O1_to = O1; // destination array address 2530 const Register O2_count = O2; // elements count 2531 2532 const Register G1_bits = G1; // test copy of low bits 2533 2534 __ align(CodeEntryAlignment); 2535 StubCodeMark mark(this, "StubRoutines", name); 2536 address start = __ pc(); 2537 2538 // bump this on entry, not on exit: 2539 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3); 2540 2541 __ or3(O0_from, O1_to, G1_bits); 2542 __ or3(O2_count, G1_bits, G1_bits); 2543 2544 __ btst(BytesPerLong-1, G1_bits); 2545 __ br(Assembler::zero, true, Assembler::pt, 2546 long_copy_entry, relocInfo::runtime_call_type); 2547 // scale the count on the way out: 2548 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count); 2549 2550 __ btst(BytesPerInt-1, G1_bits); 2551 __ br(Assembler::zero, true, Assembler::pt, 2552 int_copy_entry, relocInfo::runtime_call_type); 2553 // scale the count on the way out: 2554 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count); 2555 2556 __ btst(BytesPerShort-1, G1_bits); 2557 __ br(Assembler::zero, true, Assembler::pt, 2558 short_copy_entry, relocInfo::runtime_call_type); 2559 // scale the count on the way out: 2560 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count); 2561 2562 __ br(Assembler::always, false, Assembler::pt, 2563 byte_copy_entry, relocInfo::runtime_call_type); 2564 __ delayed()->nop(); 2565 2566 return start; 2567 } 2568 2569 2570 // Perform range checks on the proposed arraycopy. 2571 // Kills the two temps, but nothing else. 2572 // Also, clean the sign bits of src_pos and dst_pos. 2573 void arraycopy_range_checks(Register src, // source array oop (O0) 2574 Register src_pos, // source position (O1) 2575 Register dst, // destination array oo (O2) 2576 Register dst_pos, // destination position (O3) 2577 Register length, // length of copy (O4) 2578 Register temp1, Register temp2, 2579 Label& L_failed) { 2580 BLOCK_COMMENT("arraycopy_range_checks:"); 2581 2582 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2583 2584 const Register array_length = temp1; // scratch 2585 const Register end_pos = temp2; // scratch 2586 2587 // Note: This next instruction may be in the delay slot of a branch: 2588 __ add(length, src_pos, end_pos); // src_pos + length 2589 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); 2590 __ cmp(end_pos, array_length); 2591 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2592 2593 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2594 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length 2595 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); 2596 __ cmp(end_pos, array_length); 2597 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2598 2599 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2600 // Move with sign extension can be used since they are positive. 2601 __ delayed()->signx(src_pos, src_pos); 2602 __ signx(dst_pos, dst_pos); 2603 2604 BLOCK_COMMENT("arraycopy_range_checks done"); 2605 } 2606 2607 2608 // 2609 // Generate generic array copy stubs 2610 // 2611 // Input: 2612 // O0 - src oop 2613 // O1 - src_pos 2614 // O2 - dst oop 2615 // O3 - dst_pos 2616 // O4 - element count 2617 // 2618 // Output: 2619 // O0 == 0 - success 2620 // O0 == -1 - need to call System.arraycopy 2621 // 2622 address generate_generic_copy(const char *name, 2623 address entry_jbyte_arraycopy, 2624 address entry_jshort_arraycopy, 2625 address entry_jint_arraycopy, 2626 address entry_oop_arraycopy, 2627 address entry_jlong_arraycopy, 2628 address entry_checkcast_arraycopy) { 2629 Label L_failed, L_objArray; 2630 2631 // Input registers 2632 const Register src = O0; // source array oop 2633 const Register src_pos = O1; // source position 2634 const Register dst = O2; // destination array oop 2635 const Register dst_pos = O3; // destination position 2636 const Register length = O4; // elements count 2637 2638 // registers used as temp 2639 const Register G3_src_klass = G3; // source array klass 2640 const Register G4_dst_klass = G4; // destination array klass 2641 const Register G5_lh = G5; // layout handler 2642 const Register O5_temp = O5; 2643 2644 __ align(CodeEntryAlignment); 2645 StubCodeMark mark(this, "StubRoutines", name); 2646 address start = __ pc(); 2647 2648 // bump this on entry, not on exit: 2649 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3); 2650 2651 // In principle, the int arguments could be dirty. 2652 //assert_clean_int(src_pos, G1); 2653 //assert_clean_int(dst_pos, G1); 2654 //assert_clean_int(length, G1); 2655 2656 //----------------------------------------------------------------------- 2657 // Assembler stubs will be used for this call to arraycopy 2658 // if the following conditions are met: 2659 // 2660 // (1) src and dst must not be null. 2661 // (2) src_pos must not be negative. 2662 // (3) dst_pos must not be negative. 2663 // (4) length must not be negative. 2664 // (5) src klass and dst klass should be the same and not NULL. 2665 // (6) src and dst should be arrays. 2666 // (7) src_pos + length must not exceed length of src. 2667 // (8) dst_pos + length must not exceed length of dst. 2668 BLOCK_COMMENT("arraycopy initial argument checks"); 2669 2670 // if (src == NULL) return -1; 2671 __ br_null(src, false, Assembler::pn, L_failed); 2672 2673 // if (src_pos < 0) return -1; 2674 __ delayed()->tst(src_pos); 2675 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2676 __ delayed()->nop(); 2677 2678 // if (dst == NULL) return -1; 2679 __ br_null(dst, false, Assembler::pn, L_failed); 2680 2681 // if (dst_pos < 0) return -1; 2682 __ delayed()->tst(dst_pos); 2683 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2684 2685 // if (length < 0) return -1; 2686 __ delayed()->tst(length); 2687 __ br(Assembler::negative, false, Assembler::pn, L_failed); 2688 2689 BLOCK_COMMENT("arraycopy argument klass checks"); 2690 // get src->klass() 2691 if (UseCompressedClassPointers) { 2692 __ delayed()->nop(); // ??? not good 2693 __ load_klass(src, G3_src_klass); 2694 } else { 2695 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass); 2696 } 2697 2698 #ifdef ASSERT 2699 // assert(src->klass() != NULL); 2700 BLOCK_COMMENT("assert klasses not null"); 2701 { Label L_a, L_b; 2702 __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL 2703 __ bind(L_a); 2704 __ stop("broken null klass"); 2705 __ bind(L_b); 2706 __ load_klass(dst, G4_dst_klass); 2707 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also 2708 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp 2709 BLOCK_COMMENT("assert done"); 2710 } 2711 #endif 2712 2713 // Load layout helper 2714 // 2715 // |array_tag| | header_size | element_type | |log2_element_size| 2716 // 32 30 24 16 8 2 0 2717 // 2718 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2719 // 2720 2721 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2722 2723 // Load 32-bits signed value. Use br() instruction with it to check icc. 2724 __ lduw(G3_src_klass, lh_offset, G5_lh); 2725 2726 if (UseCompressedClassPointers) { 2727 __ load_klass(dst, G4_dst_klass); 2728 } 2729 // Handle objArrays completely differently... 2730 juint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2731 __ set(objArray_lh, O5_temp); 2732 __ cmp(G5_lh, O5_temp); 2733 __ br(Assembler::equal, false, Assembler::pt, L_objArray); 2734 if (UseCompressedClassPointers) { 2735 __ delayed()->nop(); 2736 } else { 2737 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); 2738 } 2739 2740 // if (src->klass() != dst->klass()) return -1; 2741 __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed); 2742 2743 // if (!src->is_Array()) return -1; 2744 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0 2745 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed); 2746 2747 // At this point, it is known to be a typeArray (array_tag 0x3). 2748 #ifdef ASSERT 2749 __ delayed()->nop(); 2750 { Label L; 2751 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift); 2752 __ set(lh_prim_tag_in_place, O5_temp); 2753 __ cmp(G5_lh, O5_temp); 2754 __ br(Assembler::greaterEqual, false, Assembler::pt, L); 2755 __ delayed()->nop(); 2756 __ stop("must be a primitive array"); 2757 __ bind(L); 2758 } 2759 #else 2760 __ delayed(); // match next insn to prev branch 2761 #endif 2762 2763 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2764 O5_temp, G4_dst_klass, L_failed); 2765 2766 // TypeArrayKlass 2767 // 2768 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2769 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2770 // 2771 2772 const Register G4_offset = G4_dst_klass; // array offset 2773 const Register G3_elsize = G3_src_klass; // log2 element size 2774 2775 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset); 2776 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset 2777 __ add(src, G4_offset, src); // src array offset 2778 __ add(dst, G4_offset, dst); // dst array offset 2779 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size 2780 2781 // next registers should be set before the jump to corresponding stub 2782 const Register from = O0; // source array address 2783 const Register to = O1; // destination array address 2784 const Register count = O2; // elements count 2785 2786 // 'from', 'to', 'count' registers should be set in this order 2787 // since they are the same as 'src', 'src_pos', 'dst'. 2788 2789 BLOCK_COMMENT("scale indexes to element size"); 2790 __ sll_ptr(src_pos, G3_elsize, src_pos); 2791 __ sll_ptr(dst_pos, G3_elsize, dst_pos); 2792 __ add(src, src_pos, from); // src_addr 2793 __ add(dst, dst_pos, to); // dst_addr 2794 2795 BLOCK_COMMENT("choose copy loop based on element size"); 2796 __ cmp(G3_elsize, 0); 2797 __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy); 2798 __ delayed()->signx(length, count); // length 2799 2800 __ cmp(G3_elsize, LogBytesPerShort); 2801 __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy); 2802 __ delayed()->signx(length, count); // length 2803 2804 __ cmp(G3_elsize, LogBytesPerInt); 2805 __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy); 2806 __ delayed()->signx(length, count); // length 2807 #ifdef ASSERT 2808 { Label L; 2809 __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L); 2810 __ stop("must be long copy, but elsize is wrong"); 2811 __ bind(L); 2812 } 2813 #endif 2814 __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy); 2815 __ delayed()->signx(length, count); // length 2816 2817 // ObjArrayKlass 2818 __ BIND(L_objArray); 2819 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length 2820 2821 Label L_plain_copy, L_checkcast_copy; 2822 // test array classes for subtyping 2823 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality 2824 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy); 2825 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below 2826 2827 // Identically typed arrays can be copied without element-wise checks. 2828 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2829 O5_temp, G5_lh, L_failed); 2830 2831 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 2832 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 2833 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 2834 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 2835 __ add(src, src_pos, from); // src_addr 2836 __ add(dst, dst_pos, to); // dst_addr 2837 __ BIND(L_plain_copy); 2838 __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy); 2839 __ delayed()->signx(length, count); // length 2840 2841 __ BIND(L_checkcast_copy); 2842 // live at this point: G3_src_klass, G4_dst_klass 2843 { 2844 // Before looking at dst.length, make sure dst is also an objArray. 2845 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot 2846 __ cmp(G5_lh, O5_temp); 2847 __ br(Assembler::notEqual, false, Assembler::pn, L_failed); 2848 2849 // It is safe to examine both src.length and dst.length. 2850 __ delayed(); // match next insn to prev branch 2851 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2852 O5_temp, G5_lh, L_failed); 2853 2854 // Marshal the base address arguments now, freeing registers. 2855 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset 2856 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset 2857 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); 2858 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); 2859 __ add(src, src_pos, from); // src_addr 2860 __ add(dst, dst_pos, to); // dst_addr 2861 __ signx(length, count); // length (reloaded) 2862 2863 Register sco_temp = O3; // this register is free now 2864 assert_different_registers(from, to, count, sco_temp, 2865 G4_dst_klass, G3_src_klass); 2866 2867 // Generate the type check. 2868 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2869 __ lduw(G4_dst_klass, sco_offset, sco_temp); 2870 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass, 2871 O5_temp, L_plain_copy); 2872 2873 // Fetch destination element klass from the ObjArrayKlass header. 2874 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2875 2876 // the checkcast_copy loop needs two extra arguments: 2877 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass 2878 // lduw(O4, sco_offset, O3); // sco of elem klass 2879 2880 __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy); 2881 __ delayed()->lduw(O4, sco_offset, O3); 2882 } 2883 2884 __ BIND(L_failed); 2885 __ retl(); 2886 __ delayed()->sub(G0, 1, O0); // return -1 2887 return start; 2888 } 2889 2890 // 2891 // Generate stub for heap zeroing. 2892 // "to" address is aligned to jlong (8 bytes). 2893 // 2894 // Arguments for generated stub: 2895 // to: O0 2896 // count: O1 treated as signed (count of HeapWord) 2897 // count could be 0 2898 // 2899 address generate_zero_aligned_words(const char* name) { 2900 __ align(CodeEntryAlignment); 2901 StubCodeMark mark(this, "StubRoutines", name); 2902 address start = __ pc(); 2903 2904 const Register to = O0; // source array address 2905 const Register count = O1; // HeapWords count 2906 const Register temp = O2; // scratch 2907 2908 Label Ldone; 2909 __ sllx(count, LogHeapWordSize, count); // to bytes count 2910 // Use BIS for zeroing 2911 __ bis_zeroing(to, count, temp, Ldone); 2912 __ bind(Ldone); 2913 __ retl(); 2914 __ delayed()->nop(); 2915 return start; 2916 } 2917 2918 void generate_arraycopy_stubs() { 2919 address entry; 2920 address entry_jbyte_arraycopy; 2921 address entry_jshort_arraycopy; 2922 address entry_jint_arraycopy; 2923 address entry_oop_arraycopy; 2924 address entry_jlong_arraycopy; 2925 address entry_checkcast_arraycopy; 2926 2927 //*** jbyte 2928 // Always need aligned and unaligned versions 2929 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2930 "jbyte_disjoint_arraycopy"); 2931 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, 2932 &entry_jbyte_arraycopy, 2933 "jbyte_arraycopy"); 2934 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, 2935 "arrayof_jbyte_disjoint_arraycopy"); 2936 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, 2937 "arrayof_jbyte_arraycopy"); 2938 2939 //*** jshort 2940 // Always need aligned and unaligned versions 2941 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2942 "jshort_disjoint_arraycopy"); 2943 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, 2944 &entry_jshort_arraycopy, 2945 "jshort_arraycopy"); 2946 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, 2947 "arrayof_jshort_disjoint_arraycopy"); 2948 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, 2949 "arrayof_jshort_arraycopy"); 2950 2951 //*** jint 2952 // Aligned versions 2953 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, 2954 "arrayof_jint_disjoint_arraycopy"); 2955 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, 2956 "arrayof_jint_arraycopy"); 2957 // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 2958 // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). 2959 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, 2960 "jint_disjoint_arraycopy"); 2961 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, 2962 &entry_jint_arraycopy, 2963 "jint_arraycopy"); 2964 2965 //*** jlong 2966 // It is always aligned 2967 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, 2968 "arrayof_jlong_disjoint_arraycopy"); 2969 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, 2970 "arrayof_jlong_arraycopy"); 2971 StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; 2972 StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; 2973 2974 2975 //*** oops 2976 // Aligned versions 2977 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, 2978 "arrayof_oop_disjoint_arraycopy"); 2979 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, 2980 "arrayof_oop_arraycopy"); 2981 // Aligned versions without pre-barriers 2982 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry, 2983 "arrayof_oop_disjoint_arraycopy_uninit", 2984 /*dest_uninitialized*/true); 2985 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, 2986 "arrayof_oop_arraycopy_uninit", 2987 /*dest_uninitialized*/true); 2988 if (UseCompressedOops) { 2989 // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. 2990 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, 2991 "oop_disjoint_arraycopy"); 2992 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, 2993 "oop_arraycopy"); 2994 // Unaligned versions without pre-barriers 2995 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry, 2996 "oop_disjoint_arraycopy_uninit", 2997 /*dest_uninitialized*/true); 2998 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL, 2999 "oop_arraycopy_uninit", 3000 /*dest_uninitialized*/true); 3001 } else { 3002 // oop arraycopy is always aligned on 32bit and 64bit without compressed oops 3003 StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; 3004 StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; 3005 StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; 3006 StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; 3007 } 3008 3009 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3010 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3011 /*dest_uninitialized*/true); 3012 3013 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3014 entry_jbyte_arraycopy, 3015 entry_jshort_arraycopy, 3016 entry_jint_arraycopy, 3017 entry_jlong_arraycopy); 3018 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3019 entry_jbyte_arraycopy, 3020 entry_jshort_arraycopy, 3021 entry_jint_arraycopy, 3022 entry_oop_arraycopy, 3023 entry_jlong_arraycopy, 3024 entry_checkcast_arraycopy); 3025 3026 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3027 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3028 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3029 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3030 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3031 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3032 3033 if (UseBlockZeroing) { 3034 StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words"); 3035 } 3036 } 3037 3038 address generate_aescrypt_encryptBlock() { 3039 // required since we read expanded key 'int' array starting first element without alignment considerations 3040 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3041 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3042 __ align(CodeEntryAlignment); 3043 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3044 Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output; 3045 address start = __ pc(); 3046 Register from = O0; // source byte array 3047 Register to = O1; // destination byte array 3048 Register key = O2; // expanded key array 3049 const Register keylen = O4; //reg for storing expanded key array length 3050 3051 // read expanded key length 3052 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3053 3054 // Method to address arbitrary alignment for load instructions: 3055 // Check last 3 bits of 'from' address to see if it is aligned to 8-byte boundary 3056 // If zero/aligned then continue with double FP load instructions 3057 // If not zero/mis-aligned then alignaddr will set GSR.align with number of bytes to skip during faligndata 3058 // alignaddr will also convert arbitrary aligned 'from' address to nearest 8-byte aligned address 3059 // load 3 * 8-byte components (to read 16 bytes input) in 3 different FP regs starting at this aligned address 3060 // faligndata will then extract (based on GSR.align value) the appropriate 8 bytes from the 2 source regs 3061 3062 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3063 __ andcc(from, 7, G0); 3064 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input); 3065 __ delayed()->alignaddr(from, G0, from); 3066 3067 // aligned case: load input into F54-F56 3068 __ ldf(FloatRegisterImpl::D, from, 0, F54); 3069 __ ldf(FloatRegisterImpl::D, from, 8, F56); 3070 __ ba_short(L_load_expanded_key); 3071 3072 __ BIND(L_load_misaligned_input); 3073 __ ldf(FloatRegisterImpl::D, from, 0, F54); 3074 __ ldf(FloatRegisterImpl::D, from, 8, F56); 3075 __ ldf(FloatRegisterImpl::D, from, 16, F58); 3076 __ faligndata(F54, F56, F54); 3077 __ faligndata(F56, F58, F56); 3078 3079 __ BIND(L_load_expanded_key); 3080 // Since we load expanded key buffers starting first element, 8-byte alignment is guaranteed 3081 for ( int i = 0; i <= 38; i += 2 ) { 3082 __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i)); 3083 } 3084 3085 // perform cipher transformation 3086 __ fxor(FloatRegisterImpl::D, F0, F54, F54); 3087 __ fxor(FloatRegisterImpl::D, F2, F56, F56); 3088 // rounds 1 through 8 3089 for ( int i = 4; i <= 28; i += 8 ) { 3090 __ aes_eround01(as_FloatRegister(i), F54, F56, F58); 3091 __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60); 3092 __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54); 3093 __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56); 3094 } 3095 __ aes_eround01(F36, F54, F56, F58); //round 9 3096 __ aes_eround23(F38, F54, F56, F60); 3097 3098 // 128-bit original key size 3099 __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit); 3100 3101 for ( int i = 40; i <= 50; i += 2 ) { 3102 __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) ); 3103 } 3104 __ aes_eround01(F40, F58, F60, F54); //round 10 3105 __ aes_eround23(F42, F58, F60, F56); 3106 __ aes_eround01(F44, F54, F56, F58); //round 11 3107 __ aes_eround23(F46, F54, F56, F60); 3108 3109 // 192-bit original key size 3110 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput); 3111 3112 __ ldf(FloatRegisterImpl::D, key, 208, F52); 3113 __ aes_eround01(F48, F58, F60, F54); //round 12 3114 __ aes_eround23(F50, F58, F60, F56); 3115 __ ldf(FloatRegisterImpl::D, key, 216, F46); 3116 __ ldf(FloatRegisterImpl::D, key, 224, F48); 3117 __ ldf(FloatRegisterImpl::D, key, 232, F50); 3118 __ aes_eround01(F52, F54, F56, F58); //round 13 3119 __ aes_eround23(F46, F54, F56, F60); 3120 __ ba_short(L_storeOutput); 3121 3122 __ BIND(L_doLast128bit); 3123 __ ldf(FloatRegisterImpl::D, key, 160, F48); 3124 __ ldf(FloatRegisterImpl::D, key, 168, F50); 3125 3126 __ BIND(L_storeOutput); 3127 // perform last round of encryption common for all key sizes 3128 __ aes_eround01_l(F48, F58, F60, F54); //last round 3129 __ aes_eround23_l(F50, F58, F60, F56); 3130 3131 // Method to address arbitrary alignment for store instructions: 3132 // Check last 3 bits of 'dest' address to see if it is aligned to 8-byte boundary 3133 // If zero/aligned then continue with double FP store instructions 3134 // If not zero/mis-aligned then edge8n will generate edge mask in result reg (O3 in below case) 3135 // Example: If dest address is 0x07 and nearest 8-byte aligned address is 0x00 then edge mask will be 00000001 3136 // Compute (8-n) where n is # of bytes skipped by partial store(stpartialf) inst from edge mask, n=7 in this case 3137 // We get the value of n from the andcc that checks 'dest' alignment. n is available in O5 in below case. 3138 // Set GSR.align to (8-n) using alignaddr 3139 // Circular byte shift store values by n places so that the original bytes are at correct position for stpartialf 3140 // Set the arbitrarily aligned 'dest' address to nearest 8-byte aligned address 3141 // Store (partial) the original first (8-n) bytes starting at the original 'dest' address 3142 // Negate the edge mask so that the subsequent stpartialf can store the original (8-n-1)th through 8th bytes at appropriate address 3143 // We need to execute this process for both the 8-byte result values 3144 3145 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3146 __ andcc(to, 7, O5); 3147 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output); 3148 __ delayed()->edge8n(to, G0, O3); 3149 3150 // aligned case: store output into the destination array 3151 __ stf(FloatRegisterImpl::D, F54, to, 0); 3152 __ retl(); 3153 __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8); 3154 3155 __ BIND(L_store_misaligned_output); 3156 __ add(to, 8, O4); 3157 __ mov(8, O2); 3158 __ sub(O2, O5, O2); 3159 __ alignaddr(O2, G0, O2); 3160 __ faligndata(F54, F54, F54); 3161 __ faligndata(F56, F56, F56); 3162 __ and3(to, -8, to); 3163 __ and3(O4, -8, O4); 3164 __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY); 3165 __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY); 3166 __ add(to, 8, to); 3167 __ add(O4, 8, O4); 3168 __ orn(G0, O3, O3); 3169 __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY); 3170 __ retl(); 3171 __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY); 3172 3173 return start; 3174 } 3175 3176 address generate_aescrypt_decryptBlock() { 3177 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3178 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3179 // required since we read original key 'byte' array as well in the decryption stubs 3180 assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0, 3181 "the following code assumes that first element of a byte array is aligned to 8 bytes"); 3182 __ align(CodeEntryAlignment); 3183 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3184 address start = __ pc(); 3185 Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input; 3186 Label L_256bit_transform, L_common_transform, L_store_misaligned_output; 3187 Register from = O0; // source byte array 3188 Register to = O1; // destination byte array 3189 Register key = O2; // expanded key array 3190 Register original_key = O3; // original key array only required during decryption 3191 const Register keylen = O4; // reg for storing expanded key array length 3192 3193 // read expanded key array length 3194 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3195 3196 // save 'from' since we may need to recheck alignment in case of 256-bit decryption 3197 __ mov(from, G1); 3198 3199 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3200 __ andcc(from, 7, G0); 3201 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input); 3202 __ delayed()->alignaddr(from, G0, from); 3203 3204 // aligned case: load input into F52-F54 3205 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3206 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3207 __ ba_short(L_load_original_key); 3208 3209 __ BIND(L_load_misaligned_input); 3210 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3211 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3212 __ ldf(FloatRegisterImpl::D, from, 16, F56); 3213 __ faligndata(F52, F54, F52); 3214 __ faligndata(F54, F56, F54); 3215 3216 __ BIND(L_load_original_key); 3217 // load original key from SunJCE expanded decryption key 3218 // Since we load original key buffer starting first element, 8-byte alignment is guaranteed 3219 for ( int i = 0; i <= 3; i++ ) { 3220 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3221 } 3222 3223 // 256-bit original key size 3224 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); 3225 3226 // 192-bit original key size 3227 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); 3228 3229 // 128-bit original key size 3230 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3231 for ( int i = 0; i <= 36; i += 4 ) { 3232 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); 3233 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); 3234 } 3235 3236 // perform 128-bit key specific inverse cipher transformation 3237 __ fxor(FloatRegisterImpl::D, F42, F54, F54); 3238 __ fxor(FloatRegisterImpl::D, F40, F52, F52); 3239 __ ba_short(L_common_transform); 3240 3241 __ BIND(L_expand192bit); 3242 3243 // start loading rest of the 192-bit key 3244 __ ldf(FloatRegisterImpl::S, original_key, 16, F4); 3245 __ ldf(FloatRegisterImpl::S, original_key, 20, F5); 3246 3247 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3248 for ( int i = 0; i <= 36; i += 6 ) { 3249 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); 3250 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); 3251 __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3252 } 3253 __ aes_kexpand1(F42, F46, 7, F48); 3254 __ aes_kexpand2(F44, F48, F50); 3255 3256 // perform 192-bit key specific inverse cipher transformation 3257 __ fxor(FloatRegisterImpl::D, F50, F54, F54); 3258 __ fxor(FloatRegisterImpl::D, F48, F52, F52); 3259 __ aes_dround23(F46, F52, F54, F58); 3260 __ aes_dround01(F44, F52, F54, F56); 3261 __ aes_dround23(F42, F56, F58, F54); 3262 __ aes_dround01(F40, F56, F58, F52); 3263 __ ba_short(L_common_transform); 3264 3265 __ BIND(L_expand256bit); 3266 3267 // load rest of the 256-bit key 3268 for ( int i = 4; i <= 7; i++ ) { 3269 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3270 } 3271 3272 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3273 for ( int i = 0; i <= 40; i += 8 ) { 3274 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); 3275 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3276 __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); 3277 __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); 3278 } 3279 __ aes_kexpand1(F48, F54, 6, F56); 3280 __ aes_kexpand2(F50, F56, F58); 3281 3282 for ( int i = 0; i <= 6; i += 2 ) { 3283 __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i)); 3284 } 3285 3286 // reload original 'from' address 3287 __ mov(G1, from); 3288 3289 // re-check 8-byte alignment 3290 __ andcc(from, 7, G0); 3291 __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input); 3292 __ delayed()->alignaddr(from, G0, from); 3293 3294 // aligned case: load input into F52-F54 3295 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3296 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3297 __ ba_short(L_256bit_transform); 3298 3299 __ BIND(L_reload_misaligned_input); 3300 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3301 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3302 __ ldf(FloatRegisterImpl::D, from, 16, F56); 3303 __ faligndata(F52, F54, F52); 3304 __ faligndata(F54, F56, F54); 3305 3306 // perform 256-bit key specific inverse cipher transformation 3307 __ BIND(L_256bit_transform); 3308 __ fxor(FloatRegisterImpl::D, F0, F54, F54); 3309 __ fxor(FloatRegisterImpl::D, F2, F52, F52); 3310 __ aes_dround23(F4, F52, F54, F58); 3311 __ aes_dround01(F6, F52, F54, F56); 3312 __ aes_dround23(F50, F56, F58, F54); 3313 __ aes_dround01(F48, F56, F58, F52); 3314 __ aes_dround23(F46, F52, F54, F58); 3315 __ aes_dround01(F44, F52, F54, F56); 3316 __ aes_dround23(F42, F56, F58, F54); 3317 __ aes_dround01(F40, F56, F58, F52); 3318 3319 for ( int i = 0; i <= 7; i++ ) { 3320 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3321 } 3322 3323 // perform inverse cipher transformations common for all key sizes 3324 __ BIND(L_common_transform); 3325 for ( int i = 38; i >= 6; i -= 8 ) { 3326 __ aes_dround23(as_FloatRegister(i), F52, F54, F58); 3327 __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56); 3328 if ( i != 6) { 3329 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54); 3330 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52); 3331 } else { 3332 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54); 3333 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52); 3334 } 3335 } 3336 3337 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3338 __ andcc(to, 7, O5); 3339 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output); 3340 __ delayed()->edge8n(to, G0, O3); 3341 3342 // aligned case: store output into the destination array 3343 __ stf(FloatRegisterImpl::D, F52, to, 0); 3344 __ retl(); 3345 __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8); 3346 3347 __ BIND(L_store_misaligned_output); 3348 __ add(to, 8, O4); 3349 __ mov(8, O2); 3350 __ sub(O2, O5, O2); 3351 __ alignaddr(O2, G0, O2); 3352 __ faligndata(F52, F52, F52); 3353 __ faligndata(F54, F54, F54); 3354 __ and3(to, -8, to); 3355 __ and3(O4, -8, O4); 3356 __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY); 3357 __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY); 3358 __ add(to, 8, to); 3359 __ add(O4, 8, O4); 3360 __ orn(G0, O3, O3); 3361 __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY); 3362 __ retl(); 3363 __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY); 3364 3365 return start; 3366 } 3367 3368 address generate_cipherBlockChaining_encryptAESCrypt() { 3369 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3370 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3371 assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0, 3372 "the following code assumes that first element of a byte array is aligned to 8 bytes"); 3373 __ align(CodeEntryAlignment); 3374 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3375 Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit; 3376 Label L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform; 3377 Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit; 3378 Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit; 3379 address start = __ pc(); 3380 Register from = I0; // source byte array 3381 Register to = I1; // destination byte array 3382 Register key = I2; // expanded key array 3383 Register rvec = I3; // init vector 3384 const Register len_reg = I4; // cipher length 3385 const Register keylen = I5; // reg for storing expanded key array length 3386 3387 __ save_frame(0); 3388 // save cipher len to return in the end 3389 __ mov(len_reg, L0); 3390 3391 // read expanded key length 3392 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3393 3394 // load initial vector, 8-byte alignment is guranteed 3395 __ ldf(FloatRegisterImpl::D, rvec, 0, F60); 3396 __ ldf(FloatRegisterImpl::D, rvec, 8, F62); 3397 // load key, 8-byte alignment is guranteed 3398 __ ldx(key,0,G1); 3399 __ ldx(key,8,G5); 3400 3401 // start loading expanded key, 8-byte alignment is guranteed 3402 for ( int i = 0, j = 16; i <= 38; i += 2, j += 8 ) { 3403 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3404 } 3405 3406 // 128-bit original key size 3407 __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128); 3408 3409 for ( int i = 40, j = 176; i <= 46; i += 2, j += 8 ) { 3410 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3411 } 3412 3413 // 192-bit original key size 3414 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192); 3415 3416 for ( int i = 48, j = 208; i <= 54; i += 2, j += 8 ) { 3417 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3418 } 3419 3420 // 256-bit original key size 3421 __ ba_short(L_cbcenc256); 3422 3423 __ align(OptoLoopAlignment); 3424 __ BIND(L_cbcenc128); 3425 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3426 __ andcc(from, 7, G0); 3427 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit); 3428 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3429 3430 // aligned case: load input into G3 and G4 3431 __ ldx(from,0,G3); 3432 __ ldx(from,8,G4); 3433 __ ba_short(L_128bit_transform); 3434 3435 __ BIND(L_load_misaligned_input_128bit); 3436 // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption 3437 __ alignaddr(from, G0, from); 3438 __ ldf(FloatRegisterImpl::D, from, 0, F48); 3439 __ ldf(FloatRegisterImpl::D, from, 8, F50); 3440 __ ldf(FloatRegisterImpl::D, from, 16, F52); 3441 __ faligndata(F48, F50, F48); 3442 __ faligndata(F50, F52, F50); 3443 __ movdtox(F48, G3); 3444 __ movdtox(F50, G4); 3445 __ mov(L1, from); 3446 3447 __ BIND(L_128bit_transform); 3448 __ xor3(G1,G3,G3); 3449 __ xor3(G5,G4,G4); 3450 __ movxtod(G3,F56); 3451 __ movxtod(G4,F58); 3452 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3453 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3454 3455 // TEN_EROUNDS 3456 for ( int i = 0; i <= 32; i += 8 ) { 3457 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3458 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3459 if (i != 32 ) { 3460 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3461 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3462 } else { 3463 __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3464 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3465 } 3466 } 3467 3468 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3469 __ andcc(to, 7, L1); 3470 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit); 3471 __ delayed()->edge8n(to, G0, L2); 3472 3473 // aligned case: store output into the destination array 3474 __ stf(FloatRegisterImpl::D, F60, to, 0); 3475 __ stf(FloatRegisterImpl::D, F62, to, 8); 3476 __ ba_short(L_check_loop_end_128bit); 3477 3478 __ BIND(L_store_misaligned_output_128bit); 3479 __ add(to, 8, L3); 3480 __ mov(8, L4); 3481 __ sub(L4, L1, L4); 3482 __ alignaddr(L4, G0, L4); 3483 // save cipher text before circular right shift 3484 // as it needs to be stored as iv for next block (see code before next retl) 3485 __ movdtox(F60, L6); 3486 __ movdtox(F62, L7); 3487 __ faligndata(F60, F60, F60); 3488 __ faligndata(F62, F62, F62); 3489 __ mov(to, L5); 3490 __ and3(to, -8, to); 3491 __ and3(L3, -8, L3); 3492 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3493 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3494 __ add(to, 8, to); 3495 __ add(L3, 8, L3); 3496 __ orn(G0, L2, L2); 3497 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3498 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3499 __ mov(L5, to); 3500 __ movxtod(L6, F60); 3501 __ movxtod(L7, F62); 3502 3503 __ BIND(L_check_loop_end_128bit); 3504 __ add(from, 16, from); 3505 __ add(to, 16, to); 3506 __ subcc(len_reg, 16, len_reg); 3507 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128); 3508 __ delayed()->nop(); 3509 // re-init intial vector for next block, 8-byte alignment is guaranteed 3510 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3511 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3512 __ mov(L0, I0); 3513 __ ret(); 3514 __ delayed()->restore(); 3515 3516 __ align(OptoLoopAlignment); 3517 __ BIND(L_cbcenc192); 3518 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3519 __ andcc(from, 7, G0); 3520 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit); 3521 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3522 3523 // aligned case: load input into G3 and G4 3524 __ ldx(from,0,G3); 3525 __ ldx(from,8,G4); 3526 __ ba_short(L_192bit_transform); 3527 3528 __ BIND(L_load_misaligned_input_192bit); 3529 // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption 3530 __ alignaddr(from, G0, from); 3531 __ ldf(FloatRegisterImpl::D, from, 0, F48); 3532 __ ldf(FloatRegisterImpl::D, from, 8, F50); 3533 __ ldf(FloatRegisterImpl::D, from, 16, F52); 3534 __ faligndata(F48, F50, F48); 3535 __ faligndata(F50, F52, F50); 3536 __ movdtox(F48, G3); 3537 __ movdtox(F50, G4); 3538 __ mov(L1, from); 3539 3540 __ BIND(L_192bit_transform); 3541 __ xor3(G1,G3,G3); 3542 __ xor3(G5,G4,G4); 3543 __ movxtod(G3,F56); 3544 __ movxtod(G4,F58); 3545 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3546 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3547 3548 // TWELEVE_EROUNDS 3549 for ( int i = 0; i <= 40; i += 8 ) { 3550 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3551 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3552 if (i != 40 ) { 3553 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3554 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3555 } else { 3556 __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3557 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3558 } 3559 } 3560 3561 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3562 __ andcc(to, 7, L1); 3563 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit); 3564 __ delayed()->edge8n(to, G0, L2); 3565 3566 // aligned case: store output into the destination array 3567 __ stf(FloatRegisterImpl::D, F60, to, 0); 3568 __ stf(FloatRegisterImpl::D, F62, to, 8); 3569 __ ba_short(L_check_loop_end_192bit); 3570 3571 __ BIND(L_store_misaligned_output_192bit); 3572 __ add(to, 8, L3); 3573 __ mov(8, L4); 3574 __ sub(L4, L1, L4); 3575 __ alignaddr(L4, G0, L4); 3576 __ movdtox(F60, L6); 3577 __ movdtox(F62, L7); 3578 __ faligndata(F60, F60, F60); 3579 __ faligndata(F62, F62, F62); 3580 __ mov(to, L5); 3581 __ and3(to, -8, to); 3582 __ and3(L3, -8, L3); 3583 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3584 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3585 __ add(to, 8, to); 3586 __ add(L3, 8, L3); 3587 __ orn(G0, L2, L2); 3588 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3589 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3590 __ mov(L5, to); 3591 __ movxtod(L6, F60); 3592 __ movxtod(L7, F62); 3593 3594 __ BIND(L_check_loop_end_192bit); 3595 __ add(from, 16, from); 3596 __ subcc(len_reg, 16, len_reg); 3597 __ add(to, 16, to); 3598 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192); 3599 __ delayed()->nop(); 3600 // re-init intial vector for next block, 8-byte alignment is guaranteed 3601 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3602 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3603 __ mov(L0, I0); 3604 __ ret(); 3605 __ delayed()->restore(); 3606 3607 __ align(OptoLoopAlignment); 3608 __ BIND(L_cbcenc256); 3609 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3610 __ andcc(from, 7, G0); 3611 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit); 3612 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3613 3614 // aligned case: load input into G3 and G4 3615 __ ldx(from,0,G3); 3616 __ ldx(from,8,G4); 3617 __ ba_short(L_256bit_transform); 3618 3619 __ BIND(L_load_misaligned_input_256bit); 3620 // cannot clobber F48, F50 and F52. F56, F58 can be used though 3621 __ alignaddr(from, G0, from); 3622 __ movdtox(F60, L2); // save F60 before overwriting 3623 __ ldf(FloatRegisterImpl::D, from, 0, F56); 3624 __ ldf(FloatRegisterImpl::D, from, 8, F58); 3625 __ ldf(FloatRegisterImpl::D, from, 16, F60); 3626 __ faligndata(F56, F58, F56); 3627 __ faligndata(F58, F60, F58); 3628 __ movdtox(F56, G3); 3629 __ movdtox(F58, G4); 3630 __ mov(L1, from); 3631 __ movxtod(L2, F60); 3632 3633 __ BIND(L_256bit_transform); 3634 __ xor3(G1,G3,G3); 3635 __ xor3(G5,G4,G4); 3636 __ movxtod(G3,F56); 3637 __ movxtod(G4,F58); 3638 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3639 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3640 3641 // FOURTEEN_EROUNDS 3642 for ( int i = 0; i <= 48; i += 8 ) { 3643 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3644 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3645 if (i != 48 ) { 3646 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3647 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3648 } else { 3649 __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3650 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3651 } 3652 } 3653 3654 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3655 __ andcc(to, 7, L1); 3656 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit); 3657 __ delayed()->edge8n(to, G0, L2); 3658 3659 // aligned case: store output into the destination array 3660 __ stf(FloatRegisterImpl::D, F60, to, 0); 3661 __ stf(FloatRegisterImpl::D, F62, to, 8); 3662 __ ba_short(L_check_loop_end_256bit); 3663 3664 __ BIND(L_store_misaligned_output_256bit); 3665 __ add(to, 8, L3); 3666 __ mov(8, L4); 3667 __ sub(L4, L1, L4); 3668 __ alignaddr(L4, G0, L4); 3669 __ movdtox(F60, L6); 3670 __ movdtox(F62, L7); 3671 __ faligndata(F60, F60, F60); 3672 __ faligndata(F62, F62, F62); 3673 __ mov(to, L5); 3674 __ and3(to, -8, to); 3675 __ and3(L3, -8, L3); 3676 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3677 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3678 __ add(to, 8, to); 3679 __ add(L3, 8, L3); 3680 __ orn(G0, L2, L2); 3681 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3682 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3683 __ mov(L5, to); 3684 __ movxtod(L6, F60); 3685 __ movxtod(L7, F62); 3686 3687 __ BIND(L_check_loop_end_256bit); 3688 __ add(from, 16, from); 3689 __ subcc(len_reg, 16, len_reg); 3690 __ add(to, 16, to); 3691 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256); 3692 __ delayed()->nop(); 3693 // re-init intial vector for next block, 8-byte alignment is guaranteed 3694 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3695 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3696 __ mov(L0, I0); 3697 __ ret(); 3698 __ delayed()->restore(); 3699 3700 return start; 3701 } 3702 3703 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3704 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3705 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3706 assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0, 3707 "the following code assumes that first element of a byte array is aligned to 8 bytes"); 3708 __ align(CodeEntryAlignment); 3709 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3710 Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start; 3711 Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256; 3712 Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128; 3713 Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256; 3714 Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128; 3715 Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192; 3716 Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256; 3717 address start = __ pc(); 3718 Register from = I0; // source byte array 3719 Register to = I1; // destination byte array 3720 Register key = I2; // expanded key array 3721 Register rvec = I3; // init vector 3722 const Register len_reg = I4; // cipher length 3723 const Register original_key = I5; // original key array only required during decryption 3724 const Register keylen = L6; // reg for storing expanded key array length 3725 3726 __ save_frame(0); //args are read from I* registers since we save the frame in the beginning 3727 // save cipher len to return in the end 3728 __ mov(len_reg, L7); 3729 3730 // load original key from SunJCE expanded decryption key 3731 // Since we load original key buffer starting first element, 8-byte alignment is guaranteed 3732 for ( int i = 0; i <= 3; i++ ) { 3733 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3734 } 3735 3736 // load initial vector, 8-byte alignment is guaranteed 3737 __ ldx(rvec,0,L0); 3738 __ ldx(rvec,8,L1); 3739 3740 // read expanded key array length 3741 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3742 3743 // 256-bit original key size 3744 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); 3745 3746 // 192-bit original key size 3747 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); 3748 3749 // 128-bit original key size 3750 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3751 for ( int i = 0; i <= 36; i += 4 ) { 3752 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); 3753 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); 3754 } 3755 3756 // load expanded key[last-1] and key[last] elements 3757 __ movdtox(F40,L2); 3758 __ movdtox(F42,L3); 3759 3760 __ and3(len_reg, 16, L4); 3761 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128); 3762 __ nop(); 3763 3764 __ ba_short(L_dec_first_block_start); 3765 3766 __ BIND(L_expand192bit); 3767 // load rest of the 192-bit key 3768 __ ldf(FloatRegisterImpl::S, original_key, 16, F4); 3769 __ ldf(FloatRegisterImpl::S, original_key, 20, F5); 3770 3771 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3772 for ( int i = 0; i <= 36; i += 6 ) { 3773 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); 3774 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); 3775 __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3776 } 3777 __ aes_kexpand1(F42, F46, 7, F48); 3778 __ aes_kexpand2(F44, F48, F50); 3779 3780 // load expanded key[last-1] and key[last] elements 3781 __ movdtox(F48,L2); 3782 __ movdtox(F50,L3); 3783 3784 __ and3(len_reg, 16, L4); 3785 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192); 3786 __ nop(); 3787 3788 __ ba_short(L_dec_first_block_start); 3789 3790 __ BIND(L_expand256bit); 3791 // load rest of the 256-bit key 3792 for ( int i = 4; i <= 7; i++ ) { 3793 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3794 } 3795 3796 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3797 for ( int i = 0; i <= 40; i += 8 ) { 3798 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); 3799 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3800 __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); 3801 __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); 3802 } 3803 __ aes_kexpand1(F48, F54, 6, F56); 3804 __ aes_kexpand2(F50, F56, F58); 3805 3806 // load expanded key[last-1] and key[last] elements 3807 __ movdtox(F56,L2); 3808 __ movdtox(F58,L3); 3809 3810 __ and3(len_reg, 16, L4); 3811 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256); 3812 3813 __ BIND(L_dec_first_block_start); 3814 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3815 __ andcc(from, 7, G0); 3816 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block); 3817 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 3818 3819 // aligned case: load input into L4 and L5 3820 __ ldx(from,0,L4); 3821 __ ldx(from,8,L5); 3822 __ ba_short(L_transform_first_block); 3823 3824 __ BIND(L_load_misaligned_input_first_block); 3825 __ alignaddr(from, G0, from); 3826 // F58, F60, F62 can be clobbered 3827 __ ldf(FloatRegisterImpl::D, from, 0, F58); 3828 __ ldf(FloatRegisterImpl::D, from, 8, F60); 3829 __ ldf(FloatRegisterImpl::D, from, 16, F62); 3830 __ faligndata(F58, F60, F58); 3831 __ faligndata(F60, F62, F60); 3832 __ movdtox(F58, L4); 3833 __ movdtox(F60, L5); 3834 __ mov(G1, from); 3835 3836 __ BIND(L_transform_first_block); 3837 __ xor3(L2,L4,G1); 3838 __ movxtod(G1,F60); 3839 __ xor3(L3,L5,G1); 3840 __ movxtod(G1,F62); 3841 3842 // 128-bit original key size 3843 __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128); 3844 3845 // 192-bit original key size 3846 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192); 3847 3848 __ aes_dround23(F54, F60, F62, F58); 3849 __ aes_dround01(F52, F60, F62, F56); 3850 __ aes_dround23(F50, F56, F58, F62); 3851 __ aes_dround01(F48, F56, F58, F60); 3852 3853 __ BIND(L_dec_first_block192); 3854 __ aes_dround23(F46, F60, F62, F58); 3855 __ aes_dround01(F44, F60, F62, F56); 3856 __ aes_dround23(F42, F56, F58, F62); 3857 __ aes_dround01(F40, F56, F58, F60); 3858 3859 __ BIND(L_dec_first_block128); 3860 for ( int i = 38; i >= 6; i -= 8 ) { 3861 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 3862 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 3863 if ( i != 6) { 3864 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 3865 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 3866 } else { 3867 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 3868 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 3869 } 3870 } 3871 3872 __ movxtod(L0,F56); 3873 __ movxtod(L1,F58); 3874 __ mov(L4,L0); 3875 __ mov(L5,L1); 3876 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 3877 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 3878 3879 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3880 __ andcc(to, 7, G1); 3881 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block); 3882 __ delayed()->edge8n(to, G0, G2); 3883 3884 // aligned case: store output into the destination array 3885 __ stf(FloatRegisterImpl::D, F60, to, 0); 3886 __ stf(FloatRegisterImpl::D, F62, to, 8); 3887 __ ba_short(L_check_decrypt_end); 3888 3889 __ BIND(L_store_misaligned_output_first_block); 3890 __ add(to, 8, G3); 3891 __ mov(8, G4); 3892 __ sub(G4, G1, G4); 3893 __ alignaddr(G4, G0, G4); 3894 __ faligndata(F60, F60, F60); 3895 __ faligndata(F62, F62, F62); 3896 __ mov(to, G1); 3897 __ and3(to, -8, to); 3898 __ and3(G3, -8, G3); 3899 __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY); 3900 __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY); 3901 __ add(to, 8, to); 3902 __ add(G3, 8, G3); 3903 __ orn(G0, G2, G2); 3904 __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY); 3905 __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY); 3906 __ mov(G1, to); 3907 3908 __ BIND(L_check_decrypt_end); 3909 __ add(from, 16, from); 3910 __ add(to, 16, to); 3911 __ subcc(len_reg, 16, len_reg); 3912 __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end); 3913 __ delayed()->nop(); 3914 3915 // 256-bit original key size 3916 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256); 3917 3918 // 192-bit original key size 3919 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192); 3920 3921 __ align(OptoLoopAlignment); 3922 __ BIND(L_dec_next2_blocks128); 3923 __ nop(); 3924 3925 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3926 __ andcc(from, 7, G0); 3927 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128); 3928 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 3929 3930 // aligned case: load input into G4, G5, L4 and L5 3931 __ ldx(from,0,G4); 3932 __ ldx(from,8,G5); 3933 __ ldx(from,16,L4); 3934 __ ldx(from,24,L5); 3935 __ ba_short(L_transform_next2_blocks128); 3936 3937 __ BIND(L_load_misaligned_next2_blocks128); 3938 __ alignaddr(from, G0, from); 3939 // F40, F42, F58, F60, F62 can be clobbered 3940 __ ldf(FloatRegisterImpl::D, from, 0, F40); 3941 __ ldf(FloatRegisterImpl::D, from, 8, F42); 3942 __ ldf(FloatRegisterImpl::D, from, 16, F60); 3943 __ ldf(FloatRegisterImpl::D, from, 24, F62); 3944 __ ldf(FloatRegisterImpl::D, from, 32, F58); 3945 __ faligndata(F40, F42, F40); 3946 __ faligndata(F42, F60, F42); 3947 __ faligndata(F60, F62, F60); 3948 __ faligndata(F62, F58, F62); 3949 __ movdtox(F40, G4); 3950 __ movdtox(F42, G5); 3951 __ movdtox(F60, L4); 3952 __ movdtox(F62, L5); 3953 __ mov(G1, from); 3954 3955 __ BIND(L_transform_next2_blocks128); 3956 // F40:F42 used for first 16-bytes 3957 __ xor3(L2,G4,G1); 3958 __ movxtod(G1,F40); 3959 __ xor3(L3,G5,G1); 3960 __ movxtod(G1,F42); 3961 3962 // F60:F62 used for next 16-bytes 3963 __ xor3(L2,L4,G1); 3964 __ movxtod(G1,F60); 3965 __ xor3(L3,L5,G1); 3966 __ movxtod(G1,F62); 3967 3968 for ( int i = 38; i >= 6; i -= 8 ) { 3969 __ aes_dround23(as_FloatRegister(i), F40, F42, F44); 3970 __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46); 3971 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 3972 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 3973 if (i != 6 ) { 3974 __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42); 3975 __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40); 3976 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 3977 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 3978 } else { 3979 __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42); 3980 __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40); 3981 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 3982 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 3983 } 3984 } 3985 3986 __ movxtod(L0,F46); 3987 __ movxtod(L1,F44); 3988 __ fxor(FloatRegisterImpl::D, F46, F40, F40); 3989 __ fxor(FloatRegisterImpl::D, F44, F42, F42); 3990 3991 __ movxtod(G4,F56); 3992 __ movxtod(G5,F58); 3993 __ mov(L4,L0); 3994 __ mov(L5,L1); 3995 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 3996 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 3997 3998 // For mis-aligned store of 32 bytes of result we can do: 3999 // Circular right-shift all 4 FP registers so that 'head' and 'tail' 4000 // parts that need to be stored starting at mis-aligned address are in a FP reg 4001 // the other 3 FP regs can thus be stored using regular store 4002 // we then use the edge + partial-store mechanism to store the 'head' and 'tail' parts 4003 4004 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4005 __ andcc(to, 7, G1); 4006 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128); 4007 __ delayed()->edge8n(to, G0, G2); 4008 4009 // aligned case: store output into the destination array 4010 __ stf(FloatRegisterImpl::D, F40, to, 0); 4011 __ stf(FloatRegisterImpl::D, F42, to, 8); 4012 __ stf(FloatRegisterImpl::D, F60, to, 16); 4013 __ stf(FloatRegisterImpl::D, F62, to, 24); 4014 __ ba_short(L_check_decrypt_loop_end128); 4015 4016 __ BIND(L_store_misaligned_output_next2_blocks128); 4017 __ mov(8, G4); 4018 __ sub(G4, G1, G4); 4019 __ alignaddr(G4, G0, G4); 4020 __ faligndata(F40, F42, F56); // F56 can be clobbered 4021 __ faligndata(F42, F60, F42); 4022 __ faligndata(F60, F62, F60); 4023 __ faligndata(F62, F40, F40); 4024 __ mov(to, G1); 4025 __ and3(to, -8, to); 4026 __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY); 4027 __ stf(FloatRegisterImpl::D, F56, to, 8); 4028 __ stf(FloatRegisterImpl::D, F42, to, 16); 4029 __ stf(FloatRegisterImpl::D, F60, to, 24); 4030 __ add(to, 32, to); 4031 __ orn(G0, G2, G2); 4032 __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY); 4033 __ mov(G1, to); 4034 4035 __ BIND(L_check_decrypt_loop_end128); 4036 __ add(from, 32, from); 4037 __ add(to, 32, to); 4038 __ subcc(len_reg, 32, len_reg); 4039 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128); 4040 __ delayed()->nop(); 4041 __ ba_short(L_cbcdec_end); 4042 4043 __ align(OptoLoopAlignment); 4044 __ BIND(L_dec_next2_blocks192); 4045 __ nop(); 4046 4047 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 4048 __ andcc(from, 7, G0); 4049 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192); 4050 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 4051 4052 // aligned case: load input into G4, G5, L4 and L5 4053 __ ldx(from,0,G4); 4054 __ ldx(from,8,G5); 4055 __ ldx(from,16,L4); 4056 __ ldx(from,24,L5); 4057 __ ba_short(L_transform_next2_blocks192); 4058 4059 __ BIND(L_load_misaligned_next2_blocks192); 4060 __ alignaddr(from, G0, from); 4061 // F48, F50, F52, F60, F62 can be clobbered 4062 __ ldf(FloatRegisterImpl::D, from, 0, F48); 4063 __ ldf(FloatRegisterImpl::D, from, 8, F50); 4064 __ ldf(FloatRegisterImpl::D, from, 16, F60); 4065 __ ldf(FloatRegisterImpl::D, from, 24, F62); 4066 __ ldf(FloatRegisterImpl::D, from, 32, F52); 4067 __ faligndata(F48, F50, F48); 4068 __ faligndata(F50, F60, F50); 4069 __ faligndata(F60, F62, F60); 4070 __ faligndata(F62, F52, F62); 4071 __ movdtox(F48, G4); 4072 __ movdtox(F50, G5); 4073 __ movdtox(F60, L4); 4074 __ movdtox(F62, L5); 4075 __ mov(G1, from); 4076 4077 __ BIND(L_transform_next2_blocks192); 4078 // F48:F50 used for first 16-bytes 4079 __ xor3(L2,G4,G1); 4080 __ movxtod(G1,F48); 4081 __ xor3(L3,G5,G1); 4082 __ movxtod(G1,F50); 4083 4084 // F60:F62 used for next 16-bytes 4085 __ xor3(L2,L4,G1); 4086 __ movxtod(G1,F60); 4087 __ xor3(L3,L5,G1); 4088 __ movxtod(G1,F62); 4089 4090 for ( int i = 46; i >= 6; i -= 8 ) { 4091 __ aes_dround23(as_FloatRegister(i), F48, F50, F52); 4092 __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54); 4093 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 4094 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 4095 if (i != 6 ) { 4096 __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50); 4097 __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48); 4098 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 4099 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 4100 } else { 4101 __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50); 4102 __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48); 4103 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 4104 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 4105 } 4106 } 4107 4108 __ movxtod(L0,F54); 4109 __ movxtod(L1,F52); 4110 __ fxor(FloatRegisterImpl::D, F54, F48, F48); 4111 __ fxor(FloatRegisterImpl::D, F52, F50, F50); 4112 4113 __ movxtod(G4,F56); 4114 __ movxtod(G5,F58); 4115 __ mov(L4,L0); 4116 __ mov(L5,L1); 4117 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 4118 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 4119 4120 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4121 __ andcc(to, 7, G1); 4122 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192); 4123 __ delayed()->edge8n(to, G0, G2); 4124 4125 // aligned case: store output into the destination array 4126 __ stf(FloatRegisterImpl::D, F48, to, 0); 4127 __ stf(FloatRegisterImpl::D, F50, to, 8); 4128 __ stf(FloatRegisterImpl::D, F60, to, 16); 4129 __ stf(FloatRegisterImpl::D, F62, to, 24); 4130 __ ba_short(L_check_decrypt_loop_end192); 4131 4132 __ BIND(L_store_misaligned_output_next2_blocks192); 4133 __ mov(8, G4); 4134 __ sub(G4, G1, G4); 4135 __ alignaddr(G4, G0, G4); 4136 __ faligndata(F48, F50, F56); // F56 can be clobbered 4137 __ faligndata(F50, F60, F50); 4138 __ faligndata(F60, F62, F60); 4139 __ faligndata(F62, F48, F48); 4140 __ mov(to, G1); 4141 __ and3(to, -8, to); 4142 __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY); 4143 __ stf(FloatRegisterImpl::D, F56, to, 8); 4144 __ stf(FloatRegisterImpl::D, F50, to, 16); 4145 __ stf(FloatRegisterImpl::D, F60, to, 24); 4146 __ add(to, 32, to); 4147 __ orn(G0, G2, G2); 4148 __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY); 4149 __ mov(G1, to); 4150 4151 __ BIND(L_check_decrypt_loop_end192); 4152 __ add(from, 32, from); 4153 __ add(to, 32, to); 4154 __ subcc(len_reg, 32, len_reg); 4155 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192); 4156 __ delayed()->nop(); 4157 __ ba_short(L_cbcdec_end); 4158 4159 __ align(OptoLoopAlignment); 4160 __ BIND(L_dec_next2_blocks256); 4161 __ nop(); 4162 4163 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 4164 __ andcc(from, 7, G0); 4165 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256); 4166 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 4167 4168 // aligned case: load input into G4, G5, L4 and L5 4169 __ ldx(from,0,G4); 4170 __ ldx(from,8,G5); 4171 __ ldx(from,16,L4); 4172 __ ldx(from,24,L5); 4173 __ ba_short(L_transform_next2_blocks256); 4174 4175 __ BIND(L_load_misaligned_next2_blocks256); 4176 __ alignaddr(from, G0, from); 4177 // F0, F2, F4, F60, F62 can be clobbered 4178 __ ldf(FloatRegisterImpl::D, from, 0, F0); 4179 __ ldf(FloatRegisterImpl::D, from, 8, F2); 4180 __ ldf(FloatRegisterImpl::D, from, 16, F60); 4181 __ ldf(FloatRegisterImpl::D, from, 24, F62); 4182 __ ldf(FloatRegisterImpl::D, from, 32, F4); 4183 __ faligndata(F0, F2, F0); 4184 __ faligndata(F2, F60, F2); 4185 __ faligndata(F60, F62, F60); 4186 __ faligndata(F62, F4, F62); 4187 __ movdtox(F0, G4); 4188 __ movdtox(F2, G5); 4189 __ movdtox(F60, L4); 4190 __ movdtox(F62, L5); 4191 __ mov(G1, from); 4192 4193 __ BIND(L_transform_next2_blocks256); 4194 // F0:F2 used for first 16-bytes 4195 __ xor3(L2,G4,G1); 4196 __ movxtod(G1,F0); 4197 __ xor3(L3,G5,G1); 4198 __ movxtod(G1,F2); 4199 4200 // F60:F62 used for next 16-bytes 4201 __ xor3(L2,L4,G1); 4202 __ movxtod(G1,F60); 4203 __ xor3(L3,L5,G1); 4204 __ movxtod(G1,F62); 4205 4206 __ aes_dround23(F54, F0, F2, F4); 4207 __ aes_dround01(F52, F0, F2, F6); 4208 __ aes_dround23(F54, F60, F62, F58); 4209 __ aes_dround01(F52, F60, F62, F56); 4210 __ aes_dround23(F50, F6, F4, F2); 4211 __ aes_dround01(F48, F6, F4, F0); 4212 __ aes_dround23(F50, F56, F58, F62); 4213 __ aes_dround01(F48, F56, F58, F60); 4214 // save F48:F54 in temp registers 4215 __ movdtox(F54,G2); 4216 __ movdtox(F52,G3); 4217 __ movdtox(F50,G6); 4218 __ movdtox(F48,G1); 4219 for ( int i = 46; i >= 14; i -= 8 ) { 4220 __ aes_dround23(as_FloatRegister(i), F0, F2, F4); 4221 __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6); 4222 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 4223 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 4224 __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2); 4225 __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0); 4226 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 4227 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 4228 } 4229 // init F48:F54 with F0:F6 values (original key) 4230 __ ldf(FloatRegisterImpl::D, original_key, 0, F48); 4231 __ ldf(FloatRegisterImpl::D, original_key, 8, F50); 4232 __ ldf(FloatRegisterImpl::D, original_key, 16, F52); 4233 __ ldf(FloatRegisterImpl::D, original_key, 24, F54); 4234 __ aes_dround23(F54, F0, F2, F4); 4235 __ aes_dround01(F52, F0, F2, F6); 4236 __ aes_dround23(F54, F60, F62, F58); 4237 __ aes_dround01(F52, F60, F62, F56); 4238 __ aes_dround23_l(F50, F6, F4, F2); 4239 __ aes_dround01_l(F48, F6, F4, F0); 4240 __ aes_dround23_l(F50, F56, F58, F62); 4241 __ aes_dround01_l(F48, F56, F58, F60); 4242 // re-init F48:F54 with their original values 4243 __ movxtod(G2,F54); 4244 __ movxtod(G3,F52); 4245 __ movxtod(G6,F50); 4246 __ movxtod(G1,F48); 4247 4248 __ movxtod(L0,F6); 4249 __ movxtod(L1,F4); 4250 __ fxor(FloatRegisterImpl::D, F6, F0, F0); 4251 __ fxor(FloatRegisterImpl::D, F4, F2, F2); 4252 4253 __ movxtod(G4,F56); 4254 __ movxtod(G5,F58); 4255 __ mov(L4,L0); 4256 __ mov(L5,L1); 4257 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 4258 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 4259 4260 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4261 __ andcc(to, 7, G1); 4262 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256); 4263 __ delayed()->edge8n(to, G0, G2); 4264 4265 // aligned case: store output into the destination array 4266 __ stf(FloatRegisterImpl::D, F0, to, 0); 4267 __ stf(FloatRegisterImpl::D, F2, to, 8); 4268 __ stf(FloatRegisterImpl::D, F60, to, 16); 4269 __ stf(FloatRegisterImpl::D, F62, to, 24); 4270 __ ba_short(L_check_decrypt_loop_end256); 4271 4272 __ BIND(L_store_misaligned_output_next2_blocks256); 4273 __ mov(8, G4); 4274 __ sub(G4, G1, G4); 4275 __ alignaddr(G4, G0, G4); 4276 __ faligndata(F0, F2, F56); // F56 can be clobbered 4277 __ faligndata(F2, F60, F2); 4278 __ faligndata(F60, F62, F60); 4279 __ faligndata(F62, F0, F0); 4280 __ mov(to, G1); 4281 __ and3(to, -8, to); 4282 __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY); 4283 __ stf(FloatRegisterImpl::D, F56, to, 8); 4284 __ stf(FloatRegisterImpl::D, F2, to, 16); 4285 __ stf(FloatRegisterImpl::D, F60, to, 24); 4286 __ add(to, 32, to); 4287 __ orn(G0, G2, G2); 4288 __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY); 4289 __ mov(G1, to); 4290 4291 __ BIND(L_check_decrypt_loop_end256); 4292 __ add(from, 32, from); 4293 __ add(to, 32, to); 4294 __ subcc(len_reg, 32, len_reg); 4295 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256); 4296 __ delayed()->nop(); 4297 4298 __ BIND(L_cbcdec_end); 4299 // re-init intial vector for next block, 8-byte alignment is guaranteed 4300 __ stx(L0, rvec, 0); 4301 __ stx(L1, rvec, 8); 4302 __ mov(L7, I0); 4303 __ ret(); 4304 __ delayed()->restore(); 4305 4306 return start; 4307 } 4308 4309 address generate_sha1_implCompress(bool multi_block, const char *name) { 4310 __ align(CodeEntryAlignment); 4311 StubCodeMark mark(this, "StubRoutines", name); 4312 address start = __ pc(); 4313 4314 Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop; 4315 int i; 4316 4317 Register buf = O0; // byte[] source+offset 4318 Register state = O1; // int[] SHA.state 4319 Register ofs = O2; // int offset 4320 Register limit = O3; // int limit 4321 4322 // load state into F0-F4 4323 for (i = 0; i < 5; i++) { 4324 __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); 4325 } 4326 4327 __ andcc(buf, 7, G0); 4328 __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input); 4329 __ delayed()->nop(); 4330 4331 __ BIND(L_sha1_loop); 4332 // load buf into F8-F22 4333 for (i = 0; i < 8; i++) { 4334 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4335 } 4336 __ sha1(); 4337 if (multi_block) { 4338 __ add(ofs, 64, ofs); 4339 __ add(buf, 64, buf); 4340 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop); 4341 __ mov(ofs, O0); // to be returned 4342 } 4343 4344 // store F0-F4 into state and return 4345 for (i = 0; i < 4; i++) { 4346 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4347 } 4348 __ retl(); 4349 __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); 4350 4351 __ BIND(L_sha1_unaligned_input); 4352 __ alignaddr(buf, G0, buf); 4353 4354 __ BIND(L_sha1_unaligned_input_loop); 4355 // load buf into F8-F22 4356 for (i = 0; i < 9; i++) { 4357 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4358 } 4359 for (i = 0; i < 8; i++) { 4360 __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); 4361 } 4362 __ sha1(); 4363 if (multi_block) { 4364 __ add(ofs, 64, ofs); 4365 __ add(buf, 64, buf); 4366 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop); 4367 __ mov(ofs, O0); // to be returned 4368 } 4369 4370 // store F0-F4 into state and return 4371 for (i = 0; i < 4; i++) { 4372 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4373 } 4374 __ retl(); 4375 __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); 4376 4377 return start; 4378 } 4379 4380 address generate_sha256_implCompress(bool multi_block, const char *name) { 4381 __ align(CodeEntryAlignment); 4382 StubCodeMark mark(this, "StubRoutines", name); 4383 address start = __ pc(); 4384 4385 Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop; 4386 int i; 4387 4388 Register buf = O0; // byte[] source+offset 4389 Register state = O1; // int[] SHA2.state 4390 Register ofs = O2; // int offset 4391 Register limit = O3; // int limit 4392 4393 // load state into F0-F7 4394 for (i = 0; i < 8; i++) { 4395 __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); 4396 } 4397 4398 __ andcc(buf, 7, G0); 4399 __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input); 4400 __ delayed()->nop(); 4401 4402 __ BIND(L_sha256_loop); 4403 // load buf into F8-F22 4404 for (i = 0; i < 8; i++) { 4405 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4406 } 4407 __ sha256(); 4408 if (multi_block) { 4409 __ add(ofs, 64, ofs); 4410 __ add(buf, 64, buf); 4411 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop); 4412 __ mov(ofs, O0); // to be returned 4413 } 4414 4415 // store F0-F7 into state and return 4416 for (i = 0; i < 7; i++) { 4417 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4418 } 4419 __ retl(); 4420 __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); 4421 4422 __ BIND(L_sha256_unaligned_input); 4423 __ alignaddr(buf, G0, buf); 4424 4425 __ BIND(L_sha256_unaligned_input_loop); 4426 // load buf into F8-F22 4427 for (i = 0; i < 9; i++) { 4428 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4429 } 4430 for (i = 0; i < 8; i++) { 4431 __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); 4432 } 4433 __ sha256(); 4434 if (multi_block) { 4435 __ add(ofs, 64, ofs); 4436 __ add(buf, 64, buf); 4437 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop); 4438 __ mov(ofs, O0); // to be returned 4439 } 4440 4441 // store F0-F7 into state and return 4442 for (i = 0; i < 7; i++) { 4443 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4444 } 4445 __ retl(); 4446 __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); 4447 4448 return start; 4449 } 4450 4451 address generate_sha512_implCompress(bool multi_block, const char *name) { 4452 __ align(CodeEntryAlignment); 4453 StubCodeMark mark(this, "StubRoutines", name); 4454 address start = __ pc(); 4455 4456 Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop; 4457 int i; 4458 4459 Register buf = O0; // byte[] source+offset 4460 Register state = O1; // long[] SHA5.state 4461 Register ofs = O2; // int offset 4462 Register limit = O3; // int limit 4463 4464 // load state into F0-F14 4465 for (i = 0; i < 8; i++) { 4466 __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2)); 4467 } 4468 4469 __ andcc(buf, 7, G0); 4470 __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input); 4471 __ delayed()->nop(); 4472 4473 __ BIND(L_sha512_loop); 4474 // load buf into F16-F46 4475 for (i = 0; i < 16; i++) { 4476 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); 4477 } 4478 __ sha512(); 4479 if (multi_block) { 4480 __ add(ofs, 128, ofs); 4481 __ add(buf, 128, buf); 4482 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop); 4483 __ mov(ofs, O0); // to be returned 4484 } 4485 4486 // store F0-F14 into state and return 4487 for (i = 0; i < 7; i++) { 4488 __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); 4489 } 4490 __ retl(); 4491 __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); 4492 4493 __ BIND(L_sha512_unaligned_input); 4494 __ alignaddr(buf, G0, buf); 4495 4496 __ BIND(L_sha512_unaligned_input_loop); 4497 // load buf into F16-F46 4498 for (i = 0; i < 17; i++) { 4499 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); 4500 } 4501 for (i = 0; i < 16; i++) { 4502 __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16)); 4503 } 4504 __ sha512(); 4505 if (multi_block) { 4506 __ add(ofs, 128, ofs); 4507 __ add(buf, 128, buf); 4508 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop); 4509 __ mov(ofs, O0); // to be returned 4510 } 4511 4512 // store F0-F14 into state and return 4513 for (i = 0; i < 7; i++) { 4514 __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); 4515 } 4516 __ retl(); 4517 __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); 4518 4519 return start; 4520 } 4521 4522 /* Single and multi-block ghash operations */ 4523 address generate_ghash_processBlocks() { 4524 __ align(CodeEntryAlignment); 4525 Label L_ghash_loop, L_aligned, L_main; 4526 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4527 address start = __ pc(); 4528 4529 Register state = I0; 4530 Register subkeyH = I1; 4531 Register data = I2; 4532 Register len = I3; 4533 4534 __ save_frame(0); 4535 4536 __ ldx(state, 0, O0); 4537 __ ldx(state, 8, O1); 4538 4539 // Loop label for multiblock operations 4540 __ BIND(L_ghash_loop); 4541 4542 // Check if 'data' is unaligned 4543 __ andcc(data, 7, G1); 4544 __ br(Assembler::zero, false, Assembler::pt, L_aligned); 4545 __ delayed()->nop(); 4546 4547 Register left_shift = L1; 4548 Register right_shift = L2; 4549 Register data_ptr = L3; 4550 4551 // Get left and right shift values in bits 4552 __ sll(G1, LogBitsPerByte, left_shift); 4553 __ mov(64, right_shift); 4554 __ sub(right_shift, left_shift, right_shift); 4555 4556 // Align to read 'data' 4557 __ sub(data, G1, data_ptr); 4558 4559 // Load first 8 bytes of 'data' 4560 __ ldx(data_ptr, 0, O4); 4561 __ sllx(O4, left_shift, O4); 4562 __ ldx(data_ptr, 8, O5); 4563 __ srlx(O5, right_shift, G4); 4564 __ bset(G4, O4); 4565 4566 // Load second 8 bytes of 'data' 4567 __ sllx(O5, left_shift, O5); 4568 __ ldx(data_ptr, 16, G4); 4569 __ srlx(G4, right_shift, G4); 4570 __ ba(L_main); 4571 __ delayed()->bset(G4, O5); 4572 4573 // If 'data' is aligned, load normally 4574 __ BIND(L_aligned); 4575 __ ldx(data, 0, O4); 4576 __ ldx(data, 8, O5); 4577 4578 __ BIND(L_main); 4579 __ ldx(subkeyH, 0, O2); 4580 __ ldx(subkeyH, 8, O3); 4581 4582 __ xor3(O0, O4, O0); 4583 __ xor3(O1, O5, O1); 4584 4585 __ xmulxhi(O0, O3, G3); 4586 __ xmulx(O0, O2, O5); 4587 __ xmulxhi(O1, O2, G4); 4588 __ xmulxhi(O1, O3, G5); 4589 __ xmulx(O0, O3, G1); 4590 __ xmulx(O1, O3, G2); 4591 __ xmulx(O1, O2, O3); 4592 __ xmulxhi(O0, O2, O4); 4593 4594 __ mov(0xE1, O0); 4595 __ sllx(O0, 56, O0); 4596 4597 __ xor3(O5, G3, O5); 4598 __ xor3(O5, G4, O5); 4599 __ xor3(G5, G1, G1); 4600 __ xor3(G1, O3, G1); 4601 __ srlx(G2, 63, O1); 4602 __ srlx(G1, 63, G3); 4603 __ sllx(G2, 63, O3); 4604 __ sllx(G2, 58, O2); 4605 __ xor3(O3, O2, O2); 4606 4607 __ sllx(G1, 1, G1); 4608 __ or3(G1, O1, G1); 4609 4610 __ xor3(G1, O2, G1); 4611 4612 __ sllx(G2, 1, G2); 4613 4614 __ xmulxhi(G1, O0, O1); 4615 __ xmulx(G1, O0, O2); 4616 __ xmulxhi(G2, O0, O3); 4617 __ xmulx(G2, O0, G1); 4618 4619 __ xor3(O4, O1, O4); 4620 __ xor3(O5, O2, O5); 4621 __ xor3(O5, O3, O5); 4622 4623 __ sllx(O4, 1, O2); 4624 __ srlx(O5, 63, O3); 4625 4626 __ or3(O2, O3, O0); 4627 4628 __ sllx(O5, 1, O1); 4629 __ srlx(G1, 63, O2); 4630 __ or3(O1, O2, O1); 4631 __ xor3(O1, G3, O1); 4632 4633 __ deccc(len); 4634 __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop); 4635 __ delayed()->add(data, 16, data); 4636 4637 __ stx(O0, I0, 0); 4638 __ stx(O1, I0, 8); 4639 4640 __ ret(); 4641 __ delayed()->restore(); 4642 4643 return start; 4644 } 4645 4646 /** 4647 * Arguments: 4648 * 4649 * Inputs: 4650 * O0 - int crc 4651 * O1 - byte* buf 4652 * O2 - int len 4653 * O3 - int* table 4654 * 4655 * Output: 4656 * O0 - int crc result 4657 */ 4658 address generate_updateBytesCRC32C() { 4659 assert(UseCRC32CIntrinsics, "need CRC32C instruction"); 4660 4661 __ align(CodeEntryAlignment); 4662 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 4663 address start = __ pc(); 4664 4665 const Register crc = O0; // crc 4666 const Register buf = O1; // source java byte array address 4667 const Register len = O2; // number of bytes 4668 const Register table = O3; // byteTable 4669 4670 __ kernel_crc32c(crc, buf, len, table); 4671 4672 __ retl(); 4673 __ delayed()->nop(); 4674 4675 return start; 4676 } 4677 4678 #define ADLER32_NUM_TEMPS 16 4679 4680 /** 4681 * Arguments: 4682 * 4683 * Inputs: 4684 * O0 - int adler 4685 * O1 - byte* buff 4686 * O2 - int len 4687 * 4688 * Output: 4689 * O0 - int adler result 4690 */ 4691 address generate_updateBytesAdler32() { 4692 __ align(CodeEntryAlignment); 4693 StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32"); 4694 address start = __ pc(); 4695 4696 Label L_cleanup_loop, L_cleanup_loop_check; 4697 Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check; 4698 Label L_nmax_check_done; 4699 4700 // Aliases 4701 Register s1 = O0; 4702 Register s2 = O3; 4703 Register buff = O1; 4704 Register len = O2; 4705 Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7}; 4706 4707 // Max number of bytes we can process before having to take the mod 4708 // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 4709 unsigned long NMAX = 0x15B0; 4710 4711 // Zero-out the upper bits of len 4712 __ clruwu(len); 4713 4714 // Create the mask 0xFFFF 4715 __ set64(0x00FFFF, O4, O5); // O5 is the temp register 4716 4717 // s1 is initialized to the lower 16 bits of adler 4718 // s2 is initialized to the upper 16 bits of adler 4719 __ srlx(O0, 16, O5); // adler >> 16 4720 __ and3(O0, O4, s1); // s1 = (adler & 0xFFFF) 4721 __ and3(O5, O4, s2); // s2 = ((adler >> 16) & 0xFFFF) 4722 4723 // The pipelined loop needs at least 16 elements for 1 iteration 4724 // It does check this, but it is more effective to skip to the cleanup loop 4725 // Setup the constant for cutoff checking 4726 __ mov(15, O4); 4727 4728 // Check if we are above the cutoff, if not go to the cleanup loop immediately 4729 __ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check); 4730 4731 // Free up some registers for our use 4732 for (int i = 0; i < ADLER32_NUM_TEMPS; i++) { 4733 __ movxtod(temp[i], as_FloatRegister(2*i)); 4734 } 4735 4736 // Loop maintenance stuff is done at the end of the loop, so skip to there 4737 __ ba_short(L_main_loop_check); 4738 4739 __ BIND(L_main_loop); 4740 4741 // Prologue for inner loop 4742 __ ldub(buff, 0, L0); 4743 __ dec(O5); 4744 4745 for (int i = 1; i < 8; i++) { 4746 __ ldub(buff, i, temp[i]); 4747 } 4748 4749 __ inc(buff, 8); 4750 4751 // Inner loop processes 16 elements at a time, might never execute if only 16 elements 4752 // to be processed by the outter loop 4753 __ ba_short(L_inner_loop_check); 4754 4755 __ BIND(L_inner_loop); 4756 4757 for (int i = 0; i < 8; i++) { 4758 __ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]); 4759 __ add(s1, temp[i], s1); 4760 __ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]); 4761 __ add(s2, s1, s2); 4762 } 4763 4764 // Original temp 0-7 used and new loads to temp 0-7 issued 4765 // temp 8-15 ready to be consumed 4766 __ add(s1, I0, s1); 4767 __ dec(O5); 4768 __ add(s2, s1, s2); 4769 __ add(s1, I1, s1); 4770 __ inc(buff, 16); 4771 __ add(s2, s1, s2); 4772 4773 for (int i = 0; i < 6; i++) { 4774 __ add(s1, temp[10+i], s1); 4775 __ add(s2, s1, s2); 4776 } 4777 4778 __ BIND(L_inner_loop_check); 4779 __ nop(); 4780 __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop); 4781 4782 // Epilogue 4783 for (int i = 0; i < 4; i++) { 4784 __ ldub(buff, (2*i), temp[8+(2*i)]); 4785 __ add(s1, temp[i], s1); 4786 __ ldub(buff, (2*i)+1, temp[8+(2*i)+1]); 4787 __ add(s2, s1, s2); 4788 } 4789 4790 __ add(s1, temp[4], s1); 4791 __ inc(buff, 8); 4792 4793 for (int i = 0; i < 11; i++) { 4794 __ add(s2, s1, s2); 4795 __ add(s1, temp[5+i], s1); 4796 } 4797 4798 __ add(s2, s1, s2); 4799 4800 // Take the mod for s1 and s2 4801 __ set64(0xFFF1, L0, L1); 4802 __ udivx(s1, L0, L1); 4803 __ udivx(s2, L0, L2); 4804 __ mulx(L0, L1, L1); 4805 __ mulx(L0, L2, L2); 4806 __ sub(s1, L1, s1); 4807 __ sub(s2, L2, s2); 4808 4809 // Make sure there is something left to process 4810 __ BIND(L_main_loop_check); 4811 __ set64(NMAX, L0, L1); 4812 // k = len < NMAX ? len : NMAX 4813 __ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done); 4814 __ andn(len, 0x0F, L0); // only loop a multiple of 16 times 4815 __ BIND(L_nmax_check_done); 4816 __ mov(L0, O5); 4817 __ sub(len, L0, len); // len -= k 4818 4819 __ srlx(O5, 4, O5); // multiplies of 16 4820 __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop); 4821 4822 // Restore anything we used, take the mod one last time, combine and return 4823 // Restore any registers we saved 4824 for (int i = 0; i < ADLER32_NUM_TEMPS; i++) { 4825 __ movdtox(as_FloatRegister(2*i), temp[i]); 4826 } 4827 4828 // There might be nothing left to process 4829 __ ba_short(L_cleanup_loop_check); 4830 4831 __ BIND(L_cleanup_loop); 4832 __ ldub(buff, 0, O4); // load single byte form buffer 4833 __ inc(buff); // buff++ 4834 __ add(s1, O4, s1); // s1 += *buff++; 4835 __ dec(len); // len-- 4836 __ add(s1, s2, s2); // s2 += s1; 4837 __ BIND(L_cleanup_loop_check); 4838 __ nop(); 4839 __ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop); 4840 4841 // Take the mod one last time 4842 __ set64(0xFFF1, O1, O2); 4843 __ udivx(s1, O1, O2); 4844 __ udivx(s2, O1, O5); 4845 __ mulx(O1, O2, O2); 4846 __ mulx(O1, O5, O5); 4847 __ sub(s1, O2, s1); 4848 __ sub(s2, O5, s2); 4849 4850 // Combine lower bits and higher bits 4851 __ sllx(s2, 16, s2); // s2 = s2 << 16 4852 __ or3(s1, s2, s1); // adler = s2 | s1 4853 // Final return value is in O0 4854 __ retl(); 4855 __ delayed()->nop(); 4856 4857 return start; 4858 } 4859 4860 /** 4861 * Arguments: 4862 * 4863 * Inputs: 4864 * O0 - int crc 4865 * O1 - byte* buf 4866 * O2 - int len 4867 * O3 - int* table 4868 * 4869 * Output: 4870 * O0 - int crc result 4871 */ 4872 address generate_updateBytesCRC32() { 4873 assert(UseCRC32Intrinsics, "need VIS3 instructions"); 4874 4875 __ align(CodeEntryAlignment); 4876 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 4877 address start = __ pc(); 4878 4879 const Register crc = O0; // crc 4880 const Register buf = O1; // source java byte array address 4881 const Register len = O2; // length 4882 const Register table = O3; // crc_table address (reuse register) 4883 4884 __ kernel_crc32(crc, buf, len, table); 4885 4886 __ retl(); 4887 __ delayed()->nop(); 4888 4889 return start; 4890 } 4891 4892 /** 4893 * Arguments: 4894 * 4895 * Inputs: 4896 * I0 - int* x-addr 4897 * I1 - int x-len 4898 * I2 - int* y-addr 4899 * I3 - int y-len 4900 * I4 - int* z-addr (output vector) 4901 * I5 - int z-len 4902 */ 4903 address generate_multiplyToLen() { 4904 assert(UseMultiplyToLenIntrinsic, "need VIS3 instructions"); 4905 4906 __ align(CodeEntryAlignment); 4907 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4908 address start = __ pc(); 4909 4910 __ save_frame(0); 4911 4912 const Register xptr = I0; // input address 4913 const Register xlen = I1; // ...and length in 32b-words 4914 const Register yptr = I2; // 4915 const Register ylen = I3; // 4916 const Register zptr = I4; // output address 4917 const Register zlen = I5; // ...and length in 32b-words 4918 4919 /* The minimal "limb" representation suggest that odd length vectors are as 4920 * likely as even length dittos. This in turn suggests that we need to cope 4921 * with odd/even length arrays and data not aligned properly for 64-bit read 4922 * and write operations. We thus use a number of different kernels: 4923 * 4924 * if (is_even(x.len) && is_even(y.len)) 4925 * if (is_align64(x) && is_align64(y) && is_align64(z)) 4926 * if (x.len == y.len && 16 <= x.len && x.len <= 64) 4927 * memv_mult_mpmul(...) 4928 * else 4929 * memv_mult_64x64(...) 4930 * else 4931 * memv_mult_64x64u(...) 4932 * else 4933 * memv_mult_32x32(...) 4934 * 4935 * Here we assume VIS3 support (for 'umulxhi', 'addxc' and 'addxccc'). 4936 * In case CBCOND instructions are supported, we will use 'cxbX'. If the 4937 * MPMUL instruction is supported, we will generate a kernel using 'mpmul' 4938 * (for vectors with proper characteristics). 4939 */ 4940 const Register tmp0 = L0; 4941 const Register tmp1 = L1; 4942 4943 Label L_mult_32x32; 4944 Label L_mult_64x64u; 4945 Label L_mult_64x64; 4946 Label L_exit; 4947 4948 if_both_even(xlen, ylen, tmp0, false, L_mult_32x32); 4949 if_all3_aligned(xptr, yptr, zptr, tmp1, 64, false, L_mult_64x64u); 4950 4951 if (UseMPMUL) { 4952 if_eq(xlen, ylen, false, L_mult_64x64); 4953 if_in_rng(xlen, 16, 64, tmp0, tmp1, false, L_mult_64x64); 4954 4955 // 1. Multiply naturally aligned 64b-datums using a generic 'mpmul' kernel, 4956 // operating on equal length vectors of size [16..64]. 4957 gen_mult_mpmul(xlen, xptr, yptr, zptr, L_exit); 4958 } 4959 4960 // 2. Multiply naturally aligned 64-bit datums (64x64). 4961 __ bind(L_mult_64x64); 4962 gen_mult_64x64(xptr, xlen, yptr, ylen, zptr, zlen, L_exit); 4963 4964 // 3. Multiply unaligned 64-bit datums (64x64). 4965 __ bind(L_mult_64x64u); 4966 gen_mult_64x64_unaligned(xptr, xlen, yptr, ylen, zptr, zlen, L_exit); 4967 4968 // 4. Multiply naturally aligned 32-bit datums (32x32). 4969 __ bind(L_mult_32x32); 4970 gen_mult_32x32(xptr, xlen, yptr, ylen, zptr, zlen, L_exit); 4971 4972 __ bind(L_exit); 4973 __ ret(); 4974 __ delayed()->restore(); 4975 4976 return start; 4977 } 4978 4979 // Additional help functions used by multiplyToLen generation. 4980 4981 void if_both_even(Register r1, Register r2, Register tmp, bool iseven, Label &L) 4982 { 4983 __ or3(r1, r2, tmp); 4984 __ andcc(tmp, 0x1, tmp); 4985 __ br_icc_zero(iseven, Assembler::pn, L); 4986 } 4987 4988 void if_all3_aligned(Register r1, Register r2, Register r3, 4989 Register tmp, uint align, bool isalign, Label &L) 4990 { 4991 __ or3(r1, r2, tmp); 4992 __ or3(r3, tmp, tmp); 4993 __ andcc(tmp, (align - 1), tmp); 4994 __ br_icc_zero(isalign, Assembler::pn, L); 4995 } 4996 4997 void if_eq(Register x, Register y, bool iseq, Label &L) 4998 { 4999 Assembler::Condition cf = (iseq ? Assembler::equal : Assembler::notEqual); 5000 __ cmp_and_br_short(x, y, cf, Assembler::pt, L); 5001 } 5002 5003 void if_in_rng(Register x, int lb, int ub, Register t1, Register t2, bool inrng, Label &L) 5004 { 5005 assert(Assembler::is_simm13(lb), "Small ints only!"); 5006 assert(Assembler::is_simm13(ub), "Small ints only!"); 5007 // Compute (x - lb) * (ub - x) >= 0 5008 // NOTE: With the local use of this routine, we rely on small integers to 5009 // guarantee that we do not overflow in the multiplication. 5010 __ add(G0, ub, t2); 5011 __ sub(x, lb, t1); 5012 __ sub(t2, x, t2); 5013 __ mulx(t1, t2, t1); 5014 Assembler::Condition cf = (inrng ? Assembler::greaterEqual : Assembler::less); 5015 __ cmp_and_br_short(t1, G0, cf, Assembler::pt, L); 5016 } 5017 5018 void ldd_entry(Register base, Register offs, FloatRegister dest) 5019 { 5020 __ ldd(base, offs, dest); 5021 __ inc(offs, 8); 5022 } 5023 5024 void ldx_entry(Register base, Register offs, Register dest) 5025 { 5026 __ ldx(base, offs, dest); 5027 __ inc(offs, 8); 5028 } 5029 5030 void mpmul_entry(int m, Label &next) 5031 { 5032 __ mpmul(m); 5033 __ cbcond(Assembler::equal, Assembler::icc, G0, G0, next); 5034 } 5035 5036 void stx_entry(Label &L, Register r1, Register r2, Register base, Register offs) 5037 { 5038 __ bind(L); 5039 __ stx(r1, base, offs); 5040 __ inc(offs, 8); 5041 __ stx(r2, base, offs); 5042 __ inc(offs, 8); 5043 } 5044 5045 void offs_entry(Label &Lbl0, Label &Lbl1) 5046 { 5047 assert(Lbl0.is_bound(), "must be"); 5048 assert(Lbl1.is_bound(), "must be"); 5049 5050 int offset = Lbl0.loc_pos() - Lbl1.loc_pos(); 5051 5052 __ emit_data(offset); 5053 } 5054 5055 /* Generate the actual multiplication kernels for BigInteger vectors: 5056 * 5057 * 1. gen_mult_mpmul(...) 5058 * 5059 * 2. gen_mult_64x64(...) 5060 * 5061 * 3. gen_mult_64x64_unaligned(...) 5062 * 5063 * 4. gen_mult_32x32(...) 5064 */ 5065 void gen_mult_mpmul(Register len, Register xptr, Register yptr, Register zptr, 5066 Label &L_exit) 5067 { 5068 const Register zero = G0; 5069 const Register gxp = G1; // Need to use global registers across RWs. 5070 const Register gyp = G2; 5071 const Register gzp = G3; 5072 const Register disp = G4; 5073 const Register offs = G5; 5074 5075 __ mov(xptr, gxp); 5076 __ mov(yptr, gyp); 5077 __ mov(zptr, gzp); 5078 5079 /* Compute jump vector entry: 5080 * 5081 * 1. mpmul input size (0..31) x 64b 5082 * 2. vector input size in 32b limbs (even number) 5083 * 3. branch entries in reverse order (31..0), using two 5084 * instructions per entry (2 * 4 bytes). 5085 * 5086 * displacement = byte_offset(bra_offset(len)) 5087 * = byte_offset((64 - len)/2) 5088 * = 8 * (64 - len)/2 5089 * = 4 * (64 - len) 5090 */ 5091 Register temp = I5; // Alright to use input regs. in first batch. 5092 5093 __ sub(zero, len, temp); 5094 __ add(temp, 64, temp); 5095 __ sllx(temp, 2, disp); // disp := (64 - len) << 2 5096 5097 // Dispatch relative current PC, into instruction table below. 5098 __ rdpc(temp); 5099 __ add(temp, 16, temp); 5100 __ jmp(temp, disp); 5101 __ delayed()->clr(offs); 5102 5103 ldd_entry(gxp, offs, F22); 5104 ldd_entry(gxp, offs, F20); 5105 ldd_entry(gxp, offs, F18); 5106 ldd_entry(gxp, offs, F16); 5107 ldd_entry(gxp, offs, F14); 5108 ldd_entry(gxp, offs, F12); 5109 ldd_entry(gxp, offs, F10); 5110 ldd_entry(gxp, offs, F8); 5111 ldd_entry(gxp, offs, F6); 5112 ldd_entry(gxp, offs, F4); 5113 ldx_entry(gxp, offs, I5); 5114 ldx_entry(gxp, offs, I4); 5115 ldx_entry(gxp, offs, I3); 5116 ldx_entry(gxp, offs, I2); 5117 ldx_entry(gxp, offs, I1); 5118 ldx_entry(gxp, offs, I0); 5119 ldx_entry(gxp, offs, L7); 5120 ldx_entry(gxp, offs, L6); 5121 ldx_entry(gxp, offs, L5); 5122 ldx_entry(gxp, offs, L4); 5123 ldx_entry(gxp, offs, L3); 5124 ldx_entry(gxp, offs, L2); 5125 ldx_entry(gxp, offs, L1); 5126 ldx_entry(gxp, offs, L0); 5127 ldd_entry(gxp, offs, F2); 5128 ldd_entry(gxp, offs, F0); 5129 ldx_entry(gxp, offs, O5); 5130 ldx_entry(gxp, offs, O4); 5131 ldx_entry(gxp, offs, O3); 5132 ldx_entry(gxp, offs, O2); 5133 ldx_entry(gxp, offs, O1); 5134 ldx_entry(gxp, offs, O0); 5135 5136 __ save(SP, -176, SP); 5137 5138 const Register addr = gxp; // Alright to reuse 'gxp'. 5139 5140 // Dispatch relative current PC, into instruction table below. 5141 __ rdpc(addr); 5142 __ add(addr, 16, addr); 5143 __ jmp(addr, disp); 5144 __ delayed()->clr(offs); 5145 5146 ldd_entry(gyp, offs, F58); 5147 ldd_entry(gyp, offs, F56); 5148 ldd_entry(gyp, offs, F54); 5149 ldd_entry(gyp, offs, F52); 5150 ldd_entry(gyp, offs, F50); 5151 ldd_entry(gyp, offs, F48); 5152 ldd_entry(gyp, offs, F46); 5153 ldd_entry(gyp, offs, F44); 5154 ldd_entry(gyp, offs, F42); 5155 ldd_entry(gyp, offs, F40); 5156 ldd_entry(gyp, offs, F38); 5157 ldd_entry(gyp, offs, F36); 5158 ldd_entry(gyp, offs, F34); 5159 ldd_entry(gyp, offs, F32); 5160 ldd_entry(gyp, offs, F30); 5161 ldd_entry(gyp, offs, F28); 5162 ldd_entry(gyp, offs, F26); 5163 ldd_entry(gyp, offs, F24); 5164 ldx_entry(gyp, offs, O5); 5165 ldx_entry(gyp, offs, O4); 5166 ldx_entry(gyp, offs, O3); 5167 ldx_entry(gyp, offs, O2); 5168 ldx_entry(gyp, offs, O1); 5169 ldx_entry(gyp, offs, O0); 5170 ldx_entry(gyp, offs, L7); 5171 ldx_entry(gyp, offs, L6); 5172 ldx_entry(gyp, offs, L5); 5173 ldx_entry(gyp, offs, L4); 5174 ldx_entry(gyp, offs, L3); 5175 ldx_entry(gyp, offs, L2); 5176 ldx_entry(gyp, offs, L1); 5177 ldx_entry(gyp, offs, L0); 5178 5179 __ save(SP, -176, SP); 5180 __ save(SP, -176, SP); 5181 __ save(SP, -176, SP); 5182 __ save(SP, -176, SP); 5183 __ save(SP, -176, SP); 5184 5185 Label L_mpmul_restore_4, L_mpmul_restore_3, L_mpmul_restore_2; 5186 Label L_mpmul_restore_1, L_mpmul_restore_0; 5187 5188 // Dispatch relative current PC, into instruction table below. 5189 __ rdpc(addr); 5190 __ add(addr, 16, addr); 5191 __ jmp(addr, disp); 5192 __ delayed()->clr(offs); 5193 5194 mpmul_entry(31, L_mpmul_restore_0); 5195 mpmul_entry(30, L_mpmul_restore_0); 5196 mpmul_entry(29, L_mpmul_restore_0); 5197 mpmul_entry(28, L_mpmul_restore_0); 5198 mpmul_entry(27, L_mpmul_restore_1); 5199 mpmul_entry(26, L_mpmul_restore_1); 5200 mpmul_entry(25, L_mpmul_restore_1); 5201 mpmul_entry(24, L_mpmul_restore_1); 5202 mpmul_entry(23, L_mpmul_restore_1); 5203 mpmul_entry(22, L_mpmul_restore_1); 5204 mpmul_entry(21, L_mpmul_restore_1); 5205 mpmul_entry(20, L_mpmul_restore_2); 5206 mpmul_entry(19, L_mpmul_restore_2); 5207 mpmul_entry(18, L_mpmul_restore_2); 5208 mpmul_entry(17, L_mpmul_restore_2); 5209 mpmul_entry(16, L_mpmul_restore_2); 5210 mpmul_entry(15, L_mpmul_restore_2); 5211 mpmul_entry(14, L_mpmul_restore_2); 5212 mpmul_entry(13, L_mpmul_restore_3); 5213 mpmul_entry(12, L_mpmul_restore_3); 5214 mpmul_entry(11, L_mpmul_restore_3); 5215 mpmul_entry(10, L_mpmul_restore_3); 5216 mpmul_entry( 9, L_mpmul_restore_3); 5217 mpmul_entry( 8, L_mpmul_restore_3); 5218 mpmul_entry( 7, L_mpmul_restore_3); 5219 mpmul_entry( 6, L_mpmul_restore_4); 5220 mpmul_entry( 5, L_mpmul_restore_4); 5221 mpmul_entry( 4, L_mpmul_restore_4); 5222 mpmul_entry( 3, L_mpmul_restore_4); 5223 mpmul_entry( 2, L_mpmul_restore_4); 5224 mpmul_entry( 1, L_mpmul_restore_4); 5225 mpmul_entry( 0, L_mpmul_restore_4); 5226 5227 Label L_z31, L_z30, L_z29, L_z28, L_z27, L_z26, L_z25, L_z24; 5228 Label L_z23, L_z22, L_z21, L_z20, L_z19, L_z18, L_z17, L_z16; 5229 Label L_z15, L_z14, L_z13, L_z12, L_z11, L_z10, L_z09, L_z08; 5230 Label L_z07, L_z06, L_z05, L_z04, L_z03, L_z02, L_z01, L_z00; 5231 5232 Label L_zst_base; // Store sequence base address. 5233 __ bind(L_zst_base); 5234 5235 stx_entry(L_z31, L7, L6, gzp, offs); 5236 stx_entry(L_z30, L5, L4, gzp, offs); 5237 stx_entry(L_z29, L3, L2, gzp, offs); 5238 stx_entry(L_z28, L1, L0, gzp, offs); 5239 __ restore(); 5240 stx_entry(L_z27, O5, O4, gzp, offs); 5241 stx_entry(L_z26, O3, O2, gzp, offs); 5242 stx_entry(L_z25, O1, O0, gzp, offs); 5243 stx_entry(L_z24, L7, L6, gzp, offs); 5244 stx_entry(L_z23, L5, L4, gzp, offs); 5245 stx_entry(L_z22, L3, L2, gzp, offs); 5246 stx_entry(L_z21, L1, L0, gzp, offs); 5247 __ restore(); 5248 stx_entry(L_z20, O5, O4, gzp, offs); 5249 stx_entry(L_z19, O3, O2, gzp, offs); 5250 stx_entry(L_z18, O1, O0, gzp, offs); 5251 stx_entry(L_z17, L7, L6, gzp, offs); 5252 stx_entry(L_z16, L5, L4, gzp, offs); 5253 stx_entry(L_z15, L3, L2, gzp, offs); 5254 stx_entry(L_z14, L1, L0, gzp, offs); 5255 __ restore(); 5256 stx_entry(L_z13, O5, O4, gzp, offs); 5257 stx_entry(L_z12, O3, O2, gzp, offs); 5258 stx_entry(L_z11, O1, O0, gzp, offs); 5259 stx_entry(L_z10, L7, L6, gzp, offs); 5260 stx_entry(L_z09, L5, L4, gzp, offs); 5261 stx_entry(L_z08, L3, L2, gzp, offs); 5262 stx_entry(L_z07, L1, L0, gzp, offs); 5263 __ restore(); 5264 stx_entry(L_z06, O5, O4, gzp, offs); 5265 stx_entry(L_z05, O3, O2, gzp, offs); 5266 stx_entry(L_z04, O1, O0, gzp, offs); 5267 stx_entry(L_z03, L7, L6, gzp, offs); 5268 stx_entry(L_z02, L5, L4, gzp, offs); 5269 stx_entry(L_z01, L3, L2, gzp, offs); 5270 stx_entry(L_z00, L1, L0, gzp, offs); 5271 5272 __ restore(); 5273 __ restore(); 5274 // Exit out of 'mpmul' routine, back to multiplyToLen. 5275 __ ba_short(L_exit); 5276 5277 Label L_zst_offs; 5278 __ bind(L_zst_offs); 5279 5280 offs_entry(L_z31, L_zst_base); // index 31: 2048x2048 5281 offs_entry(L_z30, L_zst_base); 5282 offs_entry(L_z29, L_zst_base); 5283 offs_entry(L_z28, L_zst_base); 5284 offs_entry(L_z27, L_zst_base); 5285 offs_entry(L_z26, L_zst_base); 5286 offs_entry(L_z25, L_zst_base); 5287 offs_entry(L_z24, L_zst_base); 5288 offs_entry(L_z23, L_zst_base); 5289 offs_entry(L_z22, L_zst_base); 5290 offs_entry(L_z21, L_zst_base); 5291 offs_entry(L_z20, L_zst_base); 5292 offs_entry(L_z19, L_zst_base); 5293 offs_entry(L_z18, L_zst_base); 5294 offs_entry(L_z17, L_zst_base); 5295 offs_entry(L_z16, L_zst_base); 5296 offs_entry(L_z15, L_zst_base); 5297 offs_entry(L_z14, L_zst_base); 5298 offs_entry(L_z13, L_zst_base); 5299 offs_entry(L_z12, L_zst_base); 5300 offs_entry(L_z11, L_zst_base); 5301 offs_entry(L_z10, L_zst_base); 5302 offs_entry(L_z09, L_zst_base); 5303 offs_entry(L_z08, L_zst_base); 5304 offs_entry(L_z07, L_zst_base); 5305 offs_entry(L_z06, L_zst_base); 5306 offs_entry(L_z05, L_zst_base); 5307 offs_entry(L_z04, L_zst_base); 5308 offs_entry(L_z03, L_zst_base); 5309 offs_entry(L_z02, L_zst_base); 5310 offs_entry(L_z01, L_zst_base); 5311 offs_entry(L_z00, L_zst_base); // index 0: 64x64 5312 5313 __ bind(L_mpmul_restore_4); 5314 __ restore(); 5315 __ bind(L_mpmul_restore_3); 5316 __ restore(); 5317 __ bind(L_mpmul_restore_2); 5318 __ restore(); 5319 __ bind(L_mpmul_restore_1); 5320 __ restore(); 5321 __ bind(L_mpmul_restore_0); 5322 5323 // Dispatch via offset vector entry, into z-store sequence. 5324 Label L_zst_rdpc; 5325 __ bind(L_zst_rdpc); 5326 5327 assert(L_zst_base.is_bound(), "must be"); 5328 assert(L_zst_offs.is_bound(), "must be"); 5329 assert(L_zst_rdpc.is_bound(), "must be"); 5330 5331 int dbase = L_zst_rdpc.loc_pos() - L_zst_base.loc_pos(); 5332 int doffs = L_zst_rdpc.loc_pos() - L_zst_offs.loc_pos(); 5333 5334 temp = gyp; // Alright to reuse 'gyp'. 5335 5336 __ rdpc(addr); 5337 __ sub(addr, doffs, temp); 5338 __ srlx(disp, 1, disp); 5339 __ lduw(temp, disp, offs); 5340 __ sub(addr, dbase, temp); 5341 __ jmp(temp, offs); 5342 __ delayed()->clr(offs); 5343 } 5344 5345 void gen_mult_64x64(Register xp, Register xn, 5346 Register yp, Register yn, 5347 Register zp, Register zn, Label &L_exit) 5348 { 5349 // Assuming that a stack frame has already been created, i.e. local and 5350 // output registers are available for immediate use. 5351 5352 const Register ri = L0; // Outer loop index, xv[i] 5353 const Register rj = L1; // Inner loop index, yv[j] 5354 const Register rk = L2; // Output loop index, zv[k] 5355 const Register rx = L4; // x-vector datum [i] 5356 const Register ry = L5; // y-vector datum [j] 5357 const Register rz = L6; // z-vector datum [k] 5358 const Register rc = L7; // carry over (to z-vector datum [k-1]) 5359 5360 const Register lop = O0; // lo-64b product 5361 const Register hip = O1; // hi-64b product 5362 5363 const Register zero = G0; 5364 5365 Label L_loop_i, L_exit_loop_i; 5366 Label L_loop_j; 5367 Label L_loop_i2, L_exit_loop_i2; 5368 5369 __ srlx(xn, 1, xn); // index for u32 to u64 ditto 5370 __ srlx(yn, 1, yn); // index for u32 to u64 ditto 5371 __ srlx(zn, 1, zn); // index for u32 to u64 ditto 5372 __ dec(xn); // Adjust [0..(N/2)-1] 5373 __ dec(yn); 5374 __ dec(zn); 5375 __ clr(rc); // u64 c = 0 5376 __ sllx(xn, 3, ri); // int i = xn (byte offset i = 8*xn) 5377 __ sllx(yn, 3, rj); // int j = yn (byte offset i = 8*xn) 5378 __ sllx(zn, 3, rk); // int k = zn (byte offset k = 8*zn) 5379 __ ldx(yp, rj, ry); // u64 y = yp[yn] 5380 5381 // for (int i = xn; i >= 0; i--) 5382 __ bind(L_loop_i); 5383 5384 __ cmp_and_br_short(ri, 0, // i >= 0 5385 Assembler::less, Assembler::pn, L_exit_loop_i); 5386 __ ldx(xp, ri, rx); // x = xp[i] 5387 __ mulx(rx, ry, lop); // lo-64b-part of result 64x64 5388 __ umulxhi(rx, ry, hip); // hi-64b-part of result 64x64 5389 __ addcc(rc, lop, lop); // Accumulate lower order bits (producing carry) 5390 __ addxc(hip, zero, rc); // carry over to next datum [k-1] 5391 __ stx(lop, zp, rk); // z[k] = lop 5392 __ dec(rk, 8); // k-- 5393 __ dec(ri, 8); // i-- 5394 __ ba_short(L_loop_i); 5395 5396 __ bind(L_exit_loop_i); 5397 __ stx(rc, zp, rk); // z[k] = c 5398 5399 // for (int j = yn - 1; j >= 0; j--) 5400 __ sllx(yn, 3, rj); // int j = yn - 1 (byte offset j = 8*yn) 5401 __ dec(rj, 8); 5402 5403 __ bind(L_loop_j); 5404 5405 __ cmp_and_br_short(rj, 0, // j >= 0 5406 Assembler::less, Assembler::pn, L_exit); 5407 __ clr(rc); // u64 c = 0 5408 __ ldx(yp, rj, ry); // u64 y = yp[j] 5409 5410 // for (int i = xn, k = --zn; i >= 0; i--) 5411 __ dec(zn); // --zn 5412 __ sllx(xn, 3, ri); // int i = xn (byte offset i = 8*xn) 5413 __ sllx(zn, 3, rk); // int k = zn (byte offset k = 8*zn) 5414 5415 __ bind(L_loop_i2); 5416 5417 __ cmp_and_br_short(ri, 0, // i >= 0 5418 Assembler::less, Assembler::pn, L_exit_loop_i2); 5419 __ ldx(xp, ri, rx); // x = xp[i] 5420 __ ldx(zp, rk, rz); // z = zp[k], accumulator 5421 __ mulx(rx, ry, lop); // lo-64b-part of result 64x64 5422 __ umulxhi(rx, ry, hip); // hi-64b-part of result 64x64 5423 __ addcc(rz, rc, rz); // Accumulate lower order bits, 5424 __ addxc(hip, zero, rc); // Accumulate higher order bits to carry 5425 __ addcc(rz, lop, rz); // z += lo(p) + c 5426 __ addxc(rc, zero, rc); 5427 __ stx(rz, zp, rk); // zp[k] = z 5428 __ dec(rk, 8); // k-- 5429 __ dec(ri, 8); // i-- 5430 __ ba_short(L_loop_i2); 5431 5432 __ bind(L_exit_loop_i2); 5433 __ stx(rc, zp, rk); // z[k] = c 5434 __ dec(rj, 8); // j-- 5435 __ ba_short(L_loop_j); 5436 } 5437 5438 void gen_mult_64x64_unaligned(Register xp, Register xn, 5439 Register yp, Register yn, 5440 Register zp, Register zn, Label &L_exit) 5441 { 5442 // Assuming that a stack frame has already been created, i.e. local and 5443 // output registers are available for use. 5444 5445 const Register xpc = L0; // Outer loop cursor, xp[i] 5446 const Register ypc = L1; // Inner loop cursor, yp[j] 5447 const Register zpc = L2; // Output loop cursor, zp[k] 5448 const Register rx = L4; // x-vector datum [i] 5449 const Register ry = L5; // y-vector datum [j] 5450 const Register rz = L6; // z-vector datum [k] 5451 const Register rc = L7; // carry over (to z-vector datum [k-1]) 5452 const Register rt = O2; 5453 5454 const Register lop = O0; // lo-64b product 5455 const Register hip = O1; // hi-64b product 5456 5457 const Register zero = G0; 5458 5459 Label L_loop_i, L_exit_loop_i; 5460 Label L_loop_j; 5461 Label L_loop_i2, L_exit_loop_i2; 5462 5463 __ srlx(xn, 1, xn); // index for u32 to u64 ditto 5464 __ srlx(yn, 1, yn); // index for u32 to u64 ditto 5465 __ srlx(zn, 1, zn); // index for u32 to u64 ditto 5466 __ dec(xn); // Adjust [0..(N/2)-1] 5467 __ dec(yn); 5468 __ dec(zn); 5469 __ clr(rc); // u64 c = 0 5470 __ sllx(xn, 3, xpc); // u32* xpc = &xp[xn] (byte offset 8*xn) 5471 __ add(xp, xpc, xpc); 5472 __ sllx(yn, 3, ypc); // u32* ypc = &yp[yn] (byte offset 8*yn) 5473 __ add(yp, ypc, ypc); 5474 __ sllx(zn, 3, zpc); // u32* zpc = &zp[zn] (byte offset 8*zn) 5475 __ add(zp, zpc, zpc); 5476 __ lduw(ypc, 0, rt); // u64 y = yp[yn] 5477 __ lduw(ypc, 4, ry); // ... 5478 __ sllx(rt, 32, rt); 5479 __ or3(rt, ry, ry); 5480 5481 // for (int i = xn; i >= 0; i--) 5482 __ bind(L_loop_i); 5483 5484 __ cmp_and_brx_short(xpc, xp,// i >= 0 5485 Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i); 5486 __ lduw(xpc, 0, rt); // u64 x = xp[i] 5487 __ lduw(xpc, 4, rx); // ... 5488 __ sllx(rt, 32, rt); 5489 __ or3(rt, rx, rx); 5490 __ mulx(rx, ry, lop); // lo-64b-part of result 64x64 5491 __ umulxhi(rx, ry, hip); // hi-64b-part of result 64x64 5492 __ addcc(rc, lop, lop); // Accumulate lower order bits (producing carry) 5493 __ addxc(hip, zero, rc); // carry over to next datum [k-1] 5494 __ srlx(lop, 32, rt); 5495 __ stw(rt, zpc, 0); // z[k] = lop 5496 __ stw(lop, zpc, 4); // ... 5497 __ dec(zpc, 8); // k-- (zpc--) 5498 __ dec(xpc, 8); // i-- (xpc--) 5499 __ ba_short(L_loop_i); 5500 5501 __ bind(L_exit_loop_i); 5502 __ srlx(rc, 32, rt); 5503 __ stw(rt, zpc, 0); // z[k] = c 5504 __ stw(rc, zpc, 4); 5505 5506 // for (int j = yn - 1; j >= 0; j--) 5507 __ sllx(yn, 3, ypc); // u32* ypc = &yp[yn] (byte offset 8*yn) 5508 __ add(yp, ypc, ypc); 5509 __ dec(ypc, 8); // yn - 1 (ypc--) 5510 5511 __ bind(L_loop_j); 5512 5513 __ cmp_and_brx_short(ypc, yp,// j >= 0 5514 Assembler::lessUnsigned, Assembler::pn, L_exit); 5515 __ clr(rc); // u64 c = 0 5516 __ lduw(ypc, 0, rt); // u64 y = yp[j] (= *ypc) 5517 __ lduw(ypc, 4, ry); // ... 5518 __ sllx(rt, 32, rt); 5519 __ or3(rt, ry, ry); 5520 5521 // for (int i = xn, k = --zn; i >= 0; i--) 5522 __ sllx(xn, 3, xpc); // u32* xpc = &xp[xn] (byte offset 8*xn) 5523 __ add(xp, xpc, xpc); 5524 __ dec(zn); // --zn 5525 __ sllx(zn, 3, zpc); // u32* zpc = &zp[zn] (byte offset 8*zn) 5526 __ add(zp, zpc, zpc); 5527 5528 __ bind(L_loop_i2); 5529 5530 __ cmp_and_brx_short(xpc, xp,// i >= 0 5531 Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i2); 5532 __ lduw(xpc, 0, rt); // u64 x = xp[i] (= *xpc) 5533 __ lduw(xpc, 4, rx); // ... 5534 __ sllx(rt, 32, rt); 5535 __ or3(rt, rx, rx); 5536 5537 __ lduw(zpc, 0, rt); // u64 z = zp[k] (= *zpc) 5538 __ lduw(zpc, 4, rz); // ... 5539 __ sllx(rt, 32, rt); 5540 __ or3(rt, rz, rz); 5541 5542 __ mulx(rx, ry, lop); // lo-64b-part of result 64x64 5543 __ umulxhi(rx, ry, hip); // hi-64b-part of result 64x64 5544 __ addcc(rz, rc, rz); // Accumulate lower order bits... 5545 __ addxc(hip, zero, rc); // Accumulate higher order bits to carry 5546 __ addcc(rz, lop, rz); // ... z += lo(p) + c 5547 __ addxccc(rc, zero, rc); 5548 __ srlx(rz, 32, rt); 5549 __ stw(rt, zpc, 0); // zp[k] = z (*zpc = z) 5550 __ stw(rz, zpc, 4); 5551 __ dec(zpc, 8); // k-- (zpc--) 5552 __ dec(xpc, 8); // i-- (xpc--) 5553 __ ba_short(L_loop_i2); 5554 5555 __ bind(L_exit_loop_i2); 5556 __ srlx(rc, 32, rt); 5557 __ stw(rt, zpc, 0); // z[k] = c 5558 __ stw(rc, zpc, 4); 5559 __ dec(ypc, 8); // j-- (ypc--) 5560 __ ba_short(L_loop_j); 5561 } 5562 5563 void gen_mult_32x32(Register xp, Register xn, 5564 Register yp, Register yn, 5565 Register zp, Register zn, Label &L_exit) 5566 { 5567 // Assuming that a stack frame has already been created, i.e. local and 5568 // output registers are available for use. 5569 5570 const Register ri = L0; // Outer loop index, xv[i] 5571 const Register rj = L1; // Inner loop index, yv[j] 5572 const Register rk = L2; // Output loop index, zv[k] 5573 const Register rx = L4; // x-vector datum [i] 5574 const Register ry = L5; // y-vector datum [j] 5575 const Register rz = L6; // z-vector datum [k] 5576 const Register rc = L7; // carry over (to z-vector datum [k-1]) 5577 5578 const Register p64 = O0; // 64b product 5579 const Register z65 = O1; // carry+64b accumulator 5580 const Register c65 = O2; // carry at bit 65 5581 const Register c33 = O2; // carry at bit 33 (after shift) 5582 5583 const Register zero = G0; 5584 5585 Label L_loop_i, L_exit_loop_i; 5586 Label L_loop_j; 5587 Label L_loop_i2, L_exit_loop_i2; 5588 5589 __ dec(xn); // Adjust [0..N-1] 5590 __ dec(yn); 5591 __ dec(zn); 5592 __ clr(rc); // u32 c = 0 5593 __ sllx(xn, 2, ri); // int i = xn (byte offset i = 4*xn) 5594 __ sllx(yn, 2, rj); // int j = yn (byte offset i = 4*xn) 5595 __ sllx(zn, 2, rk); // int k = zn (byte offset k = 4*zn) 5596 __ lduw(yp, rj, ry); // u32 y = yp[yn] 5597 5598 // for (int i = xn; i >= 0; i--) 5599 __ bind(L_loop_i); 5600 5601 __ cmp_and_br_short(ri, 0, // i >= 0 5602 Assembler::less, Assembler::pn, L_exit_loop_i); 5603 __ lduw(xp, ri, rx); // x = xp[i] 5604 __ mulx(rx, ry, p64); // 64b result of 32x32 5605 __ addcc(rc, p64, z65); // Accumulate to 65 bits (producing carry) 5606 __ addxc(zero, zero, c65); // Materialise carry (in bit 65) into lsb, 5607 __ sllx(c65, 32, c33); // and shift into bit 33 5608 __ srlx(z65, 32, rc); // carry = c33 | hi(z65) >> 32 5609 __ add(c33, rc, rc); // carry over to next datum [k-1] 5610 __ stw(z65, zp, rk); // z[k] = lo(z65) 5611 __ dec(rk, 4); // k-- 5612 __ dec(ri, 4); // i-- 5613 __ ba_short(L_loop_i); 5614 5615 __ bind(L_exit_loop_i); 5616 __ stw(rc, zp, rk); // z[k] = c 5617 5618 // for (int j = yn - 1; j >= 0; j--) 5619 __ sllx(yn, 2, rj); // int j = yn - 1 (byte offset j = 4*yn) 5620 __ dec(rj, 4); 5621 5622 __ bind(L_loop_j); 5623 5624 __ cmp_and_br_short(rj, 0, // j >= 0 5625 Assembler::less, Assembler::pn, L_exit); 5626 __ clr(rc); // u32 c = 0 5627 __ lduw(yp, rj, ry); // u32 y = yp[j] 5628 5629 // for (int i = xn, k = --zn; i >= 0; i--) 5630 __ dec(zn); // --zn 5631 __ sllx(xn, 2, ri); // int i = xn (byte offset i = 4*xn) 5632 __ sllx(zn, 2, rk); // int k = zn (byte offset k = 4*zn) 5633 5634 __ bind(L_loop_i2); 5635 5636 __ cmp_and_br_short(ri, 0, // i >= 0 5637 Assembler::less, Assembler::pn, L_exit_loop_i2); 5638 __ lduw(xp, ri, rx); // x = xp[i] 5639 __ lduw(zp, rk, rz); // z = zp[k], accumulator 5640 __ mulx(rx, ry, p64); // 64b result of 32x32 5641 __ add(rz, rc, rz); // Accumulate lower order bits, 5642 __ addcc(rz, p64, z65); // z += lo(p64) + c 5643 __ addxc(zero, zero, c65); // Materialise carry (in bit 65) into lsb, 5644 __ sllx(c65, 32, c33); // and shift into bit 33 5645 __ srlx(z65, 32, rc); // carry = c33 | hi(z65) >> 32 5646 __ add(c33, rc, rc); // carry over to next datum [k-1] 5647 __ stw(z65, zp, rk); // zp[k] = lo(z65) 5648 __ dec(rk, 4); // k-- 5649 __ dec(ri, 4); // i-- 5650 __ ba_short(L_loop_i2); 5651 5652 __ bind(L_exit_loop_i2); 5653 __ stw(rc, zp, rk); // z[k] = c 5654 __ dec(rj, 4); // j-- 5655 __ ba_short(L_loop_j); 5656 } 5657 5658 5659 void generate_initial() { 5660 // Generates all stubs and initializes the entry points 5661 5662 //------------------------------------------------------------------------------------------------------------------------ 5663 // entry points that exist in all platforms 5664 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than 5665 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp. 5666 StubRoutines::_forward_exception_entry = generate_forward_exception(); 5667 5668 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 5669 StubRoutines::_catch_exception_entry = generate_catch_exception(); 5670 5671 //------------------------------------------------------------------------------------------------------------------------ 5672 // entry points that are platform specific 5673 StubRoutines::Sparc::_test_stop_entry = generate_test_stop(); 5674 5675 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine(); 5676 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows(); 5677 5678 // Build this early so it's available for the interpreter. 5679 StubRoutines::_throw_StackOverflowError_entry = 5680 generate_throw_exception("StackOverflowError throw_exception", 5681 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); 5682 StubRoutines::_throw_delayed_StackOverflowError_entry = 5683 generate_throw_exception("delayed StackOverflowError throw_exception", 5684 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError)); 5685 5686 if (UseCRC32Intrinsics) { 5687 // set table address before stub generation which use it 5688 StubRoutines::_crc_table_adr = (address)StubRoutines::Sparc::_crc_table; 5689 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 5690 } 5691 5692 if (UseCRC32CIntrinsics) { 5693 // set table address before stub generation which use it 5694 StubRoutines::_crc32c_table_addr = (address)StubRoutines::Sparc::_crc32c_table; 5695 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(); 5696 } 5697 } 5698 5699 5700 void generate_all() { 5701 // Generates all stubs and initializes the entry points 5702 5703 // Generate partial_subtype_check first here since its code depends on 5704 // UseZeroBaseCompressedOops which is defined after heap initialization. 5705 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check(); 5706 // These entry points require SharedInfo::stack0 to be set up in non-core builds 5707 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); 5708 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); 5709 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); 5710 5711 // support for verify_oop (must happen after universe_init) 5712 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); 5713 5714 // arraycopy stubs used by compilers 5715 generate_arraycopy_stubs(); 5716 5717 // Don't initialize the platform math functions since sparc 5718 // doesn't have intrinsics for these operations. 5719 5720 // Safefetch stubs. 5721 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 5722 &StubRoutines::_safefetch32_fault_pc, 5723 &StubRoutines::_safefetch32_continuation_pc); 5724 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 5725 &StubRoutines::_safefetchN_fault_pc, 5726 &StubRoutines::_safefetchN_continuation_pc); 5727 5728 // generate AES intrinsics code 5729 if (UseAESIntrinsics) { 5730 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 5731 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 5732 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 5733 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 5734 } 5735 // generate GHASH intrinsics code 5736 if (UseGHASHIntrinsics) { 5737 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 5738 } 5739 5740 // generate SHA1/SHA256/SHA512 intrinsics code 5741 if (UseSHA1Intrinsics) { 5742 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); 5743 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 5744 } 5745 if (UseSHA256Intrinsics) { 5746 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); 5747 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); 5748 } 5749 if (UseSHA512Intrinsics) { 5750 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); 5751 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); 5752 } 5753 // generate Adler32 intrinsics code 5754 if (UseAdler32Intrinsics) { 5755 StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32(); 5756 } 5757 5758 #ifdef COMPILER2 5759 // Intrinsics supported by C2 only: 5760 if (UseMultiplyToLenIntrinsic) { 5761 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 5762 } 5763 #endif // COMPILER2 5764 } 5765 5766 public: 5767 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 5768 // replace the standard masm with a special one: 5769 _masm = new MacroAssembler(code); 5770 5771 _stub_count = !all ? 0x100 : 0x200; 5772 if (all) { 5773 generate_all(); 5774 } else { 5775 generate_initial(); 5776 } 5777 5778 // make sure this stub is available for all local calls 5779 if (_atomic_add_stub.is_unbound()) { 5780 // generate a second time, if necessary 5781 (void) generate_atomic_add(); 5782 } 5783 } 5784 5785 5786 private: 5787 int _stub_count; 5788 void stub_prolog(StubCodeDesc* cdesc) { 5789 # ifdef ASSERT 5790 // put extra information in the stub code, to make it more readable 5791 // Write the high part of the address 5792 // [RGV] Check if there is a dependency on the size of this prolog 5793 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none); 5794 __ emit_data((intptr_t)cdesc, relocInfo::none); 5795 __ emit_data(++_stub_count, relocInfo::none); 5796 # endif 5797 align(true); 5798 } 5799 5800 void align(bool at_header = false) { 5801 // %%%%% move this constant somewhere else 5802 // UltraSPARC cache line size is 8 instructions: 5803 const unsigned int icache_line_size = 32; 5804 const unsigned int icache_half_line_size = 16; 5805 5806 if (at_header) { 5807 while ((intptr_t)(__ pc()) % icache_line_size != 0) { 5808 __ emit_data(0, relocInfo::none); 5809 } 5810 } else { 5811 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { 5812 __ nop(); 5813 } 5814 } 5815 } 5816 5817 }; // end class declaration 5818 5819 void StubGenerator_generate(CodeBuffer* code, bool all) { 5820 StubGenerator g(code, all); 5821 }