/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Note: The register L7 is used as L7_thread_cache, and may not be used
// any other way within this module.

static const Register& Lstub_temp = L2;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(a,b,c)
#else
#define inc_counter_np(counter, t1, t2) \
  BLOCK_COMMENT("inc_counter " #counter); \
  __ inc_counter(&counter, t1, t2);
#endif

  //----------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C

  address generate_call_stub(address& return_pc) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // Incoming arguments:
    //
    // o0         : call wrapper address
    // o1         : result (address)
    // o2         : result type
    // o3         : method
    // o4         : (interpreter) entry point
    // o5         : parameters (address)
    // [sp + 0x5c]: parameter size (in words)
    // [sp + 0x60]: thread
    //
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // | param. size   |
    // +---------------+ <--- sp + 0x60
    // | thread        |
    // +---------------+
    // |               |

    // note: if the link argument position changes, adjust
    //       the code in frame::entry_frame_call_wrapper()

    const Argument link           = Argument(0, false); // used only for GC
    const Argument result         = Argument(1, false);
    const Argument result_type    = Argument(2, false);
    const Argument method         = Argument(3, false);
    const Argument entry_point    = Argument(4, false);
    const Argument parameters     = Argument(5, false);
    const Argument parameter_size = Argument(6, false);
    const Argument thread         = Argument(7, false);

    // setup thread register
    __ ld_ptr(thread.as_address(), G2_thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    { const Register t = G3_scratch;
      Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
      __ br_null_short(t, Assembler::pt, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // create activation frame & allocate space for parameters
    { const Register t = G3_scratch;
      __ ld_ptr(parameter_size.as_address(), t);            // get parameter size (in words)
      __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
      __ round_to(t, WordsPerLong);                         // make sure it is a multiple of 2 (in words)
      __ sll(t, Interpreter::logStackElementSize, t);       // compute number of bytes
      __ neg(t);                                            // negate so it can be used with save
      __ save(SP, t, SP);                                   // setup new frame
    }

    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // | empty slot    | (only if parameter size is even)
    // +---------------+
    // |               |
    // . parameters    .
    // |               |
    // +---------------+ <--- fp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- fp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- fp + 0x5c
    // | param. size   |
    // +---------------+ <--- fp + 0x60
    // | thread        |
    // +---------------+
    // |               |

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    { const Register src = parameters.as_in().as_register();
      const Register dst = Lentry_args;
      const Register tmp = G3_scratch;
      const Register cnt = G4_scratch;

      // test if any parameters & setup of Lentry_args
      Label exit;
      __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter
      __ add(FP, STACK_BIAS, dst);
      __ cmp_zero_and_br(Assembler::zero, cnt, exit);
      __ delayed()->sub(dst, BytesPerWord, dst);           // setup Lentry_args

      // copy parameters if any
      Label loop;
      __ BIND(loop);
      // Store parameter value
      __ ld_ptr(src, 0, tmp);
      __ add(src, BytesPerWord, src);
      __ st_ptr(tmp, dst, 0);
      __ deccc(cnt);
      __ br(Assembler::greater, false, Assembler::pt, loop);
      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);

      // done
      __ BIND(exit);
    }

    // setup parameters, method & call Java function
#ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
    // this register, so if this changes update it as well.
    const Register saved_SP = Lscratch;
    __ mov(SP, saved_SP); // keep track of SP before call
#endif

    // setup parameters
    const Register t = G3_scratch;
    __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
    __ sub(FP, t, Gargs);                              // setup parameter pointer
    __ add(Gargs, STACK_BIAS, Gargs);                  // Account for LP64 stack bias
    __ mov(SP, O5_savedSP);


    // do the call
    //
    // the following registers must be set up:
    //
    // G2_thread
    // G5_method
    // Gargs
    BLOCK_COMMENT("call Java function");
    __ jmpl(entry_point.as_in().as_register(), G0, O7);
    __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method

    BLOCK_COMMENT("call_stub_return_address:");
    return_pc = __ pc();

    // The callee, if it wasn't interpreted, can return with SP changed, so
    // we can no longer assert that SP is unchanged across the call.

    // store result depending on type
    // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
    //  is treated as T_INT)
    { const Register addr = result     .as_in().as_register();
      const Register type = result_type.as_in().as_register();
      Label is_long, is_float, is_double, is_object, exit;
      __ cmp(type, T_OBJECT);            __ br(Assembler::equal, false, Assembler::pn, is_object);
      __ delayed()->cmp(type, T_FLOAT);  __ br(Assembler::equal, false, Assembler::pn, is_float);
      __ delayed()->cmp(type, T_DOUBLE); __ br(Assembler::equal, false, Assembler::pn, is_double);
      __ delayed()->cmp(type, T_LONG);   __ br(Assembler::equal, false, Assembler::pn, is_long);
      __ delayed()->nop();

      // store int result
      __ st(O0, addr, G0);

      __ BIND(exit);
      __ ret();
      __ delayed()->restore();

      __ BIND(is_object);
      __ ba(exit);
      __ delayed()->st_ptr(O0, addr, G0);

      __ BIND(is_float);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);

      __ BIND(is_double);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);

      __ BIND(is_long);
      __ ba(exit);
      __ delayed()->st_long(O0, addr, G0); // store entire long
    }
    return start;
  }
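
  // How the stub above is reached from C++: JavaCalls invokes it through
  // StubRoutines::call_stub(). A hedged sketch of such a call site follows
  // (the CallStub typedef lives in stubRoutines.hpp; the argument names here
  // are illustrative, not the exact javaCalls.cpp code):
  //
  //   StubRoutines::call_stub()(
  //       (address)&link,       // o0: call wrapper
  //       result_val_address,   // o1: where to store the result
  //       result_type,          // o2: BasicType of the result
  //       method(),             // o3: Method*
  //       entry_point,          // o4: interpreter entry point
  //       parameter_address,    // o5: Java arguments
  //       size_of_parameters,   // [sp + 0x5c]: in words
  //       thread);              // [sp + 0x60]: current JavaThread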
284 // 285 // Oexception: exception oop 286 287 address generate_catch_exception() { 288 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 289 290 address start = __ pc(); 291 // verify that thread corresponds 292 __ verify_thread(); 293 294 const Register& temp_reg = Gtemp; 295 Address pending_exception_addr (G2_thread, Thread::pending_exception_offset()); 296 Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ()); 297 Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ()); 298 299 // set pending exception 300 __ verify_oop(Oexception); 301 __ st_ptr(Oexception, pending_exception_addr); 302 __ set((intptr_t)__FILE__, temp_reg); 303 __ st_ptr(temp_reg, exception_file_offset_addr); 304 __ set((intptr_t)__LINE__, temp_reg); 305 __ st(temp_reg, exception_line_offset_addr); 306 307 // complete return to VM 308 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 309 310 AddressLiteral stub_ret(StubRoutines::_call_stub_return_address); 311 __ jump_to(stub_ret, temp_reg); 312 __ delayed()->nop(); 313 314 return start; 315 } 316 317 318 //---------------------------------------------------------------------------------------------------- 319 // Continuation point for runtime calls returning with a pending exception 320 // The pending exception check happened in the runtime or native call stub 321 // The pending exception in Thread is converted into a Java-level exception 322 // 323 // Contract with Java-level exception handler: O0 = exception 324 // O1 = throwing pc 325 326 address generate_forward_exception() { 327 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 328 address start = __ pc(); 329 330 // Upon entry, O7 has the return address returning into Java 331 // (interpreted or compiled) code; i.e. the return address 332 // becomes the throwing pc. 333 334 const Register& handler_reg = Gtemp; 335 336 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 337 338 #ifdef ASSERT 339 // make sure that this code is only executed if there is a pending exception 340 { Label L; 341 __ ld_ptr(exception_addr, Gtemp); 342 __ br_notnull_short(Gtemp, Assembler::pt, L); 343 __ stop("StubRoutines::forward exception: no pending exception (1)"); 344 __ bind(L); 345 } 346 #endif 347 348 // compute exception handler into handler_reg 349 __ get_thread(); 350 __ ld_ptr(exception_addr, Oexception); 351 __ verify_oop(Oexception); 352 __ save_frame(0); // compensates for compiler weakness 353 __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC 354 BLOCK_COMMENT("call exception_handler_for_return_address"); 355 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch); 356 __ mov(O0, handler_reg); 357 __ restore(); // compensates for compiler weakness 358 359 __ ld_ptr(exception_addr, Oexception); 360 __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC 361 362 #ifdef ASSERT 363 // make sure exception is set 364 { Label L; 365 __ br_notnull_short(Oexception, Assembler::pt, L); 366 __ stop("StubRoutines::forward exception: no pending exception (2)"); 367 __ bind(L); 368 } 369 #endif 370 // jump to exception handler 371 __ jmp(handler_reg, 0); 372 // clear pending exception 373 __ delayed()->st_ptr(G0, exception_addr); 374 375 return start; 376 } 377 378 // Safefetch stubs. 
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   o0 = adr
    //   o1 = errValue
    //
    // result:
    //   o0 = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    __ align(CodeEntryAlignment);
    *entry = __ pc();

    __ mov(O0, G1);  // g1 = o0
    __ mov(O1, O0);  // o0 = o1
    // Load *adr into O0; this load may fault.
    *fault_pc = __ pc();
    switch (size) {
    case 4:
      // int32_t
      __ ldsw(G1, 0, O0);  // o0 = [g1]
      break;
    case 8:
      // int64_t
      __ ldx(G1, 0, O0);   // o0 = [g1]
      break;
    default:
      ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    // By convention with the trap handler we ensure there is a non-CTI
    // instruction in the trap shadow.
    __ nop();
    __ retl();
    __ delayed()->nop();
  }

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller-saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because it needs to be
  // properly traversed and ignored during GC, so we change the meaning of the "__"
  // macro within this method.
#undef __
#define __ masm->

  address generate_throw_exception(const char* name, address runtime_entry,
                                   Register arg1 = noreg, Register arg2 = noreg) {
#ifdef ASSERT
    int insts_size = VerifyThread ? 1 * K : 600;
#else
    int insts_size = VerifyThread ? 1 * K : 256;
#endif /* ASSERT */
    int locs_size = 32;

    CodeBuffer code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);

    __ verify_thread();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage
    __ assert_not_delayed();

    // Note that we always push a frame because on the SPARC
    // architecture, for all of our implicit exception kinds at call
    // sites, the implicit exception is taken before the callee frame
    // is pushed.
    __ save_frame(0);

    int frame_complete = __ offset();

    // Note that we always have a runtime stub frame on the top of stack by this point
    Register last_java_sp = SP;
    // 64-bit last_java_sp is biased!
    __ set_last_Java_frame(last_java_sp, G0);
    if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early
    __ save_thread(noreg);
    if (arg1 != noreg) {
      assert(arg2 != O1, "clobbered");
      __ mov(arg1, O1);
    }
    if (arg2 != noreg) {
      __ mov(arg2, O2);
    }
    // do the call
    BLOCK_COMMENT("call runtime_entry");
    __ call(runtime_entry, relocInfo::runtime_call_type);
    if (!VerifyThread)
      __ delayed()->mov(G2_thread, O0); // pass thread as first argument
    else
      __ delayed()->nop();              // (thread already passed)
    __ restore_thread(noreg);
    __ reset_last_Java_frame();

    // check for pending exceptions. use Gtemp as scratch register.
#ifdef ASSERT
    Label L;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    Register scratch_reg = Gtemp;
    __ ld_ptr(exception_addr, scratch_reg);
    __ br_notnull_short(scratch_reg, Assembler::pt, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    BLOCK_COMMENT("call forward_exception_entry");
    __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    // we use O7 linkage so that forward_exception_entry has the issuing PC
    __ delayed()->restore();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
    return stub->entry_point();
  }

#undef __
#define __ _masm->


  // Generate a routine that sets all the registers so we
  // can tell if the stop routine prints them correctly.
  address generate_test_stop() {
    StubCodeMark mark(this, "StubRoutines", "test_stop");
    address start = __ pc();

    int i;

    __ save_frame(0);

    static jfloat zero = 0.0, one = 1.0;

    // put addr in L0, then load through L0 to F0
    __ set((intptr_t)&zero, L0); __ ldf(FloatRegisterImpl::S, L0, 0, F0);
    __ set((intptr_t)&one,  L0); __ ldf(FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1

    // use add to put 2..18 in F2..F18
    for (i = 2; i <= 18; ++i) {
      __ fadd(FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i));
    }

    // Now put double 2 in F16, double 18 in F18
    __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, F2,  F16);
    __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18);

    // use add to put 20..32 in F20..F32
    for (i = 20; i < 32; i += 2) {
      __ fadd(FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i));
    }

    // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
    for (i = 0; i < 8; ++i) {
      if (i < 6) {
        __ set(     i, as_iRegister(i));
        __ set(16 + i, as_oRegister(i));
        __ set(24 + i, as_gRegister(i));
      }
      __ set( 8 + i, as_lRegister(i));
    }

    __ stop("testing stop");


    __ ret();
    __ delayed()->restore();

    return start;
  }


  address generate_stop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
    address start = __ pc();

    __ stop_subroutine();

    return start;
  }

  address generate_flush_callers_register_windows() {
    StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    address start = __ pc();

    __ flushw();
    __ retl(false);
    __ delayed()->add(FP, STACK_BIAS, O0);
    // The returned value must be a stack pointer whose register save area
    // is flushed, and will stay flushed while the caller executes.

    return start;
  }

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    if (UseCASForSwap) {
      // Use CAS instead of swap, just in case the MP hardware
      // prefers to work with just one kind of synch. instruction.
      Label retry;
      __ BIND(retry);
      __ mov(O0, O3);   // scratch copy of exchange value
      __ ld(O1, 0, O2); // observe the previous value
      // try to replace O2 with O3
      __ cas(O1, O2, O3);
      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);

      __ retl(false);
      __ delayed()->mov(O2, O0); // report previous value to caller
    } else {
      __ retl(false);
      __ delayed()->swap(O1, 0, O0);
    }

    return start;
  }


  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    // cmpxchg(dest, compare_value, exchange_value)
    __ cas(O1, O2, O0);
    __ retl(false);
    __ delayed()->nop();

    return start;
  }

  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O1:O0 (high word in O0, low word in O1)
  //      dest:           O2
  //      compare_value:  O4:O3 (high word in O3, low word in O4)
  //
  // Results:
  //
  //     O1:O0: the value previously stored in dest
  //
  // Overwrites: O3, O4
  //
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ sllx(O0, 32, O0);
    __ srl (O1,  0, O1);
    __ or3 (O0, O1, O0);  // O0 holds the 64-bit exchange_value
    __ sllx(O3, 32, O3);
    __ srl (O4,  0, O4);
    __ or3 (O3, O4, O3);  // O3 holds the 64-bit compare_value
    __ casx(O2, O3, O0);  // if [O2] == O3, swap [O2] with O0; O0 receives the old value
    __ srl (O0, 0, O1);   // unpacked return value in O1:O0
    __ retl(false);
    __ delayed()->srlx(O0, 32, O0);

    return start;
  }
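
  // A hedged C++ sketch of the compare-and-swap contract the two stubs above
  // implement with cas/casx (illustrative only, not HotSpot code):
  //
  //   jint cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;               // read and conditional store happen
  //     if (old == compare_value) {     // as one atomic step in the cas
  //       *dest = exchange_value;       // instruction
  //     }
  //     return old;  // caller infers success from old == compare_value
  //   }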
683 // 684 // Arguments: 685 // 686 // add_value: O0 (e.g., +1 or -1) 687 // dest: O1 688 // 689 // Results: 690 // 691 // O0: the new value stored in dest 692 // 693 // Overwrites: O3 694 // 695 address generate_atomic_add() { 696 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 697 address start = __ pc(); 698 __ BIND(_atomic_add_stub); 699 700 Label(retry); 701 __ BIND(retry); 702 703 __ lduw(O1, 0, O2); 704 __ add(O0, O2, O3); 705 __ cas(O1, O2, O3); 706 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 707 __ retl(false); 708 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 709 710 return start; 711 } 712 Label _atomic_add_stub; // called from other stubs 713 714 715 // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super ); 716 // Arguments : 717 // 718 // ret : O0, returned 719 // icc/xcc: set as O0 (depending on wordSize) 720 // sub : O1, argument, not changed 721 // super: O2, argument, not changed 722 // raddr: O7, blown by call 723 address generate_partial_subtype_check() { 724 __ align(CodeEntryAlignment); 725 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 726 address start = __ pc(); 727 Label miss; 728 729 __ save_frame(0); 730 Register Rret = I0; 731 Register Rsub = I1; 732 Register Rsuper = I2; 733 734 Register L0_ary_len = L0; 735 Register L1_ary_ptr = L1; 736 Register L2_super = L2; 737 Register L3_index = L3; 738 739 __ check_klass_subtype_slow_path(Rsub, Rsuper, 740 L0, L1, L2, L3, 741 NULL, &miss); 742 743 // Match falls through here. 744 __ addcc(G0,0,Rret); // set Z flags, Z result 745 746 __ ret(); // Result in Rret is zero; flags set to Z 747 __ delayed()->restore(); 748 749 __ BIND(miss); 750 __ addcc(G0,1,Rret); // set NZ flags, NZ result 751 752 __ ret(); // Result in Rret is != 0; flags set to NZ 753 __ delayed()->restore(); 754 755 return start; 756 } 757 758 759 // Called from MacroAssembler::verify_oop 760 // 761 address generate_verify_oop_subroutine() { 762 StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 763 764 address start = __ pc(); 765 766 __ verify_oop_subroutine(); 767 768 return start; 769 } 770 771 772 // 773 // Verify that a register contains clean 32-bits positive value 774 // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax). 
775 // 776 // Input: 777 // Rint - 32-bits value 778 // Rtmp - scratch 779 // 780 void assert_clean_int(Register Rint, Register Rtmp) { 781 #if defined(ASSERT) 782 __ signx(Rint, Rtmp); 783 __ cmp(Rint, Rtmp); 784 __ breakpoint_trap(Assembler::notEqual, Assembler::xcc); 785 #endif 786 } 787 788 // 789 // Generate overlap test for array copy stubs 790 // 791 // Input: 792 // O0 - array1 793 // O1 - array2 794 // O2 - element count 795 // 796 // Kills temps: O3, O4 797 // 798 void array_overlap_test(address no_overlap_target, int log2_elem_size) { 799 assert(no_overlap_target != NULL, "must be generated"); 800 array_overlap_test(no_overlap_target, NULL, log2_elem_size); 801 } 802 void array_overlap_test(Label& L_no_overlap, int log2_elem_size) { 803 array_overlap_test(NULL, &L_no_overlap, log2_elem_size); 804 } 805 void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) { 806 const Register from = O0; 807 const Register to = O1; 808 const Register count = O2; 809 const Register to_from = O3; // to - from 810 const Register byte_count = O4; // count << log2_elem_size 811 812 __ subcc(to, from, to_from); 813 __ sll_ptr(count, log2_elem_size, byte_count); 814 if (NOLp == NULL) 815 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target); 816 else 817 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp)); 818 __ delayed()->cmp(to_from, byte_count); 819 if (NOLp == NULL) 820 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target); 821 else 822 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp)); 823 __ delayed()->nop(); 824 } 825 826 827 // 828 // Generate main code for disjoint arraycopy 829 // 830 typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec, 831 Label& L_loop, bool use_prefetch, bool use_bis); 832 833 void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size, 834 int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) { 835 Label L_copy; 836 837 assert(log2_elem_size <= 3, "the following code should be changed"); 838 int count_dec = 16>>log2_elem_size; 839 840 int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance); 841 assert(prefetch_dist < 4096, "invalid value"); 842 prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size 843 int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count 844 845 if (UseBlockCopy) { 846 Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy; 847 848 // 64 bytes tail + bytes copied in one loop iteration 849 int tail_size = 64 + iter_size; 850 int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size; 851 // Use BIS copy only for big arrays since it requires membar. 852 __ set(block_copy_count, O4); 853 __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy); 854 // This code is for disjoint source and destination: 855 // to <= from || to >= from+count 856 // but BIS will stomp over 'from' if (to > from-tail_size && to <= from) 857 __ sub(from, to, O4); 858 __ srax(O4, 4, O4); // divide by 16 since following short branch have only 5 bits for imm. 859 __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy); 860 861 __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 862 // BIS should not be used to copy tail (64 bytes+iter_size) 863 // to avoid zeroing of following values. 


  //
  // Generate main code for disjoint arraycopy
  //
  typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
                                              Label& L_loop, bool use_prefetch, bool use_bis);

  void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
                          int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) {
    Label L_copy;

    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16 >> log2_elem_size;

    int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
    assert(prefetch_dist < 4096, "invalid value");
    prefetch_dist = (prefetch_dist + (iter_size - 1)) & (-iter_size); // round up to one iteration copy size
    int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count

    if (UseBlockCopy) {
      Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;

      // 64 bytes tail + bytes copied in one loop iteration
      int tail_size = 64 + iter_size;
      int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
      // Use BIS copy only for big arrays since it requires membar.
      __ set(block_copy_count, O4);
      __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
      // This code is for disjoint source and destination:
      //   to <= from || to >= from + count
      // but BIS will stomp over 'from' if (to > from - tail_size && to <= from)
      __ sub(from, to, O4);
      __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for the immediate
      __ cmp_and_br_short(O4, (tail_size >> 4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);

      __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
      // BIS should not be used to copy the tail (64 bytes + iter_size)
      // to avoid zeroing the values that follow it.
      __ sub(count, (tail_size >> log2_elem_size), count); // count is still >= 0

      if (prefetch_count > 0) { // rounded up to one iteration count
        // Do prefetching only if copy size is bigger
        // than prefetch distance.
        __ set(prefetch_count, O4);
        __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
        __ sub(count, O4, count);

        (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
        __ set(prefetch_count, O4);
        __ add(count, O4, count);

      } // prefetch_count > 0

      (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
      __ add(count, (tail_size >> log2_elem_size), count); // restore count

      __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
      // BIS needs membar.
      __ membar(Assembler::StoreLoad);
      // Copy tail
      __ ba_short(L_copy);

      __ BIND(L_skip_block_copy);
    } // UseBlockCopy

    if (prefetch_count > 0) { // rounded up to one iteration count
      // Do prefetching only if copy size is bigger
      // than prefetch distance.
      __ set(prefetch_count, O4);
      __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
      __ sub(count, O4, count);

      Label L_copy_prefetch;
      (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
      __ set(prefetch_count, O4);
      __ add(count, O4, count);

    } // prefetch_count > 0

    (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
  }



  //
  // Helper methods for copy_16_bytes_forward_with_shift()
  //
  void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
                                Label& L_loop, bool use_prefetch, bool use_bis) {

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 0, O4);
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, count_dec); // Can we do the next iteration after this one?
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to,  -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to,  -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, left_shift, O3);
  }
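
  // Hedged C sketch of the realignment technique used by the loop above and
  // the two shift-copy routines below (illustrative only): 8-byte chunks are
  // loaded at aligned addresses and recombined before the aligned stores.
  //
  //   uint64_t prev = *src++;                // first aligned chunk
  //   while (count > 0) {
  //     uint64_t next = *src++;
  //     *dst++ = (prev << left_shift) | (next >> right_shift);
  //     prev = next;
  //     count -= elems_per_chunk;
  //   }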

  // Copy big chunks forward with shift
  //
  // Inputs:
  //   from      - source array address
  //   to        - destination array address, 8-byte aligned
  //   count     - element count to copy, at least the equivalent of 16 bytes
  //   count_dec - count decrement equivalent to 16 bytes
  //   L_copy_bytes - copy exit label
  //
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                                        Register count, int log2_elem_size, Label& L_copy_bytes) {
    Label L_aligned_copy, L_copy_last_bytes;
    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16 >> log2_elem_size;

    // if both arrays have the same alignment mod 8, do 8-byte aligned copy
    __ andcc(from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->nop();

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
    //
    __ dec(count, count_dec); // Pre-decrement 'count'
    __ andn(from, 7, from);   // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ sllx(O3, left_shift, O3);

    disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);

    __ inccc(count, count_dec >> 1); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec >> 1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

    __ BIND(L_copy_last_bytes);
    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->sub(from, right_shift, from);       // restore address

    __ BIND(L_aligned_copy);
  }

  // Copy big chunks backward with shift
  //
  // Inputs:
  //   end_from  - source array end address
  //   end_to    - destination array end address, 8-byte aligned
  //   count     - element count to copy, at least the equivalent of 16 bytes
  //   count_dec - count decrement equivalent to 16 bytes
  //   L_aligned_copy - aligned copy exit label
  //   L_copy_bytes   - copy exit label
  //
  void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
                                         Register count, int count_dec,
                                         Label& L_aligned_copy, Label& L_copy_bytes) {
    Label L_loop, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8-byte aligned copy
    __ andcc(end_from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
    //
    __ andn(end_from, 7, end_from); // Align address
    __ ldx(end_from, 0, O3);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(end_from, -8, O4);
    __ deccc(count, count_dec); // Can we do the next iteration after this one?
    __ ldx(end_from, -16, G4);
    __ dec(end_to, 16);
    __ dec(end_from, 16);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift,  G3);
    __ bset(G3, O3);
    __ stx(O3, end_to, 8);
    __ srlx(O4, right_shift, O4);
    __ sllx(G4, left_shift,  G3);
    __ bset(G3, O4);
    __ stx(O4, end_to, 0);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    __ inccc(count, count_dec >> 1); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec >> 1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(end_from, -8, O4);
    __ dec(end_to, 8);
    __ dec(end_from, 8);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, end_to, 0);

    __ BIND(L_copy_last_bytes);
    __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->add(end_from, left_shift, end_from); // restore address
  }

  //
  // Generate stub for disjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from   = O0; // source array address
    const Register to     = O1; // destination array address
    const Register count  = O2; // elements count
    const Register offset = O5; // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4-byte alignment in a 32-bit VM and 8-byte
      // alignment in a 64-bit VM, so no extra alignment work is needed here.
      //
    } else {
      // copy bytes to align 'to' on 8 byte boundary
      __ andcc(to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->neg(G1);
      __ inc(G1, 8);       // bytes needed to reach the next 8-byte alignment
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ ldub(from, 0, O3);
      __ deccc(G1);
      __ inc(from);
      __ stb(O3, to, 0);
      __ br(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->inc(to);
      __ BIND(L_skip_alignment);
    }
    if (!aligned) {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
    }

    // Both arrays are 8-byte aligned; copy 16 bytes at a time
    __ and3(count, 7, G4); // Save count
    __ srl(count, 3, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // Restore count

    // copy trailing bytes
    __ BIND(L_copy_byte);
    __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ ldub(from, offset, O3);
    __ deccc(count);
    __ stb(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->inc(offset);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  // Generate stub for conjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address *entry, const char *name) {
    // Do reverse copy.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align, L_aligned_copy;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from     = O0;   // source array address
    const Register to       = O1;   // destination array address
    const Register count    = O2;   // elements count
    const Register end_from = from; // source array end address
    const Register end_to   = to;   // destination array end address

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, 0);

    __ add(to, count, end_to); // offset after last copied element

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->add(from, count, end_from);

    {
      // Align the ends of the arrays, since they may not be aligned even
      // when the arrays themselves are aligned.

      // copy bytes to align 'end_to' on 8 byte boundary
      __ andcc(end_to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->nop();
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ dec(end_from);
      __ dec(end_to);
      __ ldub(end_from, 0, O3);
      __ deccc(G1);
      __ brx(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->stb(O3, end_to, 0);
      __ BIND(L_skip_alignment);
    }
    if (aligned) {
      // Both arrays are aligned to 8 bytes in a 64-bit VM.
      // The 'count' is decremented in copy_16_bytes_backward_with_shift()
      // only in the unaligned case, so do it here.
      __ dec(count, 16);
    } else {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise jump to the next
      // code for aligned copy (subtracting 16 from 'count' before the jump).
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
                                        L_aligned_copy, L_copy_byte);
    }
    // copy 16 elements (16 bytes) at a time
    __ align(OptoLoopAlignment);
    __ BIND(L_aligned_copy);
    __ dec(end_from, 16);
    __ ldx(end_from, 8, O3);
    __ ldx(end_from, 0, O4);
    __ dec(end_to, 16);
    __ deccc(count, 16);
    __ stx(O3, end_to, 8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
    __ delayed()->stx(O4, end_to, 0);
    __ inc(count, 16);

    // copy 1 element (1 byte) at a time
    __ BIND(L_copy_byte);
    __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ dec(end_from);
    __ dec(end_to);
    __ ldub(end_from, 0, O4);
    __ deccc(count);
    __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->stb(O4, end_to, 0);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  // Generate stub for disjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_skip_alignment2;
    Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;

    const Register from   = O0; // source array address
    const Register to     = O1; // destination array address
    const Register count  = O2; // elements count
    const Register offset = O5; // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 11); // 8 + 3 (22 bytes)
    __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4-byte alignment in a 32-bit VM
      // and 8-byte alignment in a 64-bit VM.
      //
    } else {
      // copy 1 element if necessary to align 'to' on a 4-byte boundary
      __ andcc(to, 3, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->lduh(from, 0, O3);
      __ inc(from, 2);
      __ inc(to, 2);
      __ dec(count);
      __ sth(O3, to, -2);
      __ BIND(L_skip_alignment);

      // copy 2 elements to align 'to' on an 8-byte boundary
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
      __ delayed()->lduh(from, 0, O3);
      __ dec(count, 2);
      __ lduh(from, 2, O4);
      __ inc(from, 4);
      __ inc(to, 4);
      __ sth(O3, to, -4);
      __ sth(O4, to, -2);
      __ BIND(L_skip_alignment2);
    }
    if (!aligned) {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
    }

    // Both arrays are 8-byte aligned; copy 16 bytes at a time
    __ and3(count, 3, G4); // Save
    __ srl(count, 2, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // restore

    // copy 1 element at a time
    __ BIND(L_copy_2_bytes);
    __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_2_bytes_loop);
    __ lduh(from, offset, O3);
    __ deccc(count);
    __ sth(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
    __ delayed()->inc(offset, 2);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }
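
  // Hedged sketch of the alignment ladder the element copies above perform
  // before switching to the 16-byte bulk loop (illustrative C, assuming
  // 2-byte elements):
  //
  //   if (to & 3) { copy_one_element(); }  // 'to' is now 4-byte aligned
  //   if (to & 7) { copy_two_elements(); } // 'to' is now 8-byte aligned
  //   // ... bulk copy 16 bytes per iteration, then the trailing elements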

  //
  // Generate stub for an array fill of the given element type. If "aligned"
  // is true, the "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    O0
  //   value: O1
  //   count: O2 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register to    = O0; // destination array address
    const Register value = O1; // fill value
    const Register count = O2; // elements count
    // O3 is used as a temp register

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1; // log2(elements per 32-bit word)
    switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
    }

    BLOCK_COMMENT("Entry:");

    if (t == T_BYTE) {
      // Zero extend value
      __ and3(value, 0xff, value);
      __ sllx(value, 8, O3);
      __ or3(value, O3, value);
    }
    if (t == T_SHORT) {
      // Zero extend value
      __ sllx(value, 48, value);
      __ srlx(value, 48, value);
    }
    if (t == T_BYTE || t == T_SHORT) {
      __ sllx(value, 16, O3);
      __ or3(value, O3, value);
    }

    __ cmp(count, 2 << shift); // Short arrays (< 8 bytes) fill by element
    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
    __ delayed()->andcc(count, 1, G0);

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // align destination address at a 4-byte address boundary
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ andcc(to, 1, G0);
        __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
        __ delayed()->nop();
        __ stb(value, to, 0);
        __ inc(to, 1);
        __ dec(count, 1);
        __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ andcc(to, 2, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
      __ delayed()->nop();
      __ sth(value, to, 0);
      __ inc(to, 2);
      __ dec(count, 1 << (shift - 1));
      __ BIND(L_skip_align2);
    }
    if (!aligned) {
      // align to 8 bytes; we know we are 4 byte aligned to start
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
      __ delayed()->nop();
      __ stw(value, to, 0);
      __ inc(to, 4);
      __ dec(count, 1 << shift);
      __ BIND(L_fill_32_bytes);
    }

    if (t == T_INT) {
      // Zero extend value
      __ srl(value, 0, value);
    }
    if (t == T_BYTE || t == T_SHORT || t == T_INT) {
      __ sllx(value, 32, O3);
      __ or3(value, O3, value);
    }

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks
    __ subcc(count, 8 << shift, count);
    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
    __ delayed()->nop();

    Label L_fill_32_bytes_loop, L_fill_4_bytes;
    __ align(16);
    __ BIND(L_fill_32_bytes_loop);

    __ stx(value, to, 0);
    __ stx(value, to, 8);
    __ stx(value, to, 16);
    __ stx(value, to, 24);

    __ subcc(count, 8 << shift, count);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
    __ delayed()->add(to, 32, to);

    __ BIND(L_check_fill_8_bytes);
    __ addcc(count, 8 << shift, count);
    __ brx(Assembler::zero, false, Assembler::pn, L_exit);
    __ delayed()->subcc(count, 1 << (shift + 1), count);
    __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
    __ delayed()->andcc(count, 1 << shift, G0);

    //
    // length is too short, just fill 8 bytes at a time
    //
    Label L_fill_8_bytes_loop;
    __ BIND(L_fill_8_bytes_loop);
    __ stx(value, to, 0);
    __ subcc(count, 1 << (shift + 1), count);
    __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
    __ delayed()->add(to, 8, to);

    // fill trailing 4 bytes
    __ andcc(count, 1 << shift, G0); // in delay slot of branches
    if (t == T_INT) {
      __ BIND(L_fill_elements);
    }
    __ BIND(L_fill_4_bytes);
    __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
    if (t == T_BYTE || t == T_SHORT) {
      __ delayed()->andcc(count, 1 << (shift - 1), G0);
    } else {
      __ delayed()->nop();
    }
    __ stw(value, to, 0);
    if (t == T_BYTE || t == T_SHORT) {
      __ inc(to, 4);
      // fill trailing 2 bytes
      __ andcc(count, 1 << (shift - 1), G0); // in delay slot of branches
      __ BIND(L_fill_2_bytes);
      __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
      __ delayed()->andcc(count, 1, count);
      __ sth(value, to, 0);
      if (t == T_BYTE) {
        __ inc(to, 2);
        // fill trailing byte
        __ andcc(count, 1, count); // in delay slot of branches
        __ BIND(L_fill_byte);
        __ brx(Assembler::zero, false, Assembler::pt, L_exit);
        __ delayed()->nop();
        __ stb(value, to, 0);
      } else {
        __ BIND(L_fill_byte);
      }
    } else {
      __ BIND(L_fill_2_bytes);
    }
    __ BIND(L_exit);
    __ retl();
    __ delayed()->nop();

    // Handle fills of less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ BIND(L_fill_elements);
      Label L_fill_2, L_fill_4;
      // in delay slot: __ andcc(count, 1, G0);
      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
      __ delayed()->andcc(count, 2, G0);
      __ stb(value, to, 0);
      __ inc(to, 1);
      __ BIND(L_fill_2);
      __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
      __ delayed()->andcc(count, 4, G0);
      __ stb(value, to, 0);
      __ stb(value, to, 1);
      __ inc(to, 2);
      __ BIND(L_fill_4);
      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
      __ delayed()->nop();
      __ stb(value, to, 0);
      __ stb(value, to, 1);
      __ stb(value, to, 2);
      __ retl();
      __ delayed()->stb(value, to, 3);
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ BIND(L_fill_elements);
      // in delay slot: __ andcc(count, 1, G0);
      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
      __ delayed()->andcc(count, 2, G0);
      __ sth(value, to, 0);
      __ inc(to, 2);
      __ BIND(L_fill_2);
      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
      __ delayed()->nop();
      __ sth(value, to, 0);
      __ retl();
      __ delayed()->sth(value, to, 2);
    }
    return start;
  }
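
  // Hedged C sketch of the value replication generate_fill() performs before
  // its 8-byte stores (illustrative only, not HotSpot code):
  //
  //   uint64_t widen_fill_value(uint64_t v, BasicType t) {
  //     if (t == T_BYTE)                 { v &= 0xff;   v |= v << 8;  } // 2 bytes of pattern
  //     if (t == T_BYTE || t == T_SHORT) { v &= 0xffff; v |= v << 16; } // 4 bytes
  //     v = (uint32_t)v; v |= v << 32;                                  // 8 bytes
  //     return v;
  //   }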

  //
  // Generate stub for conjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    // Do reverse copy.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
    Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;

    const Register from     = O0;   // source array address
    const Register to       = O1;   // destination array address
    const Register count    = O2;   // elements count
    const Register end_from = from; // source array end address
    const Register end_to   = to;   // destination array end address

    const Register byte_count = O3; // bytes count to copy

    assert_clean_int(count, O3); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, 1);

    __ sllx(count, LogBytesPerShort, byte_count);
    __ add(to, byte_count, end_to); // offset after last copied element

    // for short arrays, just do single element copy
    __ cmp(count, 11); // 8 + 3 (22 bytes)
    __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
    __ delayed()->add(from, byte_count, end_from);

    {
      // Align the ends of the arrays, since they may not be aligned even
      // when the arrays themselves are aligned.

      // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
      __ andcc(end_to, 3, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->lduh(end_from, -2, O3);
      __ dec(end_from, 2);
      __ dec(end_to, 2);
      __ dec(count);
      __ sth(O3, end_to, 0);
      __ BIND(L_skip_alignment);

      // copy 2 elements to align 'end_to' on an 8-byte boundary
      __ andcc(end_to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
      __ delayed()->lduh(end_from, -2, O3);
      __ dec(count, 2);
      __ lduh(end_from, -4, O4);
      __ dec(end_from, 4);
      __ dec(end_to, 4);
      __ sth(O3, end_to, 2);
      __ sth(O4, end_to, 0);
      __ BIND(L_skip_alignment2);
    }
    if (aligned) {
      // Both arrays are aligned to 8 bytes in a 64-bit VM.
      // The 'count' is decremented in copy_16_bytes_backward_with_shift()
      // only in the unaligned case, so do it here.
      __ dec(count, 8);
    } else {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise jump to the next
      // code for aligned copy (subtracting 8 from 'count' before the jump).
      // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
                                        L_aligned_copy, L_copy_2_bytes);
    }
    // copy 4 elements (16 bytes) at a time
    __ align(OptoLoopAlignment);
    __ BIND(L_aligned_copy);
    __ dec(end_from, 16);
    __ ldx(end_from, 8, O3);
    __ ldx(end_from, 0, O4);
    __ dec(end_to, 16);
    __ deccc(count, 8);
    __ stx(O3, end_to, 8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
    __ delayed()->stx(O4, end_to, 0);
    __ inc(count, 8);

    // copy 1 element (2 bytes) at a time
    __ BIND(L_copy_2_bytes);
    __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
    __ BIND(L_copy_2_bytes_loop);
    __ dec(end_from, 2);
    __ dec(end_to, 2);
    __ lduh(end_from, 0, O4);
    __ deccc(count);
    __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
    __ delayed()->sth(O4, end_to, 0);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  // Helper methods for generate_disjoint_int_copy_core()
  //
  void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
                          Label& L_loop, bool use_prefetch, bool use_bis) {

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 4, O4);
    __ ldx(from, 12, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, 4); // Can we do next iteration after this one?

    __ srlx(O4, 32, G3);
    __ bset(G3, O3);
    __ sllx(O4, 32, O4);
    __ srlx(G4, 32, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to, -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to, -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, 32, O3);
  }

  //
  // Generate core code for disjoint int copy (and oop copy on 32-bit).
  // If "aligned" is true, the "from" and "to" addresses are assumed
  // to be heapword aligned.
  //
  // Arguments:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  void generate_disjoint_int_copy_core(bool aligned) {

    Label L_skip_alignment, L_aligned_copy;
    Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;

    const Register from   = O0;  // source array address
    const Register to     = O1;  // destination array address
    const Register count  = O2;  // elements count
    const Register offset = O5;  // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    // 'aligned' == true when it is known statically during compilation
    // of this arraycopy call site that both 'from' and 'to' addresses
    // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
    //
    // Aligned arrays are 4-byte aligned in the 32-bit VM
    // and 8-byte aligned in the 64-bit VM.
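    // So with 'aligned' == true on the 64-bit VM, both addresses are already
    // 8-byte aligned and the whole alignment prologue below is skipped.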
1789 // 1790 if (!aligned) { 1791 // The next check could be put under 'ifndef' since the code in 1792 // generate_disjoint_long_copy_core() has own checks and set 'offset'. 1793 1794 // for short arrays, just do single element copy 1795 __ cmp(count, 5); // 4 + 1 (20 bytes) 1796 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 1797 __ delayed()->mov(G0, offset); 1798 1799 // copy 1 element to align 'to' on an 8 byte boundary 1800 __ andcc(to, 7, G0); 1801 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1802 __ delayed()->ld(from, 0, O3); 1803 __ inc(from, 4); 1804 __ inc(to, 4); 1805 __ dec(count); 1806 __ st(O3, to, -4); 1807 __ BIND(L_skip_alignment); 1808 1809 // if arrays have same alignment mod 8, do 4 elements copy 1810 __ andcc(from, 7, G0); 1811 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1812 __ delayed()->ld(from, 0, O3); 1813 1814 // 1815 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1816 // to form 2 aligned 8-bytes chunks to store. 1817 // 1818 // copy_16_bytes_forward_with_shift() is not used here since this 1819 // code is more optimal. 1820 1821 // copy with shift 4 elements (16 bytes) at a time 1822 __ dec(count, 4); // The cmp at the beginning guaranty count >= 4 1823 __ sllx(O3, 32, O3); 1824 1825 disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop); 1826 1827 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 1828 __ delayed()->inc(count, 4); // restore 'count' 1829 1830 __ BIND(L_aligned_copy); 1831 } // !aligned 1832 1833 // copy 4 elements (16 bytes) at a time 1834 __ and3(count, 1, G4); // Save 1835 __ srl(count, 1, count); 1836 generate_disjoint_long_copy_core(aligned); 1837 __ mov(G4, count); // Restore 1838 1839 // copy 1 element at a time 1840 __ BIND(L_copy_4_bytes); 1841 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1842 __ BIND(L_copy_4_bytes_loop); 1843 __ ld(from, offset, O3); 1844 __ deccc(count); 1845 __ st(O3, to, offset); 1846 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop); 1847 __ delayed()->inc(offset, 4); 1848 __ BIND(L_exit); 1849 } 1850 1851 // 1852 // Generate stub for disjoint int copy. If "aligned" is true, the 1853 // "from" and "to" addresses are assumed to be heapword aligned. 1854 // 1855 // Arguments for generated stub: 1856 // from: O0 1857 // to: O1 1858 // count: O2 treated as signed 1859 // 1860 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { 1861 __ align(CodeEntryAlignment); 1862 StubCodeMark mark(this, "StubRoutines", name); 1863 address start = __ pc(); 1864 1865 const Register count = O2; 1866 assert_clean_int(count, O3); // Make sure 'count' is clean int. 1867 1868 if (entry != NULL) { 1869 *entry = __ pc(); 1870 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1871 BLOCK_COMMENT("Entry:"); 1872 } 1873 1874 generate_disjoint_int_copy_core(aligned); 1875 1876 // O3, O4 are used as temp registers 1877 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 1878 __ retl(); 1879 __ delayed()->mov(G0, O0); // return 0 1880 return start; 1881 } 1882 1883 // 1884 // Generate core code for conjoint int copy (and oop copy on 32-bit). 1885 // If "aligned" is true, the "from" and "to" addresses are assumed 1886 // to be heapword aligned. 
1887 // 1888 // Arguments: 1889 // from: O0 1890 // to: O1 1891 // count: O2 treated as signed 1892 // 1893 void generate_conjoint_int_copy_core(bool aligned) { 1894 // Do reverse copy. 1895 1896 Label L_skip_alignment, L_aligned_copy; 1897 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit; 1898 1899 const Register from = O0; // source array address 1900 const Register to = O1; // destination array address 1901 const Register count = O2; // elements count 1902 const Register end_from = from; // source array end address 1903 const Register end_to = to; // destination array end address 1904 // O3, O4, O5, G3 are used as temp registers 1905 1906 const Register byte_count = O3; // bytes count to copy 1907 1908 __ sllx(count, LogBytesPerInt, byte_count); 1909 __ add(to, byte_count, end_to); // offset after last copied element 1910 1911 __ cmp(count, 5); // for short arrays, just do single element copy 1912 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes); 1913 __ delayed()->add(from, byte_count, end_from); 1914 1915 // copy 1 element to align 'to' on an 8 byte boundary 1916 __ andcc(end_to, 7, G0); 1917 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment); 1918 __ delayed()->nop(); 1919 __ dec(count); 1920 __ dec(end_from, 4); 1921 __ dec(end_to, 4); 1922 __ ld(end_from, 0, O4); 1923 __ st(O4, end_to, 0); 1924 __ BIND(L_skip_alignment); 1925 1926 // Check if 'end_from' and 'end_to' has the same alignment. 1927 __ andcc(end_from, 7, G0); 1928 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); 1929 __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4 1930 1931 // copy with shift 4 elements (16 bytes) at a time 1932 // 1933 // Load 2 aligned 8-bytes chunks and use one from previous iteration 1934 // to form 2 aligned 8-bytes chunks to store. 1935 // 1936 __ ldx(end_from, -4, O3); 1937 __ align(OptoLoopAlignment); 1938 __ BIND(L_copy_16_bytes); 1939 __ ldx(end_from, -12, O4); 1940 __ deccc(count, 4); 1941 __ ldx(end_from, -20, O5); 1942 __ dec(end_to, 16); 1943 __ dec(end_from, 16); 1944 __ srlx(O3, 32, O3); 1945 __ sllx(O4, 32, G3); 1946 __ bset(G3, O3); 1947 __ stx(O3, end_to, 8); 1948 __ srlx(O4, 32, O4); 1949 __ sllx(O5, 32, G3); 1950 __ bset(O4, G3); 1951 __ stx(G3, end_to, 0); 1952 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 1953 __ delayed()->mov(O5, O3); 1954 1955 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes); 1956 __ delayed()->inc(count, 4); 1957 1958 // copy 4 elements (16 bytes) at a time 1959 __ align(OptoLoopAlignment); 1960 __ BIND(L_aligned_copy); 1961 __ dec(end_from, 16); 1962 __ ldx(end_from, 8, O3); 1963 __ ldx(end_from, 0, O4); 1964 __ dec(end_to, 16); 1965 __ deccc(count, 4); 1966 __ stx(O3, end_to, 8); 1967 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy); 1968 __ delayed()->stx(O4, end_to, 0); 1969 __ inc(count, 4); 1970 1971 // copy 1 element (4 bytes) at a time 1972 __ BIND(L_copy_4_bytes); 1973 __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit); 1974 __ BIND(L_copy_4_bytes_loop); 1975 __ dec(end_from, 4); 1976 __ dec(end_to, 4); 1977 __ ld(end_from, 0, O4); 1978 __ deccc(count); 1979 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop); 1980 __ delayed()->st(O4, end_to, 0); 1981 __ BIND(L_exit); 1982 } 1983 1984 // 1985 // Generate stub for conjoint int copy. If "aligned" is true, the 1986 // "from" and "to" addresses are assumed to be heapword aligned. 
1987 // 1988 // Arguments for generated stub: 1989 // from: O0 1990 // to: O1 1991 // count: O2 treated as signed 1992 // 1993 address generate_conjoint_int_copy(bool aligned, address nooverlap_target, 1994 address *entry, const char *name) { 1995 __ align(CodeEntryAlignment); 1996 StubCodeMark mark(this, "StubRoutines", name); 1997 address start = __ pc(); 1998 1999 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 2000 2001 if (entry != NULL) { 2002 *entry = __ pc(); 2003 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2004 BLOCK_COMMENT("Entry:"); 2005 } 2006 2007 array_overlap_test(nooverlap_target, 2); 2008 2009 generate_conjoint_int_copy_core(aligned); 2010 2011 // O3, O4 are used as temp registers 2012 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4); 2013 __ retl(); 2014 __ delayed()->mov(G0, O0); // return 0 2015 return start; 2016 } 2017 2018 // 2019 // Helper methods for generate_disjoint_long_copy_core() 2020 // 2021 void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec, 2022 Label& L_loop, bool use_prefetch, bool use_bis) { 2023 __ align(OptoLoopAlignment); 2024 __ BIND(L_loop); 2025 for (int off = 0; off < 64; off += 16) { 2026 if (use_prefetch && (off & 31) == 0) { 2027 if (ArraycopySrcPrefetchDistance > 0) { 2028 __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads); 2029 } 2030 if (ArraycopyDstPrefetchDistance > 0) { 2031 __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads); 2032 } 2033 } 2034 __ ldx(from, off+0, O4); 2035 __ ldx(from, off+8, O5); 2036 if (use_bis) { 2037 __ stxa(O4, to, off+0); 2038 __ stxa(O5, to, off+8); 2039 } else { 2040 __ stx(O4, to, off+0); 2041 __ stx(O5, to, off+8); 2042 } 2043 } 2044 __ deccc(count, 8); 2045 __ inc(from, 64); 2046 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); 2047 __ delayed()->inc(to, 64); 2048 } 2049 2050 // 2051 // Generate core code for disjoint long copy (and oop copy on 64-bit). 2052 // "aligned" is ignored, because we must make the stronger 2053 // assumption that both addresses are always 64-bit aligned. 
2054 // 2055 // Arguments: 2056 // from: O0 2057 // to: O1 2058 // count: O2 treated as signed 2059 // 2060 // count -= 2; 2061 // if ( count >= 0 ) { // >= 2 elements 2062 // if ( count > 6) { // >= 8 elements 2063 // count -= 6; // original count - 8 2064 // do { 2065 // copy_8_elements; 2066 // count -= 8; 2067 // } while ( count >= 0 ); 2068 // count += 6; 2069 // } 2070 // if ( count >= 0 ) { // >= 2 elements 2071 // do { 2072 // copy_2_elements; 2073 // } while ( (count=count-2) >= 0 ); 2074 // } 2075 // } 2076 // count += 2; 2077 // if ( count != 0 ) { // 1 element left 2078 // copy_1_element; 2079 // } 2080 // 2081 void generate_disjoint_long_copy_core(bool aligned) { 2082 Label L_copy_8_bytes, L_copy_16_bytes, L_exit; 2083 const Register from = O0; // source array address 2084 const Register to = O1; // destination array address 2085 const Register count = O2; // elements count 2086 const Register offset0 = O4; // element offset 2087 const Register offset8 = O5; // next element offset 2088 2089 __ deccc(count, 2); 2090 __ mov(G0, offset0); // offset from start of arrays (0) 2091 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2092 __ delayed()->add(offset0, 8, offset8); 2093 2094 // Copy by 64 bytes chunks 2095 2096 const Register from64 = O3; // source address 2097 const Register to64 = G3; // destination address 2098 __ subcc(count, 6, O3); 2099 __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes ); 2100 __ delayed()->mov(to, to64); 2101 // Now we can use O4(offset0), O5(offset8) as temps 2102 __ mov(O3, count); 2103 // count >= 0 (original count - 8) 2104 __ mov(from, from64); 2105 2106 disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop); 2107 2108 // Restore O4(offset0), O5(offset8) 2109 __ sub(from64, from, offset0); 2110 __ inccc(count, 6); // restore count 2111 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes ); 2112 __ delayed()->add(offset0, 8, offset8); 2113 2114 // Copy by 16 bytes chunks 2115 __ align(OptoLoopAlignment); 2116 __ BIND(L_copy_16_bytes); 2117 __ ldx(from, offset0, O3); 2118 __ ldx(from, offset8, G3); 2119 __ deccc(count, 2); 2120 __ stx(O3, to, offset0); 2121 __ inc(offset0, 16); 2122 __ stx(G3, to, offset8); 2123 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes); 2124 __ delayed()->inc(offset8, 16); 2125 2126 // Copy last 8 bytes 2127 __ BIND(L_copy_8_bytes); 2128 __ inccc(count, 2); 2129 __ brx(Assembler::zero, true, Assembler::pn, L_exit ); 2130 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs 2131 __ ldx(from, offset0, O3); 2132 __ stx(O3, to, offset0); 2133 __ BIND(L_exit); 2134 } 2135 2136 // 2137 // Generate stub for disjoint long copy. 2138 // "aligned" is ignored, because we must make the stronger 2139 // assumption that both addresses are always 64-bit aligned. 2140 // 2141 // Arguments for generated stub: 2142 // from: O0 2143 // to: O1 2144 // count: O2 treated as signed 2145 // 2146 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { 2147 __ align(CodeEntryAlignment); 2148 StubCodeMark mark(this, "StubRoutines", name); 2149 address start = __ pc(); 2150 2151 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    generate_disjoint_long_copy_core(aligned);

    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  //
  // Generate core code for conjoint long copy (and oop copy on 64-bit).
  // "aligned" is ignored, because we must make the stronger
  // assumption that both addresses are always 64-bit aligned.
  //
  // Arguments:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  void generate_conjoint_long_copy_core(bool aligned) {
    // Do reverse copy.
    Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
    const Register from    = O0;  // source array address
    const Register to      = O1;  // destination array address
    const Register count   = O2;  // elements count
    const Register offset8 = O4;  // element offset
    const Register offset0 = O5;  // previous element offset

    __ subcc(count, 1, count);
    __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes);
    __ delayed()->sllx(count, LogBytesPerLong, offset8);
    __ sub(offset8, 8, offset0);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_16_bytes);
    __ ldx(from, offset8, O2);
    __ ldx(from, offset0, O3);
    __ stx(O2, to, offset8);
    __ deccc(offset8, 16);      // use offset8 as counter
    __ stx(O3, to, offset0);
    __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
    __ delayed()->dec(offset0, 16);

    __ BIND(L_copy_8_bytes);
    __ brx(Assembler::negative, false, Assembler::pn, L_exit);
    __ delayed()->nop();
    __ ldx(from, 0, O3);
    __ stx(O3, to, 0);
    __ BIND(L_exit);
  }

  // Generate stub for conjoint long copy.
  // "aligned" is ignored, because we must make the stronger
  // assumption that both addresses are always 64-bit aligned.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //
  address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
                                      address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    assert(aligned, "Should always be aligned");

    assert_clean_int(O2, O3);     // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, 3);

    generate_conjoint_long_copy_core(aligned);

    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }

  // Generate stub for disjoint oop copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
2247 // 2248 // Arguments for generated stub: 2249 // from: O0 2250 // to: O1 2251 // count: O2 treated as signed 2252 // 2253 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, 2254 bool dest_uninitialized = false) { 2255 2256 const Register from = O0; // source array address 2257 const Register to = O1; // destination array address 2258 const Register count = O2; // elements count 2259 2260 __ align(CodeEntryAlignment); 2261 StubCodeMark mark(this, "StubRoutines", name); 2262 address start = __ pc(); 2263 2264 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2265 2266 if (entry != NULL) { 2267 *entry = __ pc(); 2268 // caller can pass a 64-bit byte count here 2269 BLOCK_COMMENT("Entry:"); 2270 } 2271 2272 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2273 if (dest_uninitialized) { 2274 decorators |= IS_DEST_UNINITIALIZED; 2275 } 2276 if (aligned) { 2277 decorators |= ARRAYCOPY_ALIGNED; 2278 } 2279 2280 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2281 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); 2282 2283 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2284 if (UseCompressedOops) { 2285 generate_disjoint_int_copy_core(aligned); 2286 } else { 2287 generate_disjoint_long_copy_core(aligned); 2288 } 2289 2290 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); 2291 2292 // O3, O4 are used as temp registers 2293 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2294 __ retl(); 2295 __ delayed()->mov(G0, O0); // return 0 2296 return start; 2297 } 2298 2299 // Generate stub for conjoint oop copy. If "aligned" is true, the 2300 // "from" and "to" addresses are assumed to be heapword aligned. 2301 // 2302 // Arguments for generated stub: 2303 // from: O0 2304 // to: O1 2305 // count: O2 treated as signed 2306 // 2307 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, 2308 address *entry, const char *name, 2309 bool dest_uninitialized = false) { 2310 2311 const Register from = O0; // source array address 2312 const Register to = O1; // destination array address 2313 const Register count = O2; // elements count 2314 2315 __ align(CodeEntryAlignment); 2316 StubCodeMark mark(this, "StubRoutines", name); 2317 address start = __ pc(); 2318 2319 assert_clean_int(count, O3); // Make sure 'count' is clean int. 2320 2321 if (entry != NULL) { 2322 *entry = __ pc(); 2323 // caller can pass a 64-bit byte count here 2324 BLOCK_COMMENT("Entry:"); 2325 } 2326 2327 array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 2328 2329 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2330 if (dest_uninitialized) { 2331 decorators |= IS_DEST_UNINITIALIZED; 2332 } 2333 if (aligned) { 2334 decorators |= ARRAYCOPY_ALIGNED; 2335 } 2336 2337 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2338 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); 2339 2340 if (UseCompressedOops) { 2341 generate_conjoint_int_copy_core(aligned); 2342 } else { 2343 generate_conjoint_long_copy_core(aligned); 2344 } 2345 2346 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); 2347 2348 // O3, O4 are used as temp registers 2349 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); 2350 __ retl(); 2351 __ delayed()->mov(G0, O0); // return 0 2352 return start; 2353 } 2354 2355 2356 // Helper for generating a dynamic type check. 2357 // Smashes only the given temp registers. 
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Register temp,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass, temp);

    BLOCK_COMMENT("type_check:");

    Label L_miss, L_pop_to_miss;

    assert_clean_int(super_check_offset, temp);

    __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
                                     &L_success, &L_miss, NULL,
                                     super_check_offset);

    BLOCK_COMMENT("type_check_slow_path:");
    __ save_frame(0);
    __ check_klass_subtype_slow_path(sub_klass->after_save(),
                                     super_klass->after_save(),
                                     L0, L1, L2, L4,
                                     NULL, &L_pop_to_miss);
    __ ba(L_success);
    __ delayed()->restore();

    __ bind(L_pop_to_miss);
    __ restore();

    // Fall through on failure!
    __ BIND(L_miss);
  }


  // Generate stub for checked oop copy.
  //
  // Arguments for generated stub:
  //   from:  O0
  //   to:    O1
  //   count: O2 treated as signed
  //   ckoff: O3 (super_check_offset)
  //   ckval: O4 (super_klass)
  //   ret:   O0 zero for success; (-1^K) where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {

    const Register O0_from  = O0;  // source array address
    const Register O1_to    = O1;  // destination array address
    const Register O2_count = O2;  // elements count
    const Register O3_ckoff = O3;  // super_check_offset
    const Register O4_ckval = O4;  // super_klass

    const Register O5_offset = O5; // loop var, with stride wordSize
    const Register G1_remain = G1; // loop var, with stride -1
    const Register G3_oop    = G3; // actual oop copied
    const Register G4_klass  = G4; // oop._klass
    const Register G5_super  = G5; // oop._klass._primary_supers[ckval]

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

#ifdef ASSERT
    // We sometimes save a frame (see generate_type_check below).
    // If this will cause trouble, let's fail now instead of later.
    __ save_frame(0);
    __ restore();
#endif

    assert_clean_int(O2_count, G1);  // Make sure 'count' is clean int.

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      __ mov(O3, G1);           // spill: overlap test smashes O3
      __ mov(O4, G4);           // spill: overlap test smashes O4
      array_overlap_test(L, LogBytesPerHeapOop);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
      __ mov(G1, O3);
      __ mov(G4, O4);
    }
#endif //ASSERT

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from generic stub)
      BLOCK_COMMENT("Entry:");
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }

    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);

    Label load_element, store_element, do_epilogue, fail, done;
    __ addcc(O2_count, 0, G1_remain);  // initialize loop index, and test it
    __ brx(Assembler::notZero, false, Assembler::pt, load_element);
    __ delayed()->mov(G0, O5_offset);  // offset from start of arrays

    // Empty array: Nothing to do.
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->set(0, O0);          // return 0 on (trivial) success

    // ======== begin loop ========
    // (Loop is rotated; its entry is load_element.)
    // Loop variables:
    //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
    //   (O2 = len; O2 != 0; O2--)  --- number of oops *remaining*
    //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
    __ align(OptoLoopAlignment);

    __ BIND(store_element);
    __ deccc(G1_remain);                // decrement the count
    __ store_heap_oop(G3_oop, O1_to, O5_offset, noreg, AS_RAW); // store the oop
    __ inc(O5_offset, heapOopSize);     // step to next offset
    __ brx(Assembler::zero, true, Assembler::pt, do_epilogue);
    __ delayed()->set(0, O0);           // return 0 on success

    // ======== loop entry is here ========
    __ BIND(load_element);
    __ load_heap_oop(O0_from, O5_offset, G3_oop, noreg, AS_RAW); // load the oop
    __ br_null_short(G3_oop, Assembler::pt, store_element);

    __ load_klass(G3_oop, G4_klass);    // query the object klass

    generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
                        // branch to this on success:
                        store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register G1 has number of *remaining* oops, O2 number of *total* oops.
    // Emit GC store barriers for the oops we have copied (O2 minus G1),
    // and report their number to the caller.
    __ BIND(fail);
    __ subcc(O2_count, G1_remain, O2_count);
    __ brx(Assembler::zero, false, Assembler::pt, done);
    __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller

    __ BIND(do_epilogue);
    bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);

    __ BIND(done);
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->nop();                // return value in O0

    return start;
  }


  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
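  //
  // Dispatch idea: OR together 'from', 'to' and the byte count, then test the
  // low bits of the combined value once per width. Worked (hypothetical)
  // example: from = ...0x18, to = ...0x40, count = 16 gives OR low bits 0x58;
  // 0x58 & (BytesPerLong-1) == 0, so the long entry is taken with the count
  // scaled to 16 >> LogBytesPerLong == 2 elements.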
2518 // 2519 // Arguments for generated stub: 2520 // from: O0 2521 // to: O1 2522 // count: O2 byte count, treated as ssize_t, can be zero 2523 // 2524 // Examines the alignment of the operands and dispatches 2525 // to a long, int, short, or byte copy loop. 2526 // 2527 address generate_unsafe_copy(const char* name, 2528 address byte_copy_entry, 2529 address short_copy_entry, 2530 address int_copy_entry, 2531 address long_copy_entry) { 2532 2533 const Register O0_from = O0; // source array address 2534 const Register O1_to = O1; // destination array address 2535 const Register O2_count = O2; // elements count 2536 2537 const Register G1_bits = G1; // test copy of low bits 2538 2539 __ align(CodeEntryAlignment); 2540 StubCodeMark mark(this, "StubRoutines", name); 2541 address start = __ pc(); 2542 2543 // bump this on entry, not on exit: 2544 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3); 2545 2546 __ or3(O0_from, O1_to, G1_bits); 2547 __ or3(O2_count, G1_bits, G1_bits); 2548 2549 __ btst(BytesPerLong-1, G1_bits); 2550 __ br(Assembler::zero, true, Assembler::pt, 2551 long_copy_entry, relocInfo::runtime_call_type); 2552 // scale the count on the way out: 2553 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count); 2554 2555 __ btst(BytesPerInt-1, G1_bits); 2556 __ br(Assembler::zero, true, Assembler::pt, 2557 int_copy_entry, relocInfo::runtime_call_type); 2558 // scale the count on the way out: 2559 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count); 2560 2561 __ btst(BytesPerShort-1, G1_bits); 2562 __ br(Assembler::zero, true, Assembler::pt, 2563 short_copy_entry, relocInfo::runtime_call_type); 2564 // scale the count on the way out: 2565 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count); 2566 2567 __ br(Assembler::always, false, Assembler::pt, 2568 byte_copy_entry, relocInfo::runtime_call_type); 2569 __ delayed()->nop(); 2570 2571 return start; 2572 } 2573 2574 2575 // Perform range checks on the proposed arraycopy. 2576 // Kills the two temps, but nothing else. 2577 // Also, clean the sign bits of src_pos and dst_pos. 2578 void arraycopy_range_checks(Register src, // source array oop (O0) 2579 Register src_pos, // source position (O1) 2580 Register dst, // destination array oo (O2) 2581 Register dst_pos, // destination position (O3) 2582 Register length, // length of copy (O4) 2583 Register temp1, Register temp2, 2584 Label& L_failed) { 2585 BLOCK_COMMENT("arraycopy_range_checks:"); 2586 2587 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2588 2589 const Register array_length = temp1; // scratch 2590 const Register end_pos = temp2; // scratch 2591 2592 // Note: This next instruction may be in the delay slot of a branch: 2593 __ add(length, src_pos, end_pos); // src_pos + length 2594 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); 2595 __ cmp(end_pos, array_length); 2596 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2597 2598 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2599 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length 2600 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); 2601 __ cmp(end_pos, array_length); 2602 __ br(Assembler::greater, false, Assembler::pn, L_failed); 2603 2604 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2605 // Move with sign extension can be used since they are positive. 
    __ delayed()->signx(src_pos, src_pos);
    __ signx(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }


  //
  // Generate generic array copy stubs
  //
  // Input:
  //   O0 - src oop
  //   O1 - src_pos
  //   O2 - dst oop
  //   O3 - dst_pos
  //   O4 - element count
  //
  // Output:
  //   O0 ==  0 - success
  //   O0 == -1 - need to call System.arraycopy
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_objArray;

    // Input registers
    const Register src     = O0;  // source array oop
    const Register src_pos = O1;  // source position
    const Register dst     = O2;  // destination array oop
    const Register dst_pos = O3;  // destination position
    const Register length  = O4;  // elements count

    // registers used as temp
    const Register G3_src_klass = G3; // source array klass
    const Register G4_dst_klass = G4; // destination array klass
    const Register G5_lh        = G5; // layout helper
    const Register O5_temp      = O5;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);

    // In principle, the int arguments could be dirty.
    //assert_clean_int(src_pos, G1);
    //assert_clean_int(dst_pos, G1);
    //assert_clean_int(length, G1);

    //-----------------------------------------------------------------------
    // Assembler stubs will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    BLOCK_COMMENT("arraycopy initial argument checks");

    //  if (src == NULL) return -1;
    __ br_null(src, false, Assembler::pn, L_failed);

    //  if (src_pos < 0) return -1;
    __ delayed()->tst(src_pos);
    __ br(Assembler::negative, false, Assembler::pn, L_failed);
    __ delayed()->nop();

    //  if (dst == NULL) return -1;
    __ br_null(dst, false, Assembler::pn, L_failed);

    //  if (dst_pos < 0) return -1;
    __ delayed()->tst(dst_pos);
    __ br(Assembler::negative, false, Assembler::pn, L_failed);

    //  if (length < 0) return -1;
    __ delayed()->tst(length);
    __ br(Assembler::negative, false, Assembler::pn, L_failed);

    BLOCK_COMMENT("arraycopy argument klass checks");
    // get src->klass()
    if (UseCompressedClassPointers) {
      __ delayed()->nop(); // ??? not good
      __ load_klass(src, G3_src_klass);
    } else {
      __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
    }

#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L_a, L_b;
      __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
      __ bind(L_a);
      __ stop("broken null klass");
      __ bind(L_b);
      __ load_klass(dst, G4_dst_klass);
      __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
      __ delayed()->mov(G0, G4_dst_klass);                 // scribble the temp
      BLOCK_COMMENT("assert done");
    }
#endif

    // Load layout helper
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Load the 32-bit signed value. Use br() with it to check icc.
    __ lduw(G3_src_klass, lh_offset, G5_lh);

    if (UseCompressedClassPointers) {
      __ load_klass(dst, G4_dst_klass);
    }
    // Handle objArrays completely differently...
    juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ set(objArray_lh, O5_temp);
    __ cmp(G5_lh, O5_temp);
    __ br(Assembler::equal, false, Assembler::pt, L_objArray);
    if (UseCompressedClassPointers) {
      __ delayed()->nop();
    } else {
      __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
    }

    //  if (src->klass() != dst->klass()) return -1;
    __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);

    //  if (!src->is_Array()) return -1;
    __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
    __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
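    // The assert below exploits the signed encoding: among array layout
    // helpers (all negative, since the tag occupies the top two bits),
    // typeArray values (tag 0x3) are >= (0x3 << 30) == 0xC0000000 when
    // compared as 32-bit signed integers, while objArray values (tag 0x2)
    // compare strictly lower.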
#ifdef ASSERT
    __ delayed()->nop();
    { Label L;
      jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
      __ set(lh_prim_tag_in_place, O5_temp);
      __ cmp(G5_lh, O5_temp);
      __ br(Assembler::greaterEqual, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("must be a primitive array");
      __ bind(L);
    }
#else
    __ delayed();                               // match next insn to prev branch
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
                           O5_temp, G4_dst_klass, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register G4_offset = G4_dst_klass; // array offset
    const Register G3_elsize = G3_src_klass; // log2 element size

    __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
    __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
    __ add(src, G4_offset, src);  // src array offset
    __ add(dst, G4_offset, dst);  // dst array offset
    __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size

    // next registers should be set before the jump to corresponding stub
    const Register from  = O0;  // source array address
    const Register to    = O1;  // destination array address
    const Register count = O2;  // elements count

    // 'from', 'to', 'count' registers should be set in this order
    // since they are the same as 'src', 'src_pos', 'dst'.

    BLOCK_COMMENT("scale indexes to element size");
    __ sll_ptr(src_pos, G3_elsize, src_pos);
    __ sll_ptr(dst_pos, G3_elsize, dst_pos);
    __ add(src, src_pos, from);   // src_addr
    __ add(dst, dst_pos, to);     // dst_addr

    BLOCK_COMMENT("choose copy loop based on element size");
    __ cmp(G3_elsize, 0);
    __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
    __ delayed()->signx(length, count); // length

    __ cmp(G3_elsize, LogBytesPerShort);
    __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
    __ delayed()->signx(length, count); // length

    __ cmp(G3_elsize, LogBytesPerInt);
    __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
    __ delayed()->signx(length, count); // length
#ifdef ASSERT
    { Label L;
      __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
    }
#endif
    __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
    __ delayed()->signx(length, count); // length

    // ObjArrayKlass
    __ BIND(L_objArray);
    // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length

    Label L_plain_copy, L_checkcast_copy;
    // test array classes for subtyping
    __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality
    __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
    __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below

    // Identically typed arrays can be copied without element-wise checks.
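    // (When the klasses differ, the L_checkcast_copy path below repeats these
    // range checks and then subtype-checks the source array klass against the
    // destination array klass before choosing plain or element-wise copy.)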
    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
                           O5_temp, G5_lh, L_failed);

    __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); // src offset
    __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); // dst offset
    __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
    __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
    __ add(src, src_pos, from);   // src_addr
    __ add(dst, dst_pos, to);     // dst_addr
    __ BIND(L_plain_copy);
    __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
    __ delayed()->signx(length, count); // length

    __ BIND(L_checkcast_copy);
    // live at this point: G3_src_klass, G4_dst_klass
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
      __ cmp(G5_lh, O5_temp);
      __ br(Assembler::notEqual, false, Assembler::pn, L_failed);

      // It is safe to examine both src.length and dst.length.
      __ delayed();                             // match next insn to prev branch
      arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
                             O5_temp, G5_lh, L_failed);

      // Marshal the base address arguments now, freeing registers.
      __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); // src offset
      __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); // dst offset
      __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
      __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
      __ add(src, src_pos, from);               // src_addr
      __ add(dst, dst_pos, to);                 // dst_addr
      __ signx(length, count);                  // length (reloaded)

      Register sco_temp = O3;                   // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 G4_dst_klass, G3_src_klass);

      // Generate the type check.
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ lduw(G4_dst_klass, sco_offset, sco_temp);
      generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
                          O5_temp, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());

      // the checkcast_copy loop needs two extra arguments:
      __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
      // lduw(O4, sco_offset, O3);              // sco of elem klass

      __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
      __ delayed()->lduw(O4, sco_offset, O3);
    }

    __ BIND(L_failed);
    __ retl();
    __ delayed()->sub(G0, 1, O0); // return -1
    return start;
  }

  //
  // Generate stub for heap zeroing.
  // "to" address is aligned to jlong (8 bytes).
2898 // 2899 // Arguments for generated stub: 2900 // to: O0 2901 // count: O1 treated as signed (count of HeapWord) 2902 // count could be 0 2903 // 2904 address generate_zero_aligned_words(const char* name) { 2905 __ align(CodeEntryAlignment); 2906 StubCodeMark mark(this, "StubRoutines", name); 2907 address start = __ pc(); 2908 2909 const Register to = O0; // source array address 2910 const Register count = O1; // HeapWords count 2911 const Register temp = O2; // scratch 2912 2913 Label Ldone; 2914 __ sllx(count, LogHeapWordSize, count); // to bytes count 2915 // Use BIS for zeroing 2916 __ bis_zeroing(to, count, temp, Ldone); 2917 __ bind(Ldone); 2918 __ retl(); 2919 __ delayed()->nop(); 2920 return start; 2921 } 2922 2923 void generate_arraycopy_stubs() { 2924 address entry; 2925 address entry_jbyte_arraycopy; 2926 address entry_jshort_arraycopy; 2927 address entry_jint_arraycopy; 2928 address entry_oop_arraycopy; 2929 address entry_jlong_arraycopy; 2930 address entry_checkcast_arraycopy; 2931 2932 //*** jbyte 2933 // Always need aligned and unaligned versions 2934 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2935 "jbyte_disjoint_arraycopy"); 2936 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, 2937 &entry_jbyte_arraycopy, 2938 "jbyte_arraycopy"); 2939 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, 2940 "arrayof_jbyte_disjoint_arraycopy"); 2941 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, 2942 "arrayof_jbyte_arraycopy"); 2943 2944 //*** jshort 2945 // Always need aligned and unaligned versions 2946 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2947 "jshort_disjoint_arraycopy"); 2948 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, 2949 &entry_jshort_arraycopy, 2950 "jshort_arraycopy"); 2951 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, 2952 "arrayof_jshort_disjoint_arraycopy"); 2953 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, 2954 "arrayof_jshort_arraycopy"); 2955 2956 //*** jint 2957 // Aligned versions 2958 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, 2959 "arrayof_jint_disjoint_arraycopy"); 2960 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, 2961 "arrayof_jint_arraycopy"); 2962 // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 2963 // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). 
    StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
                                                                        "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy          = generate_conjoint_int_copy(false, entry,
                                                                        &entry_jint_arraycopy,
                                                                        "jint_arraycopy");

    //*** jlong
    // It is always aligned
    StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
                                                                                  "arrayof_jlong_disjoint_arraycopy");
    StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
                                                                                  "arrayof_jlong_arraycopy");
    StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
    StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;


    //*** oops
    // Aligned versions
    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
                                                                                      "arrayof_oop_disjoint_arraycopy");
    StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
                                                                                      "arrayof_oop_arraycopy");
    // Aligned versions without pre-barriers
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
                                                                                      "arrayof_oop_disjoint_arraycopy_uninit",
                                                                                      /*dest_uninitialized*/true);
    StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
                                                                                      "arrayof_oop_arraycopy_uninit",
                                                                                      /*dest_uninitialized*/true);
    if (UseCompressedOops) {
      // With compressed oops we need unaligned versions; notice that we overwrite entry_oop_arraycopy.
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(false, &entry,
                                                                                "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
                                                                                "oop_arraycopy");
      // Unaligned versions without pre-barriers
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry,
                                                                                "oop_disjoint_arraycopy_uninit",
                                                                                /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_oop_copy(false, entry, NULL,
                                                                                "oop_arraycopy_uninit",
                                                                                /*dest_uninitialized*/true);
    } else {
      // oop arraycopy is always aligned on 32-bit and on 64-bit without compressed oops
      StubRoutines::_oop_disjoint_arraycopy        = StubRoutines::_arrayof_oop_disjoint_arraycopy;
      StubRoutines::_oop_arraycopy                 = StubRoutines::_arrayof_oop_arraycopy;
      StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
      StubRoutines::_oop_arraycopy_uninit          = StubRoutines::_arrayof_oop_arraycopy_uninit;
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
                                                            entry_jbyte_arraycopy,
                                                            entry_jshort_arraycopy,
                                                            entry_jint_arraycopy,
                                                            entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill  = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
"jshort_fill"); 3033 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3034 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3035 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3036 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3037 3038 if (UseBlockZeroing) { 3039 StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words"); 3040 } 3041 } 3042 3043 address generate_aescrypt_encryptBlock() { 3044 // required since we read expanded key 'int' array starting first element without alignment considerations 3045 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3046 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3047 __ align(CodeEntryAlignment); 3048 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3049 Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output; 3050 address start = __ pc(); 3051 Register from = O0; // source byte array 3052 Register to = O1; // destination byte array 3053 Register key = O2; // expanded key array 3054 const Register keylen = O4; //reg for storing expanded key array length 3055 3056 // read expanded key length 3057 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3058 3059 // Method to address arbitrary alignment for load instructions: 3060 // Check last 3 bits of 'from' address to see if it is aligned to 8-byte boundary 3061 // If zero/aligned then continue with double FP load instructions 3062 // If not zero/mis-aligned then alignaddr will set GSR.align with number of bytes to skip during faligndata 3063 // alignaddr will also convert arbitrary aligned 'from' address to nearest 8-byte aligned address 3064 // load 3 * 8-byte components (to read 16 bytes input) in 3 different FP regs starting at this aligned address 3065 // faligndata will then extract (based on GSR.align value) the appropriate 8 bytes from the 2 source regs 3066 3067 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3068 __ andcc(from, 7, G0); 3069 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input); 3070 __ delayed()->alignaddr(from, G0, from); 3071 3072 // aligned case: load input into F54-F56 3073 __ ldf(FloatRegisterImpl::D, from, 0, F54); 3074 __ ldf(FloatRegisterImpl::D, from, 8, F56); 3075 __ ba_short(L_load_expanded_key); 3076 3077 __ BIND(L_load_misaligned_input); 3078 __ ldf(FloatRegisterImpl::D, from, 0, F54); 3079 __ ldf(FloatRegisterImpl::D, from, 8, F56); 3080 __ ldf(FloatRegisterImpl::D, from, 16, F58); 3081 __ faligndata(F54, F56, F54); 3082 __ faligndata(F56, F58, F56); 3083 3084 __ BIND(L_load_expanded_key); 3085 // Since we load expanded key buffers starting first element, 8-byte alignment is guaranteed 3086 for ( int i = 0; i <= 38; i += 2 ) { 3087 __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i)); 3088 } 3089 3090 // perform cipher transformation 3091 __ fxor(FloatRegisterImpl::D, F0, F54, F54); 3092 __ fxor(FloatRegisterImpl::D, F2, F56, F56); 3093 // rounds 1 through 8 3094 for ( int i = 4; i <= 28; i += 8 ) { 3095 __ aes_eround01(as_FloatRegister(i), F54, F56, F58); 3096 __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60); 3097 __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54); 3098 __ aes_eround23(as_FloatRegister(i+6), F58, 
    }
    __ aes_eround01(F36, F54, F56, F58); // round 9
    __ aes_eround23(F38, F54, F56, F60);

    // 128-bit original key size
    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);

    for ( int i = 40;  i <= 50; i += 2 ) {
      __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
    }
    __ aes_eround01(F40, F58, F60, F54); // round 10
    __ aes_eround23(F42, F58, F60, F56);
    __ aes_eround01(F44, F54, F56, F58); // round 11
    __ aes_eround23(F46, F54, F56, F60);

    // 192-bit original key size
    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);

    __ ldf(FloatRegisterImpl::D, key, 208, F52);
    __ aes_eround01(F48, F58, F60, F54); // round 12
    __ aes_eround23(F50, F58, F60, F56);
    __ ldf(FloatRegisterImpl::D, key, 216, F46);
    __ ldf(FloatRegisterImpl::D, key, 224, F48);
    __ ldf(FloatRegisterImpl::D, key, 232, F50);
    __ aes_eround01(F52, F54, F56, F58); // round 13
    __ aes_eround23(F46, F54, F56, F60);
    __ ba_short(L_storeOutput);

    __ BIND(L_doLast128bit);
    __ ldf(FloatRegisterImpl::D, key, 160, F48);
    __ ldf(FloatRegisterImpl::D, key, 168, F50);

    __ BIND(L_storeOutput);
    // perform last round of encryption common for all key sizes
    __ aes_eround01_l(F48, F58, F60, F54); // last round
    __ aes_eround23_l(F50, F58, F60, F56);

    // Method for handling arbitrary alignment in store instructions:
    // Check the last 3 bits of the 'dest' address to see if it is 8-byte aligned.
    // If zero/aligned, continue with double FP store instructions.
    // If non-zero/misaligned, edge8n generates an edge mask in the result reg
    // (O3 in the code below).
    // Example: if the dest address is 0x07 and the nearest 8-byte aligned address
    // is 0x00, the edge mask will be 00000001.
    // Compute (8-n), where n is the number of bytes skipped by the partial store
    // (stpartialf) instruction according to the edge mask; n = 7 in this case.
    // The value of n comes from the andcc that checks the 'dest' alignment; it is
    // available in O5 below.
    // Set GSR.align to (8-n) using alignaddr.
    // Circularly byte-shift the store values by n places so that the original
    // bytes are at the correct positions for stpartialf.
    // Round the arbitrarily aligned 'dest' address down to the nearest 8-byte
    // aligned address.
    // Partially store the original first (8-n) bytes starting at the original
    // 'dest' address.
    // Negate the edge mask so that the subsequent stpartialf can store the
    // original (8-n-1)th through 8th bytes at the appropriate addresses.
    // This process has to be executed for both of the 8-byte result values.

    // check for 8-byte alignment since the dest byte array may have arbitrary
    // alignment if its offset mod 8 is non-zero
    __ andcc(to, 7, O5);
    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
    __ delayed()->edge8n(to, G0, O3);

    // aligned case: store output into the destination array
    __ stf(FloatRegisterImpl::D, F54, to, 0);
    __ retl();
    __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8);

    __ BIND(L_store_misaligned_output);
    __ add(to, 8, O4);
    __ mov(8, O2);
    __ sub(O2, O5, O2);
    __ alignaddr(O2, G0, O2);
    __ faligndata(F54, F54, F54);
    __ faligndata(F56, F56, F56);
    __ and3(to, -8, to);
    __ and3(O4, -8, O4);
    __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
    __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
    __ add(to, 8, to);
    __ add(O4, 8, O4);
    __ orn(G0, O3, O3);
    __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
    __ retl();
    __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);

    return start;
  }

  address generate_aescrypt_decryptBlock() {
    assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
           "the following code assumes that first element of an int array is aligned to 8 bytes");
    // required since we read the original key 'byte' array as well in the decryption stubs
    assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
           "the following code assumes that first element of a byte array is aligned to 8 bytes");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    address start = __ pc();
    Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input;
    Label L_256bit_transform, L_common_transform, L_store_misaligned_output;
    Register from = O0;         // source byte array
    Register to = O1;           // destination byte array
    Register key = O2;          // expanded key array
    Register original_key = O3; // original key array, only required during decryption
    const Register keylen = O4; // reg for storing expanded key array length

    // read expanded key array length
    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);

    // save 'from' since we may need to recheck alignment in case of 256-bit decryption
    __ mov(from, G1);

    // check for 8-byte alignment since the source byte array may have an arbitrary
    // alignment if its offset mod 8 is non-zero
    __ andcc(from, 7, G0);
    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
    __ delayed()->alignaddr(from, G0, from);

    // aligned case: load input into F52-F54
    __ ldf(FloatRegisterImpl::D, from, 0, F52);
    __ ldf(FloatRegisterImpl::D, from, 8, F54);
    __ ba_short(L_load_original_key);

    __ BIND(L_load_misaligned_input);
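    // At this point the delay-slot alignaddr above has rounded 'from' down to
    // an 8-byte boundary and latched the skipped byte count in GSR.align.
    // Worked (hypothetical) example: from originally 0x1003 -> GSR.align = 3,
    // from = 0x1000; the three aligned loads below fetch bytes 0x1000..0x1017,
    // and each faligndata extracts 8 bytes starting 3 bytes into its register
    // pair, recovering the 16 input bytes that begin at 0x1003.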
ldf(FloatRegisterImpl::D, from, 0, F52); 3216 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3217 __ ldf(FloatRegisterImpl::D, from, 16, F56); 3218 __ faligndata(F52, F54, F52); 3219 __ faligndata(F54, F56, F54); 3220 3221 __ BIND(L_load_original_key); 3222 // load original key from SunJCE expanded decryption key 3223 // Since we load original key buffer starting first element, 8-byte alignment is guaranteed 3224 for ( int i = 0; i <= 3; i++ ) { 3225 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3226 } 3227 3228 // 256-bit original key size 3229 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); 3230 3231 // 192-bit original key size 3232 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); 3233 3234 // 128-bit original key size 3235 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3236 for ( int i = 0; i <= 36; i += 4 ) { 3237 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); 3238 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); 3239 } 3240 3241 // perform 128-bit key specific inverse cipher transformation 3242 __ fxor(FloatRegisterImpl::D, F42, F54, F54); 3243 __ fxor(FloatRegisterImpl::D, F40, F52, F52); 3244 __ ba_short(L_common_transform); 3245 3246 __ BIND(L_expand192bit); 3247 3248 // start loading rest of the 192-bit key 3249 __ ldf(FloatRegisterImpl::S, original_key, 16, F4); 3250 __ ldf(FloatRegisterImpl::S, original_key, 20, F5); 3251 3252 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3253 for ( int i = 0; i <= 36; i += 6 ) { 3254 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); 3255 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); 3256 __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3257 } 3258 __ aes_kexpand1(F42, F46, 7, F48); 3259 __ aes_kexpand2(F44, F48, F50); 3260 3261 // perform 192-bit key specific inverse cipher transformation 3262 __ fxor(FloatRegisterImpl::D, F50, F54, F54); 3263 __ fxor(FloatRegisterImpl::D, F48, F52, F52); 3264 __ aes_dround23(F46, F52, F54, F58); 3265 __ aes_dround01(F44, F52, F54, F56); 3266 __ aes_dround23(F42, F56, F58, F54); 3267 __ aes_dround01(F40, F56, F58, F52); 3268 __ ba_short(L_common_transform); 3269 3270 __ BIND(L_expand256bit); 3271 3272 // load rest of the 256-bit key 3273 for ( int i = 4; i <= 7; i++ ) { 3274 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3275 } 3276 3277 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3278 for ( int i = 0; i <= 40; i += 8 ) { 3279 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); 3280 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3281 __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); 3282 __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); 3283 } 3284 __ aes_kexpand1(F48, F54, 6, F56); 3285 __ aes_kexpand2(F50, F56, F58); 3286 3287 for ( int i = 0; i <= 6; i += 2 ) { 3288 __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i)); 3289 } 3290 3291 // reload original 'from' address 3292 __ mov(G1, from); 3293 3294 // re-check 8-byte alignment 3295 
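// Why reload and re-check? The 256-bit key expansion above overwrote
// F52/F54, where the input block had been staged, so the block is fetched
// again from the address saved in G1. In C terms (a sketch, not the
// emitted code):
//
//   if (((uintptr_t)from & 7) == 0)
//     load_two_doublewords(from);    // the two ldf's below
//   else
//     load_with_faligndata(from);    // three ldf's plus two faligndata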
__ andcc(from, 7, G0); 3296 __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input); 3297 __ delayed()->alignaddr(from, G0, from); 3298 3299 // aligned case: load input into F52-F54 3300 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3301 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3302 __ ba_short(L_256bit_transform); 3303 3304 __ BIND(L_reload_misaligned_input); 3305 __ ldf(FloatRegisterImpl::D, from, 0, F52); 3306 __ ldf(FloatRegisterImpl::D, from, 8, F54); 3307 __ ldf(FloatRegisterImpl::D, from, 16, F56); 3308 __ faligndata(F52, F54, F52); 3309 __ faligndata(F54, F56, F54); 3310 3311 // perform 256-bit key specific inverse cipher transformation 3312 __ BIND(L_256bit_transform); 3313 __ fxor(FloatRegisterImpl::D, F0, F54, F54); 3314 __ fxor(FloatRegisterImpl::D, F2, F52, F52); 3315 __ aes_dround23(F4, F52, F54, F58); 3316 __ aes_dround01(F6, F52, F54, F56); 3317 __ aes_dround23(F50, F56, F58, F54); 3318 __ aes_dround01(F48, F56, F58, F52); 3319 __ aes_dround23(F46, F52, F54, F58); 3320 __ aes_dround01(F44, F52, F54, F56); 3321 __ aes_dround23(F42, F56, F58, F54); 3322 __ aes_dround01(F40, F56, F58, F52); 3323 3324 for ( int i = 0; i <= 7; i++ ) { 3325 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3326 } 3327 3328 // perform inverse cipher transformations common for all key sizes 3329 __ BIND(L_common_transform); 3330 for ( int i = 38; i >= 6; i -= 8 ) { 3331 __ aes_dround23(as_FloatRegister(i), F52, F54, F58); 3332 __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56); 3333 if ( i != 6) { 3334 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54); 3335 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52); 3336 } else { 3337 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54); 3338 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52); 3339 } 3340 } 3341 3342 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3343 __ andcc(to, 7, O5); 3344 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output); 3345 __ delayed()->edge8n(to, G0, O3); 3346 3347 // aligned case: store output into the destination array 3348 __ stf(FloatRegisterImpl::D, F52, to, 0); 3349 __ retl(); 3350 __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8); 3351 3352 __ BIND(L_store_misaligned_output); 3353 __ add(to, 8, O4); 3354 __ mov(8, O2); 3355 __ sub(O2, O5, O2); 3356 __ alignaddr(O2, G0, O2); 3357 __ faligndata(F52, F52, F52); 3358 __ faligndata(F54, F54, F54); 3359 __ and3(to, -8, to); 3360 __ and3(O4, -8, O4); 3361 __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY); 3362 __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY); 3363 __ add(to, 8, to); 3364 __ add(O4, 8, O4); 3365 __ orn(G0, O3, O3); 3366 __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY); 3367 __ retl(); 3368 __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY); 3369 3370 return start; 3371 } 3372 3373 address generate_cipherBlockChaining_encryptAESCrypt() { 3374 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3375 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3376 assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0, 3377 "the following code assumes that first element of a byte array is aligned to 8 bytes"); 3378 __ align(CodeEntryAlignment); 3379 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3380 Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit; 3381 Label 
L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform; 3382 Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit; 3383 Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit; 3384 address start = __ pc(); 3385 Register from = I0; // source byte array 3386 Register to = I1; // destination byte array 3387 Register key = I2; // expanded key array 3388 Register rvec = I3; // init vector 3389 const Register len_reg = I4; // cipher length 3390 const Register keylen = I5; // reg for storing expanded key array length 3391 3392 __ save_frame(0); 3393 // save cipher len to return in the end 3394 __ mov(len_reg, L0); 3395 3396 // read expanded key length 3397 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3398 3399 // load initial vector, 8-byte alignment is guaranteed 3400 __ ldf(FloatRegisterImpl::D, rvec, 0, F60); 3401 __ ldf(FloatRegisterImpl::D, rvec, 8, F62); 3402 // load key, 8-byte alignment is guaranteed 3403 __ ldx(key,0,G1); 3404 __ ldx(key,8,G5); 3405 3406 // start loading expanded key, 8-byte alignment is guaranteed 3407 for ( int i = 0, j = 16; i <= 38; i += 2, j += 8 ) { 3408 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3409 } 3410 3411 // 128-bit original key size 3412 __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128); 3413 3414 for ( int i = 40, j = 176; i <= 46; i += 2, j += 8 ) { 3415 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3416 } 3417 3418 // 192-bit original key size 3419 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192); 3420 3421 for ( int i = 48, j = 208; i <= 54; i += 2, j += 8 ) { 3422 __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i)); 3423 } 3424 3425 // 256-bit original key size 3426 __ ba_short(L_cbcenc256); 3427 3428 __ align(OptoLoopAlignment); 3429 __ BIND(L_cbcenc128); 3430 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3431 __ andcc(from, 7, G0); 3432 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit); 3433 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3434 3435 // aligned case: load input into G3 and G4 3436 __ ldx(from,0,G3); 3437 __ ldx(from,8,G4); 3438 __ ba_short(L_128bit_transform); 3439 3440 __ BIND(L_load_misaligned_input_128bit); 3441 // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption 3442 __ alignaddr(from, G0, from); 3443 __ ldf(FloatRegisterImpl::D, from, 0, F48); 3444 __ ldf(FloatRegisterImpl::D, from, 8, F50); 3445 __ ldf(FloatRegisterImpl::D, from, 16, F52); 3446 __ faligndata(F48, F50, F48); 3447 __ faligndata(F50, F52, F50); 3448 __ movdtox(F48, G3); 3449 __ movdtox(F50, G4); 3450 __ mov(L1, from); 3451 3452 __ BIND(L_128bit_transform); 3453 __ xor3(G1,G3,G3); 3454 __ xor3(G5,G4,G4); 3455 __ movxtod(G3,F56); 3456 __ movxtod(G4,F58); 3457 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3458 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3459 3460 // TEN_EROUNDS 3461 for ( int i = 0; i <= 32; i += 8 ) { 3462 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3463 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3464 if (i != 32 ) { 3465 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3466 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3467 } else { 3468 __
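// (final iteration: the *_l forms apply the last AES round, which skips
//  MixColumns. Schematically, with K[0..10] the expanded 128-bit key
//  schedule, a sketch of what this loop computes:
//    state = P ^ IV ^ K[0];                    // xor3/fxor above
//    for r in 1..9: state = Round(state, K[r]);
//    C     = LastRound(state, K[10]);          // eround01_l/eround23_l
//  each eround01/eround23 pair produces one 16-byte state in two halves)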
aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3469 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3470 } 3471 } 3472 3473 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3474 __ andcc(to, 7, L1); 3475 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit); 3476 __ delayed()->edge8n(to, G0, L2); 3477 3478 // aligned case: store output into the destination array 3479 __ stf(FloatRegisterImpl::D, F60, to, 0); 3480 __ stf(FloatRegisterImpl::D, F62, to, 8); 3481 __ ba_short(L_check_loop_end_128bit); 3482 3483 __ BIND(L_store_misaligned_output_128bit); 3484 __ add(to, 8, L3); 3485 __ mov(8, L4); 3486 __ sub(L4, L1, L4); 3487 __ alignaddr(L4, G0, L4); 3488 // save cipher text before circular right shift 3489 // as it needs to be stored as iv for next block (see code before next retl) 3490 __ movdtox(F60, L6); 3491 __ movdtox(F62, L7); 3492 __ faligndata(F60, F60, F60); 3493 __ faligndata(F62, F62, F62); 3494 __ mov(to, L5); 3495 __ and3(to, -8, to); 3496 __ and3(L3, -8, L3); 3497 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3498 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3499 __ add(to, 8, to); 3500 __ add(L3, 8, L3); 3501 __ orn(G0, L2, L2); 3502 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3503 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3504 __ mov(L5, to); 3505 __ movxtod(L6, F60); 3506 __ movxtod(L7, F62); 3507 3508 __ BIND(L_check_loop_end_128bit); 3509 __ add(from, 16, from); 3510 __ add(to, 16, to); 3511 __ subcc(len_reg, 16, len_reg); 3512 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128); 3513 __ delayed()->nop(); 3514 // re-init initial vector for next block, 8-byte alignment is guaranteed 3515 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3516 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3517 __ mov(L0, I0); 3518 __ ret(); 3519 __ delayed()->restore(); 3520 3521 __ align(OptoLoopAlignment); 3522 __ BIND(L_cbcenc192); 3523 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3524 __ andcc(from, 7, G0); 3525 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit); 3526 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3527 3528 // aligned case: load input into G3 and G4 3529 __ ldx(from,0,G3); 3530 __ ldx(from,8,G4); 3531 __ ba_short(L_192bit_transform); 3532 3533 __ BIND(L_load_misaligned_input_192bit); 3534 // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption 3535 __ alignaddr(from, G0, from); 3536 __ ldf(FloatRegisterImpl::D, from, 0, F48); 3537 __ ldf(FloatRegisterImpl::D, from, 8, F50); 3538 __ ldf(FloatRegisterImpl::D, from, 16, F52); 3539 __ faligndata(F48, F50, F48); 3540 __ faligndata(F50, F52, F50); 3541 __ movdtox(F48, G3); 3542 __ movdtox(F50, G4); 3543 __ mov(L1, from); 3544 3545 __ BIND(L_192bit_transform); 3546 __ xor3(G1,G3,G3); 3547 __ xor3(G5,G4,G4); 3548 __ movxtod(G3,F56); 3549 __ movxtod(G4,F58); 3550 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3551 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3552 3553 // TWELVE_EROUNDS 3554 for ( int i = 0; i <= 40; i += 8 ) { 3555 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3556 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3557 if (i != 40 ) { 3558 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3559 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3560 } else { 3561 __
aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3562 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3563 } 3564 } 3565 3566 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3567 __ andcc(to, 7, L1); 3568 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit); 3569 __ delayed()->edge8n(to, G0, L2); 3570 3571 // aligned case: store output into the destination array 3572 __ stf(FloatRegisterImpl::D, F60, to, 0); 3573 __ stf(FloatRegisterImpl::D, F62, to, 8); 3574 __ ba_short(L_check_loop_end_192bit); 3575 3576 __ BIND(L_store_misaligned_output_192bit); 3577 __ add(to, 8, L3); 3578 __ mov(8, L4); 3579 __ sub(L4, L1, L4); 3580 __ alignaddr(L4, G0, L4); 3581 __ movdtox(F60, L6); 3582 __ movdtox(F62, L7); 3583 __ faligndata(F60, F60, F60); 3584 __ faligndata(F62, F62, F62); 3585 __ mov(to, L5); 3586 __ and3(to, -8, to); 3587 __ and3(L3, -8, L3); 3588 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3589 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3590 __ add(to, 8, to); 3591 __ add(L3, 8, L3); 3592 __ orn(G0, L2, L2); 3593 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3594 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3595 __ mov(L5, to); 3596 __ movxtod(L6, F60); 3597 __ movxtod(L7, F62); 3598 3599 __ BIND(L_check_loop_end_192bit); 3600 __ add(from, 16, from); 3601 __ subcc(len_reg, 16, len_reg); 3602 __ add(to, 16, to); 3603 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192); 3604 __ delayed()->nop(); 3605 // re-init initial vector for next block, 8-byte alignment is guaranteed 3606 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3607 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3608 __ mov(L0, I0); 3609 __ ret(); 3610 __ delayed()->restore(); 3611 3612 __ align(OptoLoopAlignment); 3613 __ BIND(L_cbcenc256); 3614 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3615 __ andcc(from, 7, G0); 3616 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit); 3617 __ delayed()->mov(from, L1); // save original 'from' address before alignaddr 3618 3619 // aligned case: load input into G3 and G4 3620 __ ldx(from,0,G3); 3621 __ ldx(from,8,G4); 3622 __ ba_short(L_256bit_transform); 3623 3624 __ BIND(L_load_misaligned_input_256bit); 3625 // cannot clobber F48, F50 and F52. 
F56, F58 can be used though 3626 __ alignaddr(from, G0, from); 3627 __ movdtox(F60, L2); // save F60 before overwriting 3628 __ ldf(FloatRegisterImpl::D, from, 0, F56); 3629 __ ldf(FloatRegisterImpl::D, from, 8, F58); 3630 __ ldf(FloatRegisterImpl::D, from, 16, F60); 3631 __ faligndata(F56, F58, F56); 3632 __ faligndata(F58, F60, F58); 3633 __ movdtox(F56, G3); 3634 __ movdtox(F58, G4); 3635 __ mov(L1, from); 3636 __ movxtod(L2, F60); 3637 3638 __ BIND(L_256bit_transform); 3639 __ xor3(G1,G3,G3); 3640 __ xor3(G5,G4,G4); 3641 __ movxtod(G3,F56); 3642 __ movxtod(G4,F58); 3643 __ fxor(FloatRegisterImpl::D, F60, F56, F60); 3644 __ fxor(FloatRegisterImpl::D, F62, F58, F62); 3645 3646 // FOURTEEN_EROUNDS 3647 for ( int i = 0; i <= 48; i += 8 ) { 3648 __ aes_eround01(as_FloatRegister(i), F60, F62, F56); 3649 __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58); 3650 if (i != 48 ) { 3651 __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60); 3652 __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62); 3653 } else { 3654 __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60); 3655 __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62); 3656 } 3657 } 3658 3659 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3660 __ andcc(to, 7, L1); 3661 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit); 3662 __ delayed()->edge8n(to, G0, L2); 3663 3664 // aligned case: store output into the destination array 3665 __ stf(FloatRegisterImpl::D, F60, to, 0); 3666 __ stf(FloatRegisterImpl::D, F62, to, 8); 3667 __ ba_short(L_check_loop_end_256bit); 3668 3669 __ BIND(L_store_misaligned_output_256bit); 3670 __ add(to, 8, L3); 3671 __ mov(8, L4); 3672 __ sub(L4, L1, L4); 3673 __ alignaddr(L4, G0, L4); 3674 __ movdtox(F60, L6); 3675 __ movdtox(F62, L7); 3676 __ faligndata(F60, F60, F60); 3677 __ faligndata(F62, F62, F62); 3678 __ mov(to, L5); 3679 __ and3(to, -8, to); 3680 __ and3(L3, -8, L3); 3681 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3682 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3683 __ add(to, 8, to); 3684 __ add(L3, 8, L3); 3685 __ orn(G0, L2, L2); 3686 __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY); 3687 __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY); 3688 __ mov(L5, to); 3689 __ movxtod(L6, F60); 3690 __ movxtod(L7, F62); 3691 3692 __ BIND(L_check_loop_end_256bit); 3693 __ add(from, 16, from); 3694 __ subcc(len_reg, 16, len_reg); 3695 __ add(to, 16, to); 3696 __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256); 3697 __ delayed()->nop(); 3698 // re-init initial vector for next block, 8-byte alignment is guaranteed 3699 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 3700 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 3701 __ mov(L0, I0); 3702 __ ret(); 3703 __ delayed()->restore(); 3704 3705 return start; 3706 } 3707 3708 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3709 assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0, 3710 "the following code assumes that first element of an int array is aligned to 8 bytes"); 3711 assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0, 3712 "the following code assumes that first element of a byte array is aligned to 8 bytes"); 3713 __ align(CodeEntryAlignment); 3714 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3715 Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start; 3716 Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, 
L_dec_next2_blocks192, L_dec_next2_blocks256; 3717 Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128; 3718 Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256; 3719 Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128; 3720 Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192; 3721 Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256; 3722 address start = __ pc(); 3723 Register from = I0; // source byte array 3724 Register to = I1; // destination byte array 3725 Register key = I2; // expanded key array 3726 Register rvec = I3; // init vector 3727 const Register len_reg = I4; // cipher length 3728 const Register original_key = I5; // original key array only required during decryption 3729 const Register keylen = L6; // reg for storing expanded key array length 3730 3731 __ save_frame(0); //args are read from I* registers since we save the frame in the beginning 3732 // save cipher len to return in the end 3733 __ mov(len_reg, L7); 3734 3735 // load original key from SunJCE expanded decryption key 3736 // Since we load original key buffer starting first element, 8-byte alignment is guaranteed 3737 for ( int i = 0; i <= 3; i++ ) { 3738 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3739 } 3740 3741 // load initial vector, 8-byte alignment is guaranteed 3742 __ ldx(rvec,0,L0); 3743 __ ldx(rvec,8,L1); 3744 3745 // read expanded key array length 3746 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 3747 3748 // 256-bit original key size 3749 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit); 3750 3751 // 192-bit original key size 3752 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit); 3753 3754 // 128-bit original key size 3755 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3756 for ( int i = 0; i <= 36; i += 4 ) { 3757 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4)); 3758 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6)); 3759 } 3760 3761 // load expanded key[last-1] and key[last] elements 3762 __ movdtox(F40,L2); 3763 __ movdtox(F42,L3); 3764 3765 __ and3(len_reg, 16, L4); 3766 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128); 3767 __ nop(); 3768 3769 __ ba_short(L_dec_first_block_start); 3770 3771 __ BIND(L_expand192bit); 3772 // load rest of the 192-bit key 3773 __ ldf(FloatRegisterImpl::S, original_key, 16, F4); 3774 __ ldf(FloatRegisterImpl::S, original_key, 20, F5); 3775 3776 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3777 for ( int i = 0; i <= 36; i += 6 ) { 3778 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6)); 3779 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8)); 3780 __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3781 } 3782 __ aes_kexpand1(F42, F46, 7, F48); 3783 __ aes_kexpand2(F44, F48, F50); 3784 3785 // load expanded key[last-1] and key[last] elements 3786 __ movdtox(F48,L2); 3787 __ movdtox(F50,L3); 3788 3789 
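// Block-count dispatch, a sketch of the control flow (not emitted code):
// CBC decryption parallelizes because P[i] = D(C[i]) ^ C[i-1] depends
// only on ciphertext, so the main loops below handle two 16-byte blocks
// per iteration; a leading odd block, if any, is peeled off first. The
// same test appears after each key-size expansion:
//
//   if (len & 16) decrypt_one_block();     // len is a multiple of 16
//   while (len != 0) decrypt_two_blocks(); // rest is a multiple of 32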
__ and3(len_reg, 16, L4); 3790 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192); 3791 __ nop(); 3792 3793 __ ba_short(L_dec_first_block_start); 3794 3795 __ BIND(L_expand256bit); 3796 // load rest of the 256-bit key 3797 for ( int i = 4; i <= 7; i++ ) { 3798 __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i)); 3799 } 3800 3801 // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions 3802 for ( int i = 0; i <= 40; i += 8 ) { 3803 __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8)); 3804 __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10)); 3805 __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12)); 3806 __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14)); 3807 } 3808 __ aes_kexpand1(F48, F54, 6, F56); 3809 __ aes_kexpand2(F50, F56, F58); 3810 3811 // load expanded key[last-1] and key[last] elements 3812 __ movdtox(F56,L2); 3813 __ movdtox(F58,L3); 3814 3815 __ and3(len_reg, 16, L4); 3816 __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256); 3817 3818 __ BIND(L_dec_first_block_start); 3819 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3820 __ andcc(from, 7, G0); 3821 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block); 3822 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 3823 3824 // aligned case: load input into L4 and L5 3825 __ ldx(from,0,L4); 3826 __ ldx(from,8,L5); 3827 __ ba_short(L_transform_first_block); 3828 3829 __ BIND(L_load_misaligned_input_first_block); 3830 __ alignaddr(from, G0, from); 3831 // F58, F60, F62 can be clobbered 3832 __ ldf(FloatRegisterImpl::D, from, 0, F58); 3833 __ ldf(FloatRegisterImpl::D, from, 8, F60); 3834 __ ldf(FloatRegisterImpl::D, from, 16, F62); 3835 __ faligndata(F58, F60, F58); 3836 __ faligndata(F60, F62, F60); 3837 __ movdtox(F58, L4); 3838 __ movdtox(F60, L5); 3839 __ mov(G1, from); 3840 3841 __ BIND(L_transform_first_block); 3842 __ xor3(L2,L4,G1); 3843 __ movxtod(G1,F60); 3844 __ xor3(L3,L5,G1); 3845 __ movxtod(G1,F62); 3846 3847 // 128-bit original key size 3848 __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128); 3849 3850 // 192-bit original key size 3851 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192); 3852 3853 __ aes_dround23(F54, F60, F62, F58); 3854 __ aes_dround01(F52, F60, F62, F56); 3855 __ aes_dround23(F50, F56, F58, F62); 3856 __ aes_dround01(F48, F56, F58, F60); 3857 3858 __ BIND(L_dec_first_block192); 3859 __ aes_dround23(F46, F60, F62, F58); 3860 __ aes_dround01(F44, F60, F62, F56); 3861 __ aes_dround23(F42, F56, F58, F62); 3862 __ aes_dround01(F40, F56, F58, F60); 3863 3864 __ BIND(L_dec_first_block128); 3865 for ( int i = 38; i >= 6; i -= 8 ) { 3866 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 3867 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 3868 if ( i != 6) { 3869 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 3870 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 3871 } else { 3872 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 3873 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 3874 } 3875 } 3876 3877 __ movxtod(L0,F56); 3878 __ movxtod(L1,F58); 3879 __ mov(L4,L0); 3880 __ mov(L5,L1); 3881 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 3882 __ 
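// (CBC recovery step: these fxor's combine the raw AES decryption with
//  the previous ciphertext/IV held in L0:L1, i.e. P = D(C) ^ C_prev;
//  the mov(L4,L0)/mov(L5,L1) above promoted this block's ciphertext to
//  be the chaining value for the next block)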
fxor(FloatRegisterImpl::D, F58, F62, F62); 3883 3884 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 3885 __ andcc(to, 7, G1); 3886 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block); 3887 __ delayed()->edge8n(to, G0, G2); 3888 3889 // aligned case: store output into the destination array 3890 __ stf(FloatRegisterImpl::D, F60, to, 0); 3891 __ stf(FloatRegisterImpl::D, F62, to, 8); 3892 __ ba_short(L_check_decrypt_end); 3893 3894 __ BIND(L_store_misaligned_output_first_block); 3895 __ add(to, 8, G3); 3896 __ mov(8, G4); 3897 __ sub(G4, G1, G4); 3898 __ alignaddr(G4, G0, G4); 3899 __ faligndata(F60, F60, F60); 3900 __ faligndata(F62, F62, F62); 3901 __ mov(to, G1); 3902 __ and3(to, -8, to); 3903 __ and3(G3, -8, G3); 3904 __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY); 3905 __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY); 3906 __ add(to, 8, to); 3907 __ add(G3, 8, G3); 3908 __ orn(G0, G2, G2); 3909 __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY); 3910 __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY); 3911 __ mov(G1, to); 3912 3913 __ BIND(L_check_decrypt_end); 3914 __ add(from, 16, from); 3915 __ add(to, 16, to); 3916 __ subcc(len_reg, 16, len_reg); 3917 __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end); 3918 __ delayed()->nop(); 3919 3920 // 256-bit original key size 3921 __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256); 3922 3923 // 192-bit original key size 3924 __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192); 3925 3926 __ align(OptoLoopAlignment); 3927 __ BIND(L_dec_next2_blocks128); 3928 __ nop(); 3929 3930 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 3931 __ andcc(from, 7, G0); 3932 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128); 3933 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 3934 3935 // aligned case: load input into G4, G5, L4 and L5 3936 __ ldx(from,0,G4); 3937 __ ldx(from,8,G5); 3938 __ ldx(from,16,L4); 3939 __ ldx(from,24,L5); 3940 __ ba_short(L_transform_next2_blocks128); 3941 3942 __ BIND(L_load_misaligned_next2_blocks128); 3943 __ alignaddr(from, G0, from); 3944 // F40, F42, F58, F60, F62 can be clobbered 3945 __ ldf(FloatRegisterImpl::D, from, 0, F40); 3946 __ ldf(FloatRegisterImpl::D, from, 8, F42); 3947 __ ldf(FloatRegisterImpl::D, from, 16, F60); 3948 __ ldf(FloatRegisterImpl::D, from, 24, F62); 3949 __ ldf(FloatRegisterImpl::D, from, 32, F58); 3950 __ faligndata(F40, F42, F40); 3951 __ faligndata(F42, F60, F42); 3952 __ faligndata(F60, F62, F60); 3953 __ faligndata(F62, F58, F62); 3954 __ movdtox(F40, G4); 3955 __ movdtox(F42, G5); 3956 __ movdtox(F60, L4); 3957 __ movdtox(F62, L5); 3958 __ mov(G1, from); 3959 3960 __ BIND(L_transform_next2_blocks128); 3961 // F40:F42 used for first 16-bytes 3962 __ xor3(L2,G4,G1); 3963 __ movxtod(G1,F40); 3964 __ xor3(L3,G5,G1); 3965 __ movxtod(G1,F42); 3966 3967 // F60:F62 used for next 16-bytes 3968 __ xor3(L2,L4,G1); 3969 __ movxtod(G1,F60); 3970 __ xor3(L3,L5,G1); 3971 __ movxtod(G1,F62); 3972 3973 for ( int i = 38; i >= 6; i -= 8 ) { 3974 __ aes_dround23(as_FloatRegister(i), F40, F42, F44); 3975 __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46); 3976 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 3977 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 3978 if (i != 6 ) 
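// (the F40/F42 and F60/F62 round chains are data-independent, so the two
//  blocks' dround instructions are interleaved to hide the round latency)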
{ 3979 __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42); 3980 __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40); 3981 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 3982 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 3983 } else { 3984 __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42); 3985 __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40); 3986 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 3987 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 3988 } 3989 } 3990 3991 __ movxtod(L0,F46); 3992 __ movxtod(L1,F44); 3993 __ fxor(FloatRegisterImpl::D, F46, F40, F40); 3994 __ fxor(FloatRegisterImpl::D, F44, F42, F42); 3995 3996 __ movxtod(G4,F56); 3997 __ movxtod(G5,F58); 3998 __ mov(L4,L0); 3999 __ mov(L5,L1); 4000 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 4001 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 4002 4003 // For mis-aligned store of 32 bytes of result we can do: 4004 // Circular right-shift all 4 FP registers so that 'head' and 'tail' 4005 // parts that need to be stored starting at mis-aligned address are in a FP reg 4006 // the other 3 FP regs can thus be stored using regular store 4007 // we then use the edge + partial-store mechanism to store the 'head' and 'tail' parts 4008 4009 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4010 __ andcc(to, 7, G1); 4011 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128); 4012 __ delayed()->edge8n(to, G0, G2); 4013 4014 // aligned case: store output into the destination array 4015 __ stf(FloatRegisterImpl::D, F40, to, 0); 4016 __ stf(FloatRegisterImpl::D, F42, to, 8); 4017 __ stf(FloatRegisterImpl::D, F60, to, 16); 4018 __ stf(FloatRegisterImpl::D, F62, to, 24); 4019 __ ba_short(L_check_decrypt_loop_end128); 4020 4021 __ BIND(L_store_misaligned_output_next2_blocks128); 4022 __ mov(8, G4); 4023 __ sub(G4, G1, G4); 4024 __ alignaddr(G4, G0, G4); 4025 __ faligndata(F40, F42, F56); // F56 can be clobbered 4026 __ faligndata(F42, F60, F42); 4027 __ faligndata(F60, F62, F60); 4028 __ faligndata(F62, F40, F40); 4029 __ mov(to, G1); 4030 __ and3(to, -8, to); 4031 __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY); 4032 __ stf(FloatRegisterImpl::D, F56, to, 8); 4033 __ stf(FloatRegisterImpl::D, F42, to, 16); 4034 __ stf(FloatRegisterImpl::D, F60, to, 24); 4035 __ add(to, 32, to); 4036 __ orn(G0, G2, G2); 4037 __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY); 4038 __ mov(G1, to); 4039 4040 __ BIND(L_check_decrypt_loop_end128); 4041 __ add(from, 32, from); 4042 __ add(to, 32, to); 4043 __ subcc(len_reg, 32, len_reg); 4044 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128); 4045 __ delayed()->nop(); 4046 __ ba_short(L_cbcdec_end); 4047 4048 __ align(OptoLoopAlignment); 4049 __ BIND(L_dec_next2_blocks192); 4050 __ nop(); 4051 4052 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 4053 __ andcc(from, 7, G0); 4054 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192); 4055 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 4056 4057 // aligned case: load input into G4, G5, L4 and L5 4058 __ ldx(from,0,G4); 4059 __ ldx(from,8,G5); 4060 __ ldx(from,16,L4); 4061 __ ldx(from,24,L5); 4062 __ ba_short(L_transform_next2_blocks192); 4063 4064 __ BIND(L_load_misaligned_next2_blocks192); 4065 __ alignaddr(from, G0, from); 4066 // F48, F50, F52, F60, F62 can be 
clobbered 4067 __ ldf(FloatRegisterImpl::D, from, 0, F48); 4068 __ ldf(FloatRegisterImpl::D, from, 8, F50); 4069 __ ldf(FloatRegisterImpl::D, from, 16, F60); 4070 __ ldf(FloatRegisterImpl::D, from, 24, F62); 4071 __ ldf(FloatRegisterImpl::D, from, 32, F52); 4072 __ faligndata(F48, F50, F48); 4073 __ faligndata(F50, F60, F50); 4074 __ faligndata(F60, F62, F60); 4075 __ faligndata(F62, F52, F62); 4076 __ movdtox(F48, G4); 4077 __ movdtox(F50, G5); 4078 __ movdtox(F60, L4); 4079 __ movdtox(F62, L5); 4080 __ mov(G1, from); 4081 4082 __ BIND(L_transform_next2_blocks192); 4083 // F48:F50 used for first 16-bytes 4084 __ xor3(L2,G4,G1); 4085 __ movxtod(G1,F48); 4086 __ xor3(L3,G5,G1); 4087 __ movxtod(G1,F50); 4088 4089 // F60:F62 used for next 16-bytes 4090 __ xor3(L2,L4,G1); 4091 __ movxtod(G1,F60); 4092 __ xor3(L3,L5,G1); 4093 __ movxtod(G1,F62); 4094 4095 for ( int i = 46; i >= 6; i -= 8 ) { 4096 __ aes_dround23(as_FloatRegister(i), F48, F50, F52); 4097 __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54); 4098 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 4099 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 4100 if (i != 6 ) { 4101 __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50); 4102 __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48); 4103 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 4104 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 4105 } else { 4106 __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50); 4107 __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48); 4108 __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62); 4109 __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60); 4110 } 4111 } 4112 4113 __ movxtod(L0,F54); 4114 __ movxtod(L1,F52); 4115 __ fxor(FloatRegisterImpl::D, F54, F48, F48); 4116 __ fxor(FloatRegisterImpl::D, F52, F50, F50); 4117 4118 __ movxtod(G4,F56); 4119 __ movxtod(G5,F58); 4120 __ mov(L4,L0); 4121 __ mov(L5,L1); 4122 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 4123 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 4124 4125 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4126 __ andcc(to, 7, G1); 4127 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192); 4128 __ delayed()->edge8n(to, G0, G2); 4129 4130 // aligned case: store output into the destination array 4131 __ stf(FloatRegisterImpl::D, F48, to, 0); 4132 __ stf(FloatRegisterImpl::D, F50, to, 8); 4133 __ stf(FloatRegisterImpl::D, F60, to, 16); 4134 __ stf(FloatRegisterImpl::D, F62, to, 24); 4135 __ ba_short(L_check_decrypt_loop_end192); 4136 4137 __ BIND(L_store_misaligned_output_next2_blocks192); 4138 __ mov(8, G4); 4139 __ sub(G4, G1, G4); 4140 __ alignaddr(G4, G0, G4); 4141 __ faligndata(F48, F50, F56); // F56 can be clobbered 4142 __ faligndata(F50, F60, F50); 4143 __ faligndata(F60, F62, F60); 4144 __ faligndata(F62, F48, F48); 4145 __ mov(to, G1); 4146 __ and3(to, -8, to); 4147 __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY); 4148 __ stf(FloatRegisterImpl::D, F56, to, 8); 4149 __ stf(FloatRegisterImpl::D, F50, to, 16); 4150 __ stf(FloatRegisterImpl::D, F60, to, 24); 4151 __ add(to, 32, to); 4152 __ orn(G0, G2, G2); 4153 __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY); 4154 __ mov(G1, to); 4155 4156 __ BIND(L_check_decrypt_loop_end192); 4157 __ add(from, 32, from); 4158 __ add(to, 32, to); 4159 __ subcc(len_reg, 32, len_reg); 4160 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192); 4161 __ delayed()->nop(); 4162 __ 
ba_short(L_cbcdec_end); 4163 4164 __ align(OptoLoopAlignment); 4165 __ BIND(L_dec_next2_blocks256); 4166 __ nop(); 4167 4168 // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero 4169 __ andcc(from, 7, G0); 4170 __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256); 4171 __ delayed()->mov(from, G1); // save original 'from' address before alignaddr 4172 4173 // aligned case: load input into G4, G5, L4 and L5 4174 __ ldx(from,0,G4); 4175 __ ldx(from,8,G5); 4176 __ ldx(from,16,L4); 4177 __ ldx(from,24,L5); 4178 __ ba_short(L_transform_next2_blocks256); 4179 4180 __ BIND(L_load_misaligned_next2_blocks256); 4181 __ alignaddr(from, G0, from); 4182 // F0, F2, F4, F60, F62 can be clobbered 4183 __ ldf(FloatRegisterImpl::D, from, 0, F0); 4184 __ ldf(FloatRegisterImpl::D, from, 8, F2); 4185 __ ldf(FloatRegisterImpl::D, from, 16, F60); 4186 __ ldf(FloatRegisterImpl::D, from, 24, F62); 4187 __ ldf(FloatRegisterImpl::D, from, 32, F4); 4188 __ faligndata(F0, F2, F0); 4189 __ faligndata(F2, F60, F2); 4190 __ faligndata(F60, F62, F60); 4191 __ faligndata(F62, F4, F62); 4192 __ movdtox(F0, G4); 4193 __ movdtox(F2, G5); 4194 __ movdtox(F60, L4); 4195 __ movdtox(F62, L5); 4196 __ mov(G1, from); 4197 4198 __ BIND(L_transform_next2_blocks256); 4199 // F0:F2 used for first 16-bytes 4200 __ xor3(L2,G4,G1); 4201 __ movxtod(G1,F0); 4202 __ xor3(L3,G5,G1); 4203 __ movxtod(G1,F2); 4204 4205 // F60:F62 used for next 16-bytes 4206 __ xor3(L2,L4,G1); 4207 __ movxtod(G1,F60); 4208 __ xor3(L3,L5,G1); 4209 __ movxtod(G1,F62); 4210 4211 __ aes_dround23(F54, F0, F2, F4); 4212 __ aes_dround01(F52, F0, F2, F6); 4213 __ aes_dround23(F54, F60, F62, F58); 4214 __ aes_dround01(F52, F60, F62, F56); 4215 __ aes_dround23(F50, F6, F4, F2); 4216 __ aes_dround01(F48, F6, F4, F0); 4217 __ aes_dround23(F50, F56, F58, F62); 4218 __ aes_dround01(F48, F56, F58, F60); 4219 // save F48:F54 in temp registers 4220 __ movdtox(F54,G2); 4221 __ movdtox(F52,G3); 4222 __ movdtox(F50,L6); 4223 __ movdtox(F48,G1); 4224 for ( int i = 46; i >= 14; i -= 8 ) { 4225 __ aes_dround23(as_FloatRegister(i), F0, F2, F4); 4226 __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6); 4227 __ aes_dround23(as_FloatRegister(i), F60, F62, F58); 4228 __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56); 4229 __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2); 4230 __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0); 4231 __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62); 4232 __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60); 4233 } 4234 // init F48:F54 with F0:F6 values (original key) 4235 __ ldf(FloatRegisterImpl::D, original_key, 0, F48); 4236 __ ldf(FloatRegisterImpl::D, original_key, 8, F50); 4237 __ ldf(FloatRegisterImpl::D, original_key, 16, F52); 4238 __ ldf(FloatRegisterImpl::D, original_key, 24, F54); 4239 __ aes_dround23(F54, F0, F2, F4); 4240 __ aes_dround01(F52, F0, F2, F6); 4241 __ aes_dround23(F54, F60, F62, F58); 4242 __ aes_dround01(F52, F60, F62, F56); 4243 __ aes_dround23_l(F50, F6, F4, F2); 4244 __ aes_dround01_l(F48, F6, F4, F0); 4245 __ aes_dround23_l(F50, F56, F58, F62); 4246 __ aes_dround01_l(F48, F56, F58, F60); 4247 // re-init F48:F54 with their original values 4248 __ movxtod(G2,F54); 4249 __ movxtod(G3,F52); 4250 __ movxtod(L6,F50); 4251 __ movxtod(G1,F48); 4252 4253 __ movxtod(L0,F6); 4254 __ movxtod(L1,F4); 4255 __ fxor(FloatRegisterImpl::D, F6, F0, F0); 4256 __ fxor(FloatRegisterImpl::D, F4, F2, F2); 4257 4258 __ movxtod(G4,F56); 4259 __ 
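// (register-pressure note for the 256-bit path: 15 round-key pairs plus
//  two in-flight blocks exceed the FP register file, so F48:F54 were
//  parked in integer regs above, the first 32 original-key bytes were
//  reloaded into F48:F54 to serve as the last-round keys, and F48:F54
//  were then restored from the integer regs)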
movxtod(G5,F58); 4260 __ mov(L4,L0); 4261 __ mov(L5,L1); 4262 __ fxor(FloatRegisterImpl::D, F56, F60, F60); 4263 __ fxor(FloatRegisterImpl::D, F58, F62, F62); 4264 4265 // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero 4266 __ andcc(to, 7, G1); 4267 __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256); 4268 __ delayed()->edge8n(to, G0, G2); 4269 4270 // aligned case: store output into the destination array 4271 __ stf(FloatRegisterImpl::D, F0, to, 0); 4272 __ stf(FloatRegisterImpl::D, F2, to, 8); 4273 __ stf(FloatRegisterImpl::D, F60, to, 16); 4274 __ stf(FloatRegisterImpl::D, F62, to, 24); 4275 __ ba_short(L_check_decrypt_loop_end256); 4276 4277 __ BIND(L_store_misaligned_output_next2_blocks256); 4278 __ mov(8, G4); 4279 __ sub(G4, G1, G4); 4280 __ alignaddr(G4, G0, G4); 4281 __ faligndata(F0, F2, F56); // F56 can be clobbered 4282 __ faligndata(F2, F60, F2); 4283 __ faligndata(F60, F62, F60); 4284 __ faligndata(F62, F0, F0); 4285 __ mov(to, G1); 4286 __ and3(to, -8, to); 4287 __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY); 4288 __ stf(FloatRegisterImpl::D, F56, to, 8); 4289 __ stf(FloatRegisterImpl::D, F2, to, 16); 4290 __ stf(FloatRegisterImpl::D, F60, to, 24); 4291 __ add(to, 32, to); 4292 __ orn(G0, G2, G2); 4293 __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY); 4294 __ mov(G1, to); 4295 4296 __ BIND(L_check_decrypt_loop_end256); 4297 __ add(from, 32, from); 4298 __ add(to, 32, to); 4299 __ subcc(len_reg, 32, len_reg); 4300 __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256); 4301 __ delayed()->nop(); 4302 4303 __ BIND(L_cbcdec_end); 4304 // re-init initial vector for next block, 8-byte alignment is guaranteed 4305 __ stx(L0, rvec, 0); 4306 __ stx(L1, rvec, 8); 4307 __ mov(L7, I0); 4308 __ ret(); 4309 __ delayed()->restore(); 4310 4311 return start; 4312 } 4313 4314 address generate_sha1_implCompress(bool multi_block, const char *name) { 4315 __ align(CodeEntryAlignment); 4316 StubCodeMark mark(this, "StubRoutines", name); 4317 address start = __ pc(); 4318 4319 Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop; 4320 int i; 4321 4322 Register buf = O0; // byte[] source+offset 4323 Register state = O1; // int[] SHA.state 4324 Register ofs = O2; // int offset 4325 Register limit = O3; // int limit 4326 4327 // load state into F0-F4 4328 for (i = 0; i < 5; i++) { 4329 __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); 4330 } 4331 4332 __ andcc(buf, 7, G0); 4333 __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input); 4334 __ delayed()->nop(); 4335 4336 __ BIND(L_sha1_loop); 4337 // load buf into F8-F22 4338 for (i = 0; i < 8; i++) { 4339 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4340 } 4341 __ sha1(); 4342 if (multi_block) { 4343 __ add(ofs, 64, ofs); 4344 __ add(buf, 64, buf); 4345 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop); 4346 __ mov(ofs, O0); // to be returned 4347 } 4348 4349 // store F0-F4 into state and return 4350 for (i = 0; i < 4; i++) { 4351 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4352 } 4353 __ retl(); 4354 __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); 4355 4356 __ BIND(L_sha1_unaligned_input); 4357 __ alignaddr(buf, G0, buf); 4358 4359 __ BIND(L_sha1_unaligned_input_loop); 4360 // load buf into F8-F22 4361 for (i = 0; i < 9; i++) { 4362 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 
8)); 4363 } 4364 for (i = 0; i < 8; i++) { 4365 __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); 4366 } 4367 __ sha1(); 4368 if (multi_block) { 4369 __ add(ofs, 64, ofs); 4370 __ add(buf, 64, buf); 4371 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop); 4372 __ mov(ofs, O0); // to be returned 4373 } 4374 4375 // store F0-F4 into state and return 4376 for (i = 0; i < 4; i++) { 4377 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4378 } 4379 __ retl(); 4380 __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); 4381 4382 return start; 4383 } 4384 4385 address generate_sha256_implCompress(bool multi_block, const char *name) { 4386 __ align(CodeEntryAlignment); 4387 StubCodeMark mark(this, "StubRoutines", name); 4388 address start = __ pc(); 4389 4390 Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop; 4391 int i; 4392 4393 Register buf = O0; // byte[] source+offset 4394 Register state = O1; // int[] SHA2.state 4395 Register ofs = O2; // int offset 4396 Register limit = O3; // int limit 4397 4398 // load state into F0-F7 4399 for (i = 0; i < 8; i++) { 4400 __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); 4401 } 4402 4403 __ andcc(buf, 7, G0); 4404 __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input); 4405 __ delayed()->nop(); 4406 4407 __ BIND(L_sha256_loop); 4408 // load buf into F8-F22 4409 for (i = 0; i < 8; i++) { 4410 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4411 } 4412 __ sha256(); 4413 if (multi_block) { 4414 __ add(ofs, 64, ofs); 4415 __ add(buf, 64, buf); 4416 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop); 4417 __ mov(ofs, O0); // to be returned 4418 } 4419 4420 // store F0-F7 into state and return 4421 for (i = 0; i < 7; i++) { 4422 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4423 } 4424 __ retl(); 4425 __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); 4426 4427 __ BIND(L_sha256_unaligned_input); 4428 __ alignaddr(buf, G0, buf); 4429 4430 __ BIND(L_sha256_unaligned_input_loop); 4431 // load buf into F8-F22 4432 for (i = 0; i < 9; i++) { 4433 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); 4434 } 4435 for (i = 0; i < 8; i++) { 4436 __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); 4437 } 4438 __ sha256(); 4439 if (multi_block) { 4440 __ add(ofs, 64, ofs); 4441 __ add(buf, 64, buf); 4442 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop); 4443 __ mov(ofs, O0); // to be returned 4444 } 4445 4446 // store F0-F7 into state and return 4447 for (i = 0; i < 7; i++) { 4448 __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); 4449 } 4450 __ retl(); 4451 __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); 4452 4453 return start; 4454 } 4455 4456 address generate_sha512_implCompress(bool multi_block, const char *name) { 4457 __ align(CodeEntryAlignment); 4458 StubCodeMark mark(this, "StubRoutines", name); 4459 address start = __ pc(); 4460 4461 Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop; 4462 int i; 4463 4464 Register buf = O0; // byte[] source+offset 4465 Register state = O1; // long[] SHA5.state 4466 Register ofs = O2; // int offset 4467 Register limit = O3; // int limit 4468 4469 // load state into F0-F14 4470 for (i = 0; i < 8; i++) { 4471 __ 
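// (the eight 64-bit SHA-512 state words a..h land in the even double
//  registers F0, F2, ..., F14; the multi_block contract matches the
//  other SHA stubs. A sketch of the overall flow, illustrative only:
//
//    do { sha512_block(state, buf); buf += 128; ofs += 128; }
//    while (multi_block && ofs <= limit);
//    return ofs;  // meaningful to the caller only in the multi-block case
//  )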
ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2)); 4472 } 4473 4474 __ andcc(buf, 7, G0); 4475 __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input); 4476 __ delayed()->nop(); 4477 4478 __ BIND(L_sha512_loop); 4479 // load buf into F16-F46 4480 for (i = 0; i < 16; i++) { 4481 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); 4482 } 4483 __ sha512(); 4484 if (multi_block) { 4485 __ add(ofs, 128, ofs); 4486 __ add(buf, 128, buf); 4487 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop); 4488 __ mov(ofs, O0); // to be returned 4489 } 4490 4491 // store F0-F14 into state and return 4492 for (i = 0; i < 7; i++) { 4493 __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); 4494 } 4495 __ retl(); 4496 __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); 4497 4498 __ BIND(L_sha512_unaligned_input); 4499 __ alignaddr(buf, G0, buf); 4500 4501 __ BIND(L_sha512_unaligned_input_loop); 4502 // load buf into F16-F46 4503 for (i = 0; i < 17; i++) { 4504 __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); 4505 } 4506 for (i = 0; i < 16; i++) { 4507 __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16)); 4508 } 4509 __ sha512(); 4510 if (multi_block) { 4511 __ add(ofs, 128, ofs); 4512 __ add(buf, 128, buf); 4513 __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop); 4514 __ mov(ofs, O0); // to be returned 4515 } 4516 4517 // store F0-F14 into state and return 4518 for (i = 0; i < 7; i++) { 4519 __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); 4520 } 4521 __ retl(); 4522 __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); 4523 4524 return start; 4525 } 4526 4527 /* Single and multi-block ghash operations */ 4528 address generate_ghash_processBlocks() { 4529 __ align(CodeEntryAlignment); 4530 Label L_ghash_loop, L_aligned, L_main; 4531 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4532 address start = __ pc(); 4533 4534 Register state = I0; 4535 Register subkeyH = I1; 4536 Register data = I2; 4537 Register len = I3; 4538 4539 __ save_frame(0); 4540 4541 __ ldx(state, 0, O0); 4542 __ ldx(state, 8, O1); 4543 4544 // Loop label for multiblock operations 4545 __ BIND(L_ghash_loop); 4546 4547 // Check if 'data' is unaligned 4548 __ andcc(data, 7, G1); 4549 __ br(Assembler::zero, false, Assembler::pt, L_aligned); 4550 __ delayed()->nop(); 4551 4552 Register left_shift = L1; 4553 Register right_shift = L2; 4554 Register data_ptr = L3; 4555 4556 // Get left and right shift values in bits 4557 __ sll(G1, LogBitsPerByte, left_shift); 4558 __ mov(64, right_shift); 4559 __ sub(right_shift, left_shift, right_shift); 4560 4561 // Align to read 'data' 4562 __ sub(data, G1, data_ptr); 4563 4564 // Load first 8 bytes of 'data' 4565 __ ldx(data_ptr, 0, O4); 4566 __ sllx(O4, left_shift, O4); 4567 __ ldx(data_ptr, 8, O5); 4568 __ srlx(O5, right_shift, G4); 4569 __ bset(G4, O4); 4570 4571 // Load second 8 bytes of 'data' 4572 __ sllx(O5, left_shift, O5); 4573 __ ldx(data_ptr, 16, G4); 4574 __ srlx(G4, right_shift, G4); 4575 __ ba(L_main); 4576 __ delayed()->bset(G4, O5); 4577 4578 // If 'data' is aligned, load normally 4579 __ BIND(L_aligned); 4580 __ ldx(data, 0, O4); 4581 __ ldx(data, 8, O5); 4582 4583 __ BIND(L_main); 4584 __ ldx(subkeyH, 0, O2); 4585 __ ldx(subkeyH, 8, O3); 4586 4587 __ xor3(O0, O4, O0); 4588 __ xor3(O1, O5, O1); 4589 4590 __ xmulxhi(O0, O3, G3); 4591 __ xmulx(O0, O2, 
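// (GHASH core: the eight xmulx/xmulxhi ops around this point form the
//  four 64x64 carry-less partial products of (state ^ X) * H over
//  GF(2)[x]; the 0xE1 << 56 constant below encodes the GHASH reduction
//  polynomial x^128 + x^7 + x^2 + x + 1, used to fold the 256-bit
//  product back into 128 bits. Schematically, per 16-byte block:
//    (hi:lo) = clmul128(state ^ X, H);  state = reduce_0xE1(hi:lo); )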
O5); 4592 __ xmulxhi(O1, O2, G4); 4593 __ xmulxhi(O1, O3, G5); 4594 __ xmulx(O0, O3, G1); 4595 __ xmulx(O1, O3, G2); 4596 __ xmulx(O1, O2, O3); 4597 __ xmulxhi(O0, O2, O4); 4598 4599 __ mov(0xE1, O0); 4600 __ sllx(O0, 56, O0); 4601 4602 __ xor3(O5, G3, O5); 4603 __ xor3(O5, G4, O5); 4604 __ xor3(G5, G1, G1); 4605 __ xor3(G1, O3, G1); 4606 __ srlx(G2, 63, O1); 4607 __ srlx(G1, 63, G3); 4608 __ sllx(G2, 63, O3); 4609 __ sllx(G2, 58, O2); 4610 __ xor3(O3, O2, O2); 4611 4612 __ sllx(G1, 1, G1); 4613 __ or3(G1, O1, G1); 4614 4615 __ xor3(G1, O2, G1); 4616 4617 __ sllx(G2, 1, G2); 4618 4619 __ xmulxhi(G1, O0, O1); 4620 __ xmulx(G1, O0, O2); 4621 __ xmulxhi(G2, O0, O3); 4622 __ xmulx(G2, O0, G1); 4623 4624 __ xor3(O4, O1, O4); 4625 __ xor3(O5, O2, O5); 4626 __ xor3(O5, O3, O5); 4627 4628 __ sllx(O4, 1, O2); 4629 __ srlx(O5, 63, O3); 4630 4631 __ or3(O2, O3, O0); 4632 4633 __ sllx(O5, 1, O1); 4634 __ srlx(G1, 63, O2); 4635 __ or3(O1, O2, O1); 4636 __ xor3(O1, G3, O1); 4637 4638 __ deccc(len); 4639 __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop); 4640 __ delayed()->add(data, 16, data); 4641 4642 __ stx(O0, I0, 0); 4643 __ stx(O1, I0, 8); 4644 4645 __ ret(); 4646 __ delayed()->restore(); 4647 4648 return start; 4649 } 4650 4651 /** 4652 * Arguments: 4653 * 4654 * Inputs: 4655 * O0 - int crc 4656 * O1 - byte* buf 4657 * O2 - int len 4658 * O3 - int* table 4659 * 4660 * Output: 4661 * O0 - int crc result 4662 */ 4663 address generate_updateBytesCRC32C() { 4664 assert(UseCRC32CIntrinsics, "need CRC32C instruction"); 4665 4666 __ align(CodeEntryAlignment); 4667 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 4668 address start = __ pc(); 4669 4670 const Register crc = O0; // crc 4671 const Register buf = O1; // source java byte array address 4672 const Register len = O2; // number of bytes 4673 const Register table = O3; // byteTable 4674 4675 __ kernel_crc32c(crc, buf, len, table); 4676 4677 __ retl(); 4678 __ delayed()->nop(); 4679 4680 return start; 4681 } 4682 4683 #define ADLER32_NUM_TEMPS 16 4684 4685 /** 4686 * Arguments: 4687 * 4688 * Inputs: 4689 * O0 - int adler 4690 * O1 - byte* buff 4691 * O2 - int len 4692 * 4693 * Output: 4694 * O0 - int adler result 4695 */ 4696 address generate_updateBytesAdler32() { 4697 __ align(CodeEntryAlignment); 4698 StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32"); 4699 address start = __ pc(); 4700 4701 Label L_cleanup_loop, L_cleanup_loop_check; 4702 Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check; 4703 Label L_nmax_check_done; 4704 4705 // Aliases 4706 Register s1 = O0; 4707 Register s2 = O3; 4708 Register buff = O1; 4709 Register len = O2; 4710 Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7}; 4711 4712 // Max number of bytes we can process before having to take the mod 4713 // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 4714 unsigned long NMAX = 0x15B0; 4715 4716 // Zero-out the upper bits of len 4717 __ clruwu(len); 4718 4719 // Create the mask 0xFFFF 4720 __ set64(0x00FFFF, O4, O5); // O5 is the temp register 4721 4722 // s1 is initialized to the lower 16 bits of adler 4723 // s2 is initialized to the upper 16 bits of adler 4724 __ srlx(O0, 16, O5); // adler >> 16 4725 __ and3(O0, O4, s1); // s1 = (adler & 0xFFFF) 4726 __ and3(O5, O4, s2); // s2 = ((adler >> 16) & 0xFFFF) 4727 4728 // The pipelined loop needs at least 16 elements for 1 iteration 4729 // It does check this, but it is more effective to 
skip to the cleanup loop 4730 // Setup the constant for cutoff checking 4731 __ mov(15, O4); 4732 4733 // Check if we are above the cutoff, if not go to the cleanup loop immediately 4734 __ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check); 4735 4736 // Free up some registers for our use 4737 for (int i = 0; i < ADLER32_NUM_TEMPS; i++) { 4738 __ movxtod(temp[i], as_FloatRegister(2*i)); 4739 } 4740 4741 // Loop maintenance stuff is done at the end of the loop, so skip to there 4742 __ ba_short(L_main_loop_check); 4743 4744 __ BIND(L_main_loop); 4745 4746 // Prologue for inner loop 4747 __ ldub(buff, 0, L0); 4748 __ dec(O5); 4749 4750 for (int i = 1; i < 8; i++) { 4751 __ ldub(buff, i, temp[i]); 4752 } 4753 4754 __ inc(buff, 8); 4755 4756 // Inner loop processes 16 elements at a time, might never execute if only 16 elements 4757 // to be processed by the outer loop 4758 __ ba_short(L_inner_loop_check); 4759 4760 __ BIND(L_inner_loop); 4761 4762 for (int i = 0; i < 8; i++) { 4763 __ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]); 4764 __ add(s1, temp[i], s1); 4765 __ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]); 4766 __ add(s2, s1, s2); 4767 } 4768 4769 // Original temp 0-7 used and new loads to temp 0-7 issued 4770 // temp 8-15 ready to be consumed 4771 __ add(s1, I0, s1); 4772 __ dec(O5); 4773 __ add(s2, s1, s2); 4774 __ add(s1, I1, s1); 4775 __ inc(buff, 16); 4776 __ add(s2, s1, s2); 4777 4778 for (int i = 0; i < 6; i++) { 4779 __ add(s1, temp[10+i], s1); 4780 __ add(s2, s1, s2); 4781 } 4782 4783 __ BIND(L_inner_loop_check); 4784 __ nop(); 4785 __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop); 4786 4787 // Epilogue 4788 for (int i = 0; i < 4; i++) { 4789 __ ldub(buff, (2*i), temp[8+(2*i)]); 4790 __ add(s1, temp[i], s1); 4791 __ ldub(buff, (2*i)+1, temp[8+(2*i)+1]); 4792 __ add(s2, s1, s2); 4793 } 4794 4795 __ add(s1, temp[4], s1); 4796 __ inc(buff, 8); 4797 4798 for (int i = 0; i < 11; i++) { 4799 __ add(s2, s1, s2); 4800 __ add(s1, temp[5+i], s1); 4801 } 4802 4803 __ add(s2, s1, s2); 4804 4805 // Take the mod for s1 and s2 4806 __ set64(0xFFF1, L0, L1); 4807 __ udivx(s1, L0, L1); 4808 __ udivx(s2, L0, L2); 4809 __ mulx(L0, L1, L1); 4810 __ mulx(L0, L2, L2); 4811 __ sub(s1, L1, s1); 4812 __ sub(s2, L2, s2); 4813 4814 // Make sure there is something left to process 4815 __ BIND(L_main_loop_check); 4816 __ set64(NMAX, L0, L1); 4817 // k = len < NMAX ? 
    // Take the mod for s1 and s2
    __ set64(0xFFF1, L0, L1);
    __ udivx(s1, L0, L1);
    __ udivx(s2, L0, L2);
    __ mulx(L0, L1, L1);
    __ mulx(L0, L2, L2);
    __ sub(s1, L1, s1);
    __ sub(s2, L2, s2);

    // Make sure there is something left to process
    __ BIND(L_main_loop_check);
    __ set64(NMAX, L0, L1);
    // k = len < NMAX ? len : NMAX
    __ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
    __ andn(len, 0x0F, L0);  // only loop a multiple of 16 times
    __ BIND(L_nmax_check_done);
    __ mov(L0, O5);
    __ sub(len, L0, len);    // len -= k

    __ srlx(O5, 4, O5);      // multiples of 16
    __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);

    // Restore the registers we saved, take the mod one last time, combine and return
    for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
      __ movdtox(as_FloatRegister(2*i), temp[i]);
    }

    // There might be nothing left to process
    __ ba_short(L_cleanup_loop_check);

    __ BIND(L_cleanup_loop);
    __ ldub(buff, 0, O4);    // load single byte from the buffer
    __ inc(buff);            // buff++
    __ add(s1, O4, s1);      // s1 += *buff++;
    __ dec(len);             // len--
    __ add(s1, s2, s2);      // s2 += s1;
    __ BIND(L_cleanup_loop_check);
    __ nop();
    __ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);

    // Take the mod one last time
    __ set64(0xFFF1, O1, O2);
    __ udivx(s1, O1, O2);
    __ udivx(s2, O1, O5);
    __ mulx(O1, O2, O2);
    __ mulx(O1, O5, O5);
    __ sub(s1, O2, s1);
    __ sub(s2, O5, s2);

    // Combine lower bits and higher bits
    __ sllx(s2, 16, s2);  // s2 = s2 << 16
    __ or3(s1, s2, s1);   // adler = s2 | s1
    // Final return value is in O0
    __ retl();
    __ delayed()->nop();

    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   O0 - int   crc
   *   O1 - byte* buf
   *   O2 - int   len
   *   O3 - int*  table
   *
   * Output:
   *   O0 - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need VIS3 instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
    address start = __ pc();

    const Register crc   = O0;  // crc
    const Register buf   = O1;  // source java byte array address
    const Register len   = O2;  // length
    const Register table = O3;  // crc_table address (reuse register)

    __ kernel_crc32(crc, buf, len, table);

    __ retl();
    __ delayed()->nop();

    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   I0 - int*  x-addr
   *   I1 - int   x-len
   *   I2 - int*  y-addr
   *   I3 - int   y-len
   *   I4 - int*  z-addr (output vector)
   *   I5 - int   z-len
   */
  address generate_multiplyToLen() {
    assert(UseMultiplyToLenIntrinsic, "need VIS3 instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
    address start = __ pc();

    __ save_frame(0);

    const Register xptr = I0;  // input address
    const Register xlen = I1;  // ...and length in 32b-words
    const Register yptr = I2;  //
    const Register ylen = I3;  //
    const Register zptr = I4;  // output address
    const Register zlen = I5;  // ...and length in 32b-words

    /* The minimal "limb" representation suggests that odd-length vectors are as
     * likely as even-length dittos. This in turn suggests that we need to cope
     * with odd/even length arrays and data not aligned properly for 64-bit read
     * and write operations. We thus use a number of different kernels:
     *
     *   if (is_even(x.len) && is_even(y.len))
     *     if (is_align64(x) && is_align64(y) && is_align64(z))
     *       if (x.len == y.len && 16 <= x.len && x.len <= 64)
     *         memv_mult_mpmul(...)
     *       else
     *         memv_mult_64x64(...)
     *     else
     *       memv_mult_64x64u(...)
     *   else
     *     memv_mult_32x32(...)
     *
     * Here we assume VIS3 support (for 'umulxhi', 'addxc' and 'addxccc').
     * In case CBCOND instructions are supported, we will use 'cxbX'. If the
     * MPMUL instruction is supported, we will generate a kernel using 'mpmul'
     * (for vectors with proper characteristics).
     */
    const Register tmp0 = L0;
    const Register tmp1 = L1;

    Label L_mult_32x32;
    Label L_mult_64x64u;
    Label L_mult_64x64;
    Label L_exit;

    if_both_even(xlen, ylen, tmp0, false, L_mult_32x32);
    if_all3_aligned(xptr, yptr, zptr, tmp1, 64, false, L_mult_64x64u);

    if (UseMPMUL) {
      if_eq(xlen, ylen, false, L_mult_64x64);
      if_in_rng(xlen, 16, 64, tmp0, tmp1, false, L_mult_64x64);

      // 1. Multiply naturally aligned 64b-datums using a generic 'mpmul' kernel,
      //    operating on equal length vectors of size [16..64].
      gen_mult_mpmul(xlen, xptr, yptr, zptr, L_exit);
    }

    // 2. Multiply naturally aligned 64-bit datums (64x64).
    __ bind(L_mult_64x64);
    gen_mult_64x64(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);

    // 3. Multiply unaligned 64-bit datums (64x64).
    __ bind(L_mult_64x64u);
    gen_mult_64x64_unaligned(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);

    // 4. Multiply naturally aligned 32-bit datums (32x32).
    __ bind(L_mult_32x32);
    gen_mult_32x32(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);

    __ bind(L_exit);
    __ ret();
    __ delayed()->restore();

    return start;
  }

  // Additional help functions used by multiplyToLen generation.

  void if_both_even(Register r1, Register r2, Register tmp, bool iseven, Label &L)
  {
    __ or3(r1, r2, tmp);
    __ andcc(tmp, 0x1, tmp);
    __ br_icc_zero(iseven, Assembler::pn, L);
  }

  void if_all3_aligned(Register r1, Register r2, Register r3,
                       Register tmp, uint align, bool isalign, Label &L)
  {
    __ or3(r1, r2, tmp);
    __ or3(r3, tmp, tmp);
    __ andcc(tmp, (align - 1), tmp);
    __ br_icc_zero(isalign, Assembler::pn, L);
  }

  void if_eq(Register x, Register y, bool iseq, Label &L)
  {
    Assembler::Condition cf = (iseq ? Assembler::equal : Assembler::notEqual);
    __ cmp_and_br_short(x, y, cf, Assembler::pt, L);
  }
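  // A worked instance of the range test used by if_in_rng below
  // (illustration): with lb = 16 and ub = 64, x = 15 gives
  // (x - lb) * (ub - x) = (-1) * 49 < 0, while any x in [16..64]
  // yields a non-negative product.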
  void if_in_rng(Register x, int lb, int ub, Register t1, Register t2, bool inrng, Label &L)
  {
    assert(Assembler::is_simm13(lb), "Small ints only!");
    assert(Assembler::is_simm13(ub), "Small ints only!");
    // Compute (x - lb) * (ub - x) >= 0
    // NOTE: With the local use of this routine, we rely on small integers to
    //       guarantee that we do not overflow in the multiplication.
    __ add(G0, ub, t2);
    __ sub(x, lb, t1);
    __ sub(t2, x, t2);
    __ mulx(t1, t2, t1);
    Assembler::Condition cf = (inrng ? Assembler::greaterEqual : Assembler::less);
    __ cmp_and_br_short(t1, G0, cf, Assembler::pt, L);
  }

  void ldd_entry(Register base, Register offs, FloatRegister dest)
  {
    __ ldd(base, offs, dest);
    __ inc(offs, 8);
  }

  void ldx_entry(Register base, Register offs, Register dest)
  {
    __ ldx(base, offs, dest);
    __ inc(offs, 8);
  }

  void mpmul_entry(int m, Label &next)
  {
    __ mpmul(m);
    __ cbcond(Assembler::equal, Assembler::icc, G0, G0, next);
  }

  void stx_entry(Label &L, Register r1, Register r2, Register base, Register offs)
  {
    __ bind(L);
    __ stx(r1, base, offs);
    __ inc(offs, 8);
    __ stx(r2, base, offs);
    __ inc(offs, 8);
  }

  void offs_entry(Label &Lbl0, Label &Lbl1)
  {
    assert(Lbl0.is_bound(), "must be");
    assert(Lbl1.is_bound(), "must be");

    int offset = Lbl0.loc_pos() - Lbl1.loc_pos();

    __ emit_data(offset);
  }

  /* Generate the actual multiplication kernels for BigInteger vectors:
   *
   *   1. gen_mult_mpmul(...)
   *
   *   2. gen_mult_64x64(...)
   *
   *   3. gen_mult_64x64_unaligned(...)
   *
   *   4. gen_mult_32x32(...)
   */
  void gen_mult_mpmul(Register len, Register xptr, Register yptr, Register zptr,
                      Label &L_exit)
  {
    const Register zero = G0;
    const Register gxp  = G1;  // Need to use global registers across RWs.
    const Register gyp  = G2;
    const Register gzp  = G3;
    const Register disp = G4;
    const Register offs = G5;

    __ mov(xptr, gxp);
    __ mov(yptr, gyp);
    __ mov(zptr, gzp);

    /* Compute jump vector entry:
     *
     *   1. mpmul input size (0..31) x 64b
     *   2. vector input size in 32b limbs (even number)
     *   3. branch entries in reverse order (31..0), using two
     *      instructions per entry (2 * 4 bytes).
     *
     *   displacement = byte_offset(bra_offset(len))
     *                = byte_offset((64 - len)/2)
     *                = 8 * (64 - len)/2
     *                = 4 * (64 - len)
     */
    Register temp = I5;  // Alright to use input regs. in first batch.

    __ sub(zero, len, temp);
    __ add(temp, 64, temp);
    __ sllx(temp, 2, disp);  // disp := (64 - len) << 2

    // Dispatch relative current PC, into instruction table below.
    __ rdpc(temp);
    __ add(temp, 16, temp);
    __ jmp(temp, disp);
    __ delayed()->clr(offs);

    ldd_entry(gxp, offs, F22);
    ldd_entry(gxp, offs, F20);
    ldd_entry(gxp, offs, F18);
    ldd_entry(gxp, offs, F16);
    ldd_entry(gxp, offs, F14);
    ldd_entry(gxp, offs, F12);
    ldd_entry(gxp, offs, F10);
    ldd_entry(gxp, offs, F8);
    ldd_entry(gxp, offs, F6);
    ldd_entry(gxp, offs, F4);
    ldx_entry(gxp, offs, I5);
    ldx_entry(gxp, offs, I4);
    ldx_entry(gxp, offs, I3);
    ldx_entry(gxp, offs, I2);
    ldx_entry(gxp, offs, I1);
    ldx_entry(gxp, offs, I0);
    ldx_entry(gxp, offs, L7);
    ldx_entry(gxp, offs, L6);
    ldx_entry(gxp, offs, L5);
    ldx_entry(gxp, offs, L4);
    ldx_entry(gxp, offs, L3);
    ldx_entry(gxp, offs, L2);
    ldx_entry(gxp, offs, L1);
    ldx_entry(gxp, offs, L0);
    ldd_entry(gxp, offs, F2);
    ldd_entry(gxp, offs, F0);
    ldx_entry(gxp, offs, O5);
    ldx_entry(gxp, offs, O4);
    ldx_entry(gxp, offs, O3);
    ldx_entry(gxp, offs, O2);
    ldx_entry(gxp, offs, O1);
    ldx_entry(gxp, offs, O0);

    __ save(SP, -176, SP);

    const Register addr = gxp;  // Alright to reuse 'gxp'.
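    // Worked instance of the rdpc/jmp dispatch pattern above and below
    // (illustration only): for len = 62 (32b limbs), disp = (64 - 62) << 2
    // = 8, i.e. we skip the first of the 32 eight-byte entries and execute
    // the remaining 31, loading 31 x 64b = 62 limbs of data. The '+ 16'
    // steps over the four instructions from 'rdpc' to the delay slot.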
    // Dispatch relative current PC, into instruction table below.
    __ rdpc(addr);
    __ add(addr, 16, addr);
    __ jmp(addr, disp);
    __ delayed()->clr(offs);

    ldd_entry(gyp, offs, F58);
    ldd_entry(gyp, offs, F56);
    ldd_entry(gyp, offs, F54);
    ldd_entry(gyp, offs, F52);
    ldd_entry(gyp, offs, F50);
    ldd_entry(gyp, offs, F48);
    ldd_entry(gyp, offs, F46);
    ldd_entry(gyp, offs, F44);
    ldd_entry(gyp, offs, F42);
    ldd_entry(gyp, offs, F40);
    ldd_entry(gyp, offs, F38);
    ldd_entry(gyp, offs, F36);
    ldd_entry(gyp, offs, F34);
    ldd_entry(gyp, offs, F32);
    ldd_entry(gyp, offs, F30);
    ldd_entry(gyp, offs, F28);
    ldd_entry(gyp, offs, F26);
    ldd_entry(gyp, offs, F24);
    ldx_entry(gyp, offs, O5);
    ldx_entry(gyp, offs, O4);
    ldx_entry(gyp, offs, O3);
    ldx_entry(gyp, offs, O2);
    ldx_entry(gyp, offs, O1);
    ldx_entry(gyp, offs, O0);
    ldx_entry(gyp, offs, L7);
    ldx_entry(gyp, offs, L6);
    ldx_entry(gyp, offs, L5);
    ldx_entry(gyp, offs, L4);
    ldx_entry(gyp, offs, L3);
    ldx_entry(gyp, offs, L2);
    ldx_entry(gyp, offs, L1);
    ldx_entry(gyp, offs, L0);

    __ save(SP, -176, SP);
    __ save(SP, -176, SP);
    __ save(SP, -176, SP);
    __ save(SP, -176, SP);
    __ save(SP, -176, SP);

    Label L_mpmul_restore_4, L_mpmul_restore_3, L_mpmul_restore_2;
    Label L_mpmul_restore_1, L_mpmul_restore_0;

    // Dispatch relative current PC, into instruction table below.
    __ rdpc(addr);
    __ add(addr, 16, addr);
    __ jmp(addr, disp);
    __ delayed()->clr(offs);

    mpmul_entry(31, L_mpmul_restore_0);
    mpmul_entry(30, L_mpmul_restore_0);
    mpmul_entry(29, L_mpmul_restore_0);
    mpmul_entry(28, L_mpmul_restore_0);
    mpmul_entry(27, L_mpmul_restore_1);
    mpmul_entry(26, L_mpmul_restore_1);
    mpmul_entry(25, L_mpmul_restore_1);
    mpmul_entry(24, L_mpmul_restore_1);
    mpmul_entry(23, L_mpmul_restore_1);
    mpmul_entry(22, L_mpmul_restore_1);
    mpmul_entry(21, L_mpmul_restore_1);
    mpmul_entry(20, L_mpmul_restore_2);
    mpmul_entry(19, L_mpmul_restore_2);
    mpmul_entry(18, L_mpmul_restore_2);
    mpmul_entry(17, L_mpmul_restore_2);
    mpmul_entry(16, L_mpmul_restore_2);
    mpmul_entry(15, L_mpmul_restore_2);
    mpmul_entry(14, L_mpmul_restore_2);
    mpmul_entry(13, L_mpmul_restore_3);
    mpmul_entry(12, L_mpmul_restore_3);
    mpmul_entry(11, L_mpmul_restore_3);
    mpmul_entry(10, L_mpmul_restore_3);
    mpmul_entry( 9, L_mpmul_restore_3);
    mpmul_entry( 8, L_mpmul_restore_3);
    mpmul_entry( 7, L_mpmul_restore_3);
    mpmul_entry( 6, L_mpmul_restore_4);
    mpmul_entry( 5, L_mpmul_restore_4);
    mpmul_entry( 4, L_mpmul_restore_4);
    mpmul_entry( 3, L_mpmul_restore_4);
    mpmul_entry( 2, L_mpmul_restore_4);
    mpmul_entry( 1, L_mpmul_restore_4);
    mpmul_entry( 0, L_mpmul_restore_4);

    Label L_z31, L_z30, L_z29, L_z28, L_z27, L_z26, L_z25, L_z24;
    Label L_z23, L_z22, L_z21, L_z20, L_z19, L_z18, L_z17, L_z16;
    Label L_z15, L_z14, L_z13, L_z12, L_z11, L_z10, L_z09, L_z08;
    Label L_z07, L_z06, L_z05, L_z04, L_z03, L_z02, L_z01, L_z00;

    Label L_zst_base;  // Store sequence base address.
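    // Each 'mpmul' size variant above branches to one of the
    // L_mpmul_restore_* labels, which unwind the right number of register
    // windows before dispatching into the z-store sequence below via the
    // 32-bit offsets (L_z<n> - L_zst_base) emitted at L_zst_offs.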
    __ bind(L_zst_base);

    stx_entry(L_z31, L7, L6, gzp, offs);
    stx_entry(L_z30, L5, L4, gzp, offs);
    stx_entry(L_z29, L3, L2, gzp, offs);
    stx_entry(L_z28, L1, L0, gzp, offs);
    __ restore();
    stx_entry(L_z27, O5, O4, gzp, offs);
    stx_entry(L_z26, O3, O2, gzp, offs);
    stx_entry(L_z25, O1, O0, gzp, offs);
    stx_entry(L_z24, L7, L6, gzp, offs);
    stx_entry(L_z23, L5, L4, gzp, offs);
    stx_entry(L_z22, L3, L2, gzp, offs);
    stx_entry(L_z21, L1, L0, gzp, offs);
    __ restore();
    stx_entry(L_z20, O5, O4, gzp, offs);
    stx_entry(L_z19, O3, O2, gzp, offs);
    stx_entry(L_z18, O1, O0, gzp, offs);
    stx_entry(L_z17, L7, L6, gzp, offs);
    stx_entry(L_z16, L5, L4, gzp, offs);
    stx_entry(L_z15, L3, L2, gzp, offs);
    stx_entry(L_z14, L1, L0, gzp, offs);
    __ restore();
    stx_entry(L_z13, O5, O4, gzp, offs);
    stx_entry(L_z12, O3, O2, gzp, offs);
    stx_entry(L_z11, O1, O0, gzp, offs);
    stx_entry(L_z10, L7, L6, gzp, offs);
    stx_entry(L_z09, L5, L4, gzp, offs);
    stx_entry(L_z08, L3, L2, gzp, offs);
    stx_entry(L_z07, L1, L0, gzp, offs);
    __ restore();
    stx_entry(L_z06, O5, O4, gzp, offs);
    stx_entry(L_z05, O3, O2, gzp, offs);
    stx_entry(L_z04, O1, O0, gzp, offs);
    stx_entry(L_z03, L7, L6, gzp, offs);
    stx_entry(L_z02, L5, L4, gzp, offs);
    stx_entry(L_z01, L3, L2, gzp, offs);
    stx_entry(L_z00, L1, L0, gzp, offs);

    __ restore();
    __ restore();
    // Exit out of 'mpmul' routine, back to multiplyToLen.
    __ ba_short(L_exit);

    Label L_zst_offs;
    __ bind(L_zst_offs);

    offs_entry(L_z31, L_zst_base);  // index 31: 2048x2048
    offs_entry(L_z30, L_zst_base);
    offs_entry(L_z29, L_zst_base);
    offs_entry(L_z28, L_zst_base);
    offs_entry(L_z27, L_zst_base);
    offs_entry(L_z26, L_zst_base);
    offs_entry(L_z25, L_zst_base);
    offs_entry(L_z24, L_zst_base);
    offs_entry(L_z23, L_zst_base);
    offs_entry(L_z22, L_zst_base);
    offs_entry(L_z21, L_zst_base);
    offs_entry(L_z20, L_zst_base);
    offs_entry(L_z19, L_zst_base);
    offs_entry(L_z18, L_zst_base);
    offs_entry(L_z17, L_zst_base);
    offs_entry(L_z16, L_zst_base);
    offs_entry(L_z15, L_zst_base);
    offs_entry(L_z14, L_zst_base);
    offs_entry(L_z13, L_zst_base);
    offs_entry(L_z12, L_zst_base);
    offs_entry(L_z11, L_zst_base);
    offs_entry(L_z10, L_zst_base);
    offs_entry(L_z09, L_zst_base);
    offs_entry(L_z08, L_zst_base);
    offs_entry(L_z07, L_zst_base);
    offs_entry(L_z06, L_zst_base);
    offs_entry(L_z05, L_zst_base);
    offs_entry(L_z04, L_zst_base);
    offs_entry(L_z03, L_zst_base);
    offs_entry(L_z02, L_zst_base);
    offs_entry(L_z01, L_zst_base);
    offs_entry(L_z00, L_zst_base);  // index 0: 64x64

    __ bind(L_mpmul_restore_4);
    __ restore();
    __ bind(L_mpmul_restore_3);
    __ restore();
    __ bind(L_mpmul_restore_2);
    __ restore();
    __ bind(L_mpmul_restore_1);
    __ restore();
    __ bind(L_mpmul_restore_0);

    // Dispatch via offset vector entry, into z-store sequence.
    Label L_zst_rdpc;
    __ bind(L_zst_rdpc);

    assert(L_zst_base.is_bound(), "must be");
    assert(L_zst_offs.is_bound(), "must be");
    assert(L_zst_rdpc.is_bound(), "must be");

    int dbase = L_zst_rdpc.loc_pos() - L_zst_base.loc_pos();
    int doffs = L_zst_rdpc.loc_pos() - L_zst_offs.loc_pos();

    temp = gyp;  // Alright to reuse 'gyp'.
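    // Sketch of the final dispatch below (illustration): 'addr' holds the PC
    // at L_zst_rdpc, so addr - doffs is the address of the L_zst_offs table
    // and addr - dbase is the address of L_zst_base. 'disp' is halved since
    // the offset table uses 4-byte entries, versus the 8-byte branch entries
    // the displacement was originally computed for.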
    __ rdpc(addr);
    __ sub(addr, doffs, temp);
    __ srlx(disp, 1, disp);
    __ lduw(temp, disp, offs);
    __ sub(addr, dbase, temp);
    __ jmp(temp, offs);
    __ delayed()->clr(offs);
  }

  void gen_mult_64x64(Register xp, Register xn,
                      Register yp, Register yn,
                      Register zp, Register zn, Label &L_exit)
  {
    // Assuming that a stack frame has already been created, i.e. local and
    // output registers are available for immediate use.

    const Register ri = L0;  // Outer loop index, xv[i]
    const Register rj = L1;  // Inner loop index, yv[j]
    const Register rk = L2;  // Output loop index, zv[k]
    const Register rx = L4;  // x-vector datum [i]
    const Register ry = L5;  // y-vector datum [j]
    const Register rz = L6;  // z-vector datum [k]
    const Register rc = L7;  // carry over (to z-vector datum [k-1])

    const Register lop = O0;  // lo-64b product
    const Register hip = O1;  // hi-64b product

    const Register zero = G0;

    Label L_loop_i,  L_exit_loop_i;
    Label L_loop_j;
    Label L_loop_i2, L_exit_loop_i2;

    __ srlx(xn, 1, xn);  // index for u32 to u64 ditto
    __ srlx(yn, 1, yn);  // index for u32 to u64 ditto
    __ srlx(zn, 1, zn);  // index for u32 to u64 ditto
    __ dec(xn);          // Adjust [0..(N/2)-1]
    __ dec(yn);
    __ dec(zn);
    __ clr(rc);          // u64 c = 0
    __ sllx(xn, 3, ri);  // int i = xn (byte offset i = 8*xn)
    __ sllx(yn, 3, rj);  // int j = yn (byte offset j = 8*yn)
    __ sllx(zn, 3, rk);  // int k = zn (byte offset k = 8*zn)
    __ ldx(yp, rj, ry);  // u64 y = yp[yn]

    // for (int i = xn; i >= 0; i--)
    __ bind(L_loop_i);

    __ cmp_and_br_short(ri, 0,    // i >= 0
                        Assembler::less, Assembler::pn, L_exit_loop_i);
    __ ldx(xp, ri, rx);           // x = xp[i]
    __ mulx(rx, ry, lop);         // lo-64b-part of result 64x64
    __ umulxhi(rx, ry, hip);      // hi-64b-part of result 64x64
    __ addcc(rc, lop, lop);       // Accumulate lower order bits (producing carry)
    __ addxc(hip, zero, rc);      // carry over to next datum [k-1]
    __ stx(lop, zp, rk);          // z[k] = lop
    __ dec(rk, 8);                // k--
    __ dec(ri, 8);                // i--
    __ ba_short(L_loop_i);

    __ bind(L_exit_loop_i);
    __ stx(rc, zp, rk);           // z[k] = c

    // for (int j = yn - 1; j >= 0; j--)
    __ sllx(yn, 3, rj);  // int j = yn - 1 (byte offset j = 8*yn)
    __ dec(rj, 8);

    __ bind(L_loop_j);

    __ cmp_and_br_short(rj, 0,    // j >= 0
                        Assembler::less, Assembler::pn, L_exit);
    __ clr(rc);                   // u64 c = 0
    __ ldx(yp, rj, ry);           // u64 y = yp[j]

    // for (int i = xn, k = --zn; i >= 0; i--)
    __ dec(zn);                   // --zn
    __ sllx(xn, 3, ri);           // int i = xn (byte offset i = 8*xn)
    __ sllx(zn, 3, rk);           // int k = zn (byte offset k = 8*zn)

    __ bind(L_loop_i2);

    __ cmp_and_br_short(ri, 0,    // i >= 0
                        Assembler::less, Assembler::pn, L_exit_loop_i2);
    __ ldx(xp, ri, rx);           // x = xp[i]
    __ ldx(zp, rk, rz);           // z = zp[k], accumulator
    __ mulx(rx, ry, lop);         // lo-64b-part of result 64x64
    __ umulxhi(rx, ry, hip);      // hi-64b-part of result 64x64
    __ addcc(rz, rc, rz);         // Accumulate lower order bits,
    __ addxc(hip, zero, rc);      // Accumulate higher order bits to carry
    __ addcc(rz, lop, rz);        // z += lo(p) + c
    __ addxc(rc, zero, rc);
    __ stx(rz, zp, rk);           // zp[k] = z
    __ dec(rk, 8);                // k--
    __ dec(ri, 8);                // i--
    __ ba_short(L_loop_i2);

    __ bind(L_exit_loop_i2);
    __ stx(rc, zp, rk);           // z[k] = c

    __ dec(rj, 8);                // j--
    __ ba_short(L_loop_j);
  }
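  /* For reference, the gen_mult_* kernels implement schoolbook
   * multiplication over w-bit limbs, along the lines of (illustration
   * only, using 128b intermediates for the 64x64 case):
   *
   *   c = 0;
   *   for (i = xn, k = zn; i >= 0; i--, k--) {    // first row: x * y[yn]
   *     p = (u128)x[i] * y[yn] + c;
   *     z[k] = lo64(p);  c = hi64(p);
   *   }
   *   z[k] = c;
   *   for (j = yn - 1; j >= 0; j--) {             // remaining rows
   *     c = 0;
   *     for (i = xn, k = --zn; i >= 0; i--, k--) {
   *       p = (u128)x[i] * y[j] + z[k] + c;
   *       z[k] = lo64(p);  c = hi64(p);
   *     }
   *     z[k] = c;
   *   }
   */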
  void gen_mult_64x64_unaligned(Register xp, Register xn,
                                Register yp, Register yn,
                                Register zp, Register zn, Label &L_exit)
  {
    // Assuming that a stack frame has already been created, i.e. local and
    // output registers are available for use.

    const Register xpc = L0;  // Outer loop cursor, xp[i]
    const Register ypc = L1;  // Inner loop cursor, yp[j]
    const Register zpc = L2;  // Output loop cursor, zp[k]
    const Register rx  = L4;  // x-vector datum [i]
    const Register ry  = L5;  // y-vector datum [j]
    const Register rz  = L6;  // z-vector datum [k]
    const Register rc  = L7;  // carry over (to z-vector datum [k-1])
    const Register rt  = O2;

    const Register lop = O0;  // lo-64b product
    const Register hip = O1;  // hi-64b product

    const Register zero = G0;

    Label L_loop_i,  L_exit_loop_i;
    Label L_loop_j;
    Label L_loop_i2, L_exit_loop_i2;

    __ srlx(xn, 1, xn);   // index for u32 to u64 ditto
    __ srlx(yn, 1, yn);   // index for u32 to u64 ditto
    __ srlx(zn, 1, zn);   // index for u32 to u64 ditto
    __ dec(xn);           // Adjust [0..(N/2)-1]
    __ dec(yn);
    __ dec(zn);
    __ clr(rc);           // u64 c = 0
    __ sllx(xn, 3, xpc);  // u32* xpc = &xp[xn] (byte offset 8*xn)
    __ add(xp, xpc, xpc);
    __ sllx(yn, 3, ypc);  // u32* ypc = &yp[yn] (byte offset 8*yn)
    __ add(yp, ypc, ypc);
    __ sllx(zn, 3, zpc);  // u32* zpc = &zp[zn] (byte offset 8*zn)
    __ add(zp, zpc, zpc);
    __ lduw(ypc, 0, rt);  // u64 y = yp[yn]
    __ lduw(ypc, 4, ry);  //   ...
    __ sllx(rt, 32, rt);
    __ or3(rt, ry, ry);

    // for (int i = xn; i >= 0; i--)
    __ bind(L_loop_i);

    __ cmp_and_brx_short(xpc, xp,  // i >= 0
                         Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i);
    __ lduw(xpc, 0, rt);      // u64 x = xp[i]
    __ lduw(xpc, 4, rx);      //   ...
    __ sllx(rt, 32, rt);
    __ or3(rt, rx, rx);
    __ mulx(rx, ry, lop);     // lo-64b-part of result 64x64
    __ umulxhi(rx, ry, hip);  // hi-64b-part of result 64x64
    __ addcc(rc, lop, lop);   // Accumulate lower order bits (producing carry)
    __ addxc(hip, zero, rc);  // carry over to next datum [k-1]
    __ srlx(lop, 32, rt);
    __ stw(rt, zpc, 0);       // z[k] = lop
    __ stw(lop, zpc, 4);      //   ...
    __ dec(zpc, 8);           // k-- (zpc--)
    __ dec(xpc, 8);           // i-- (xpc--)
    __ ba_short(L_loop_i);

    __ bind(L_exit_loop_i);
    __ srlx(rc, 32, rt);
    __ stw(rt, zpc, 0);       // z[k] = c
    __ stw(rc, zpc, 4);

    // for (int j = yn - 1; j >= 0; j--)
    __ sllx(yn, 3, ypc);      // u32* ypc = &yp[yn] (byte offset 8*yn)
    __ add(yp, ypc, ypc);
    __ dec(ypc, 8);           // yn - 1 (ypc--)

    __ bind(L_loop_j);

    __ cmp_and_brx_short(ypc, yp,  // j >= 0
                         Assembler::lessUnsigned, Assembler::pn, L_exit);
    __ clr(rc);               // u64 c = 0
    __ lduw(ypc, 0, rt);      // u64 y = yp[j] (= *ypc)
    __ lduw(ypc, 4, ry);      //   ...
    __ sllx(rt, 32, rt);
    __ or3(rt, ry, ry);

    // for (int i = xn, k = --zn; i >= 0; i--)
    __ sllx(xn, 3, xpc);      // u32* xpc = &xp[xn] (byte offset 8*xn)
    __ add(xp, xpc, xpc);
    __ dec(zn);               // --zn
    __ sllx(zn, 3, zpc);      // u32* zpc = &zp[zn] (byte offset 8*zn)
    __ add(zp, zpc, zpc);

    __ bind(L_loop_i2);

    __ cmp_and_brx_short(xpc, xp,  // i >= 0
                         Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i2);
    __ lduw(xpc, 0, rt);      // u64 x = xp[i] (= *xpc)
    __ lduw(xpc, 4, rx);      //   ...
    __ sllx(rt, 32, rt);
    __ or3(rt, rx, rx);

    __ lduw(zpc, 0, rt);      // u64 z = zp[k] (= *zpc)
    __ lduw(zpc, 4, rz);      //   ...
    __ sllx(rt, 32, rt);
    __ or3(rt, rz, rz);

    __ mulx(rx, ry, lop);     // lo-64b-part of result 64x64
    __ umulxhi(rx, ry, hip);  // hi-64b-part of result 64x64
    __ addcc(rz, rc, rz);     // Accumulate lower order bits...
    __ addxc(hip, zero, rc);  // Accumulate higher order bits to carry
    __ addcc(rz, lop, rz);    // ... z += lo(p) + c
    __ addxccc(rc, zero, rc);
    __ srlx(rz, 32, rt);
    __ stw(rt, zpc, 0);       // zp[k] = z (*zpc = z)
    __ stw(rz, zpc, 4);
    __ dec(zpc, 8);           // k-- (zpc--)
    __ dec(xpc, 8);           // i-- (xpc--)
    __ ba_short(L_loop_i2);

    __ bind(L_exit_loop_i2);
    __ srlx(rc, 32, rt);
    __ stw(rt, zpc, 0);       // z[k] = c
    __ stw(rc, zpc, 4);
    __ dec(ypc, 8);           // j-- (ypc--)
    __ ba_short(L_loop_j);
  }
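  // Note on the unaligned variant above: each 64b limb is assembled from two
  // 32b halves (lduw + sllx + or3) and written back as two 32b stores
  // (srlx + stw + stw), so the kernel never issues a 64-bit memory access
  // against data that is only 32-bit aligned.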
  void gen_mult_32x32(Register xp, Register xn,
                      Register yp, Register yn,
                      Register zp, Register zn, Label &L_exit)
  {
    // Assuming that a stack frame has already been created, i.e. local and
    // output registers are available for use.

    const Register ri = L0;  // Outer loop index, xv[i]
    const Register rj = L1;  // Inner loop index, yv[j]
    const Register rk = L2;  // Output loop index, zv[k]
    const Register rx = L4;  // x-vector datum [i]
    const Register ry = L5;  // y-vector datum [j]
    const Register rz = L6;  // z-vector datum [k]
    const Register rc = L7;  // carry over (to z-vector datum [k-1])

    const Register p64 = O0;  // 64b product
    const Register z65 = O1;  // carry+64b accumulator
    const Register c65 = O2;  // carry at bit 65
    const Register c33 = O2;  // carry at bit 33 (after shift)

    const Register zero = G0;

    Label L_loop_i,  L_exit_loop_i;
    Label L_loop_j;
    Label L_loop_i2, L_exit_loop_i2;

    __ dec(xn);          // Adjust [0..N-1]
    __ dec(yn);
    __ dec(zn);
    __ clr(rc);          // u32 c = 0
    __ sllx(xn, 2, ri);  // int i = xn (byte offset i = 4*xn)
    __ sllx(yn, 2, rj);  // int j = yn (byte offset j = 4*yn)
    __ sllx(zn, 2, rk);  // int k = zn (byte offset k = 4*zn)
    __ lduw(yp, rj, ry); // u32 y = yp[yn]

    // for (int i = xn; i >= 0; i--)
    __ bind(L_loop_i);

    __ cmp_and_br_short(ri, 0,    // i >= 0
                        Assembler::less, Assembler::pn, L_exit_loop_i);
    __ lduw(xp, ri, rx);          // x = xp[i]
    __ mulx(rx, ry, p64);         // 64b result of 32x32
    __ addcc(rc, p64, z65);       // Accumulate to 65 bits (producing carry)
    __ addxc(zero, zero, c65);    // Materialise carry (in bit 65) into lsb,
    __ sllx(c65, 32, c33);        //   and shift into bit 33
    __ srlx(z65, 32, rc);         // carry = c33 | hi(z65) >> 32
    __ add(c33, rc, rc);          // carry over to next datum [k-1]
    __ stw(z65, zp, rk);          // z[k] = lo(z65)
    __ dec(rk, 4);                // k--
    __ dec(ri, 4);                // i--
    __ ba_short(L_loop_i);

    __ bind(L_exit_loop_i);
    __ stw(rc, zp, rk);           // z[k] = c

    // for (int j = yn - 1; j >= 0; j--)
    __ sllx(yn, 2, rj);  // int j = yn - 1 (byte offset j = 4*yn)
    __ dec(rj, 4);

    __ bind(L_loop_j);

    __ cmp_and_br_short(rj, 0,    // j >= 0
                        Assembler::less, Assembler::pn, L_exit);
    __ clr(rc);                   // u32 c = 0
    __ lduw(yp, rj, ry);          // u32 y = yp[j]

    // for (int i = xn, k = --zn; i >= 0; i--)
    __ dec(zn);                   // --zn
    __ sllx(xn, 2, ri);           // int i = xn (byte offset i = 4*xn)
    __ sllx(zn, 2, rk);           // int k = zn (byte offset k = 4*zn)
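    // The loop below uses the same 65-bit accumulate as L_loop_i above;
    // per step (illustration): t = z[k] + c + (u64)x * y, then
    // z[k] = lo32(t) and c = t >> 32 -- a 33-bit quantity, hence the
    // carry materialised via 'addxc' and shifted into bit 33.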
    __ bind(L_loop_i2);

    __ cmp_and_br_short(ri, 0,    // i >= 0
                        Assembler::less, Assembler::pn, L_exit_loop_i2);
    __ lduw(xp, ri, rx);          // x = xp[i]
    __ lduw(zp, rk, rz);          // z = zp[k], accumulator
    __ mulx(rx, ry, p64);         // 64b result of 32x32
    __ add(rz, rc, rz);           // Accumulate lower order bits,
    __ addcc(rz, p64, z65);       // z += lo(p64) + c
    __ addxc(zero, zero, c65);    // Materialise carry (in bit 65) into lsb,
    __ sllx(c65, 32, c33);        //   and shift into bit 33
    __ srlx(z65, 32, rc);         // carry = c33 | hi(z65) >> 32
    __ add(c33, rc, rc);          // carry over to next datum [k-1]
    __ stw(z65, zp, rk);          // zp[k] = lo(z65)
    __ dec(rk, 4);                // k--
    __ dec(ri, 4);                // i--
    __ ba_short(L_loop_i2);

    __ bind(L_exit_loop_i2);
    __ stw(rc, zp, rk);           // z[k] = c
    __ dec(rj, 4);                // j--
    __ ba_short(L_loop_j);
  }


  void generate_initial() {
    // Generates the initial stubs and initializes the entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
    //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific
    StubRoutines::Sparc::_test_stop_entry = generate_test_stop();

    StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
    StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));

    if (UseCRC32Intrinsics) {
      // set table address before stub generation which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::Sparc::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      // set table address before stub generation which uses it
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::Sparc::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
    }
  }
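  // Stub generation happens in two phases: generate_initial() runs early so
  // that the interpreter can rely on its entry points (e.g. the
  // StackOverflowError stubs), while generate_all() runs only after
  // universe/heap initialization has completed.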
  void generate_all() {
    // Generates all stubs and initializes the entry points

    // Generate partial_subtype_check first here since its code depends on
    // UseZeroBaseCompressedOops which is defined after heap initialization.
    StubRoutines::Sparc::_partial_subtype_check             = generate_partial_subtype_check();
    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    StubRoutines::_throw_AbstractMethodError_entry          = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Don't initialize the platform math functions since sparc
    // doesn't have intrinsics for these operations.

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);

    // generate AES intrinsics code
    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    // generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // generate SHA1/SHA256/SHA512 intrinsics code
    if (UseSHA1Intrinsics) {
      StubRoutines::_sha1_implCompress   = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true,  "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
    }
    // generate Adler32 intrinsics code
    if (UseAdler32Intrinsics) {
      StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
    }

#ifdef COMPILER2
    // Intrinsics supported by C2 only:
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
#endif // COMPILER2
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // replace the standard masm with a special one:
    _masm = new MacroAssembler(code);

    _stub_count = !all ? 0x100 : 0x200;
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }

    // make sure this stub is available for all local calls
    if (_atomic_add_stub.is_unbound()) {
      // generate a second time, if necessary
      (void) generate_atomic_add();
    }
  }

 private:
  int _stub_count;
  void stub_prolog(StubCodeDesc* cdesc) {
# ifdef ASSERT
    // put extra information in the stub code, to make it more readable
    // Write the high part of the address
    // [RGV] Check if there is a dependency on the size of this prolog
    __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
    __ emit_data((intptr_t)cdesc,       relocInfo::none);
    __ emit_data(++_stub_count, relocInfo::none);
# endif
    align(true);
  }

  void align(bool at_header = false) {
    // %%%%% move this constant somewhere else
    // UltraSPARC cache line size is 8 instructions:
    const unsigned int icache_line_size = 32;
    const unsigned int icache_half_line_size = 16;

    if (at_header) {
      while ((intptr_t)(__ pc()) % icache_line_size != 0) {
        __ emit_data(0, relocInfo::none);
      }
    } else {
      while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
        __ nop();
      }
    }
  }

}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}