/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#if defined(ABI_ELFv2)
#define STUB_ENTRY(name) StubRoutines::name()
#else
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C.
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call the frame manager
    // or native_entry, and process the result.
82 83 StubCodeMark mark(this, "StubRoutines", "call_stub"); 84 85 address start = __ function_entry(); 86 87 // some sanity checks 88 assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned"); 89 assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned"); 90 assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned"); 91 assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned"); 92 assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned"); 93 94 Register r_arg_call_wrapper_addr = R3; 95 Register r_arg_result_addr = R4; 96 Register r_arg_result_type = R5; 97 Register r_arg_method = R6; 98 Register r_arg_entry = R7; 99 Register r_arg_thread = R10; 100 101 Register r_temp = R24; 102 Register r_top_of_arguments_addr = R25; 103 Register r_entryframe_fp = R26; 104 105 { 106 // Stack on entry to call_stub: 107 // 108 // F1 [C_FRAME] 109 // ... 110 111 Register r_arg_argument_addr = R8; 112 Register r_arg_argument_count = R9; 113 Register r_frame_alignment_in_bytes = R27; 114 Register r_argument_addr = R28; 115 Register r_argumentcopy_addr = R29; 116 Register r_argument_size_in_bytes = R30; 117 Register r_frame_size = R23; 118 119 Label arguments_copied; 120 121 // Save LR/CR to caller's C_FRAME. 122 __ save_LR_CR(R0); 123 124 // Zero extend arg_argument_count. 125 __ clrldi(r_arg_argument_count, r_arg_argument_count, 32); 126 127 // Save non-volatiles GPRs to ENTRY_FRAME (not yet pushed, but it's safe). 128 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); 129 130 // Keep copy of our frame pointer (caller's SP). 131 __ mr(r_entryframe_fp, R1_SP); 132 133 BLOCK_COMMENT("Push ENTRY_FRAME including arguments"); 134 // Push ENTRY_FRAME including arguments: 135 // 136 // F0 [TOP_IJAVA_FRAME_ABI] 137 // alignment (optional) 138 // [outgoing Java arguments] 139 // [ENTRY_FRAME_LOCALS] 140 // F1 [C_FRAME] 141 // ... 142 143 // calculate frame size 144 145 // unaligned size of arguments 146 __ sldi(r_argument_size_in_bytes, 147 r_arg_argument_count, Interpreter::logStackElementSize); 148 // arguments alignment (max 1 slot) 149 // FIXME: use round_to() here 150 __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1); 151 __ sldi(r_frame_alignment_in_bytes, 152 r_frame_alignment_in_bytes, Interpreter::logStackElementSize); 153 154 // size = unaligned size of arguments + top abi's size 155 __ addi(r_frame_size, r_argument_size_in_bytes, 156 frame::top_ijava_frame_abi_size); 157 // size += arguments alignment 158 __ add(r_frame_size, 159 r_frame_size, r_frame_alignment_in_bytes); 160 // size += size of call_stub locals 161 __ addi(r_frame_size, 162 r_frame_size, frame::entry_frame_locals_size); 163 164 // push ENTRY_FRAME 165 __ push_frame(r_frame_size, r_temp); 166 167 // initialize call_stub locals (step 1) 168 __ std(r_arg_call_wrapper_addr, 169 _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp); 170 __ std(r_arg_result_addr, 171 _entry_frame_locals_neg(result_address), r_entryframe_fp); 172 __ std(r_arg_result_type, 173 _entry_frame_locals_neg(result_type), r_entryframe_fp); 174 // we will save arguments_tos_address later 175 176 177 BLOCK_COMMENT("Copy Java arguments"); 178 // copy Java arguments 179 180 // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later. 181 // FIXME: why not simply use SP+frame::top_ijava_frame_size? 
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming Java argument
        __ add(r_argument_addr,
               r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         -  intptr_t*   sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  -  Method
      //   R16_thread  -  JavaThread*

      // Tos must point to last argument - element_size.
      const Register tos = R15_esp;

      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
      // Remember the senderSP so the interpreter can pop c2i arguments off the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here; r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and the runtime value of LR is saved in return_address.
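      // (Note: the caller of this generator is expected to publish the
      //  address written to return_address as
      //  StubRoutines::_call_stub_return_address; generate_catch_exception()
      //  below relies on that address to resume in this stub after an
      //  exception.)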
277 assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread, 278 "trashed r_new_arg_entry"); 279 return_address = __ call_stub(r_new_arg_entry); 280 } 281 282 { 283 BLOCK_COMMENT("Returned from frame manager or native entry."); 284 // Returned from frame manager or native entry. 285 // Now pop frame, process result, and return to caller. 286 287 // Stack on exit from frame manager / native entry: 288 // 289 // F0 [ABI] 290 // ... 291 // [ENTRY_FRAME_LOCALS] 292 // F1 [C_FRAME] 293 // ... 294 // 295 // Just pop the topmost frame ... 296 // 297 298 Label ret_is_object; 299 Label ret_is_long; 300 Label ret_is_float; 301 Label ret_is_double; 302 303 Register r_entryframe_fp = R30; 304 Register r_lr = R7_ARG5; 305 Register r_cr = R8_ARG6; 306 307 // Reload some volatile registers which we've spilled before the call 308 // to frame manager / native entry. 309 // Access all locals via frame pointer, because we know nothing about 310 // the topmost frame's size. 311 __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP); 312 assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr); 313 __ ld(r_arg_result_addr, 314 _entry_frame_locals_neg(result_address), r_entryframe_fp); 315 __ ld(r_arg_result_type, 316 _entry_frame_locals_neg(result_type), r_entryframe_fp); 317 __ ld(r_cr, _abi(cr), r_entryframe_fp); 318 __ ld(r_lr, _abi(lr), r_entryframe_fp); 319 320 // pop frame and restore non-volatiles, LR and CR 321 __ mr(R1_SP, r_entryframe_fp); 322 __ mtcr(r_cr); 323 __ mtlr(r_lr); 324 325 // Store result depending on type. Everything that is not 326 // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT. 327 __ cmpwi(CCR0, r_arg_result_type, T_OBJECT); 328 __ cmpwi(CCR1, r_arg_result_type, T_LONG); 329 __ cmpwi(CCR5, r_arg_result_type, T_FLOAT); 330 __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE); 331 332 // restore non-volatile registers 333 __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); 334 335 336 // Stack on exit from call_stub: 337 // 338 // 0 [C_FRAME] 339 // ... 340 // 341 // no call_stub frames left. 342 343 // All non-volatiles have been restored at this point!! 344 assert(R3_RET == R3, "R3_RET should be R3"); 345 346 __ beq(CCR0, ret_is_object); 347 __ beq(CCR1, ret_is_long); 348 __ beq(CCR5, ret_is_float); 349 __ beq(CCR6, ret_is_double); 350 351 // default: 352 __ stw(R3_RET, 0, r_arg_result_addr); 353 __ blr(); // return to caller 354 355 // case T_OBJECT: 356 __ bind(ret_is_object); 357 __ std(R3_RET, 0, r_arg_result_addr); 358 __ blr(); // return to caller 359 360 // case T_LONG: 361 __ bind(ret_is_long); 362 __ std(R3_RET, 0, r_arg_result_addr); 363 __ blr(); // return to caller 364 365 // case T_FLOAT: 366 __ bind(ret_is_float); 367 __ stfs(F1_RET, 0, r_arg_result_addr); 368 __ blr(); // return to caller 369 370 // case T_DOUBLE: 371 __ bind(ret_is_double); 372 __ stfd(F1_RET, 0, r_arg_result_addr); 373 __ blr(); // return to caller 374 } 375 376 return start; 377 } 378 379 // Return point for a Java call if there's an exception thrown in 380 // Java code. The exception is caught and transformed into a 381 // pending exception stored in JavaThread that can be tested from 382 // within the VM. 
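  // (For orientation only, not part of the stub: on the C++ side the
  //  exception recorded below is typically observed via
  //  JavaThread::has_pending_exception() / pending_exception(); this stub
  //  merely stores it and returns into the call stub.)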
383 // 384 address generate_catch_exception() { 385 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 386 387 address start = __ pc(); 388 389 // Registers alive 390 // 391 // R16_thread 392 // R3_ARG1 - address of pending exception 393 // R4_ARG2 - return address in call stub 394 395 const Register exception_file = R21_tmp1; 396 const Register exception_line = R22_tmp2; 397 398 __ load_const(exception_file, (void*)__FILE__); 399 __ load_const(exception_line, (void*)__LINE__); 400 401 __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread); 402 // store into `char *' 403 __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread); 404 // store into `int' 405 __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread); 406 407 // complete return to VM 408 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 409 410 __ mtlr(R4_ARG2); 411 // continue in call stub 412 __ blr(); 413 414 return start; 415 } 416 417 // Continuation point for runtime calls returning with a pending 418 // exception. The pending exception check happened in the runtime 419 // or native call stub. The pending exception in Thread is 420 // converted into a Java-level exception. 421 // 422 // Read: 423 // 424 // LR: The pc the runtime library callee wants to return to. 425 // Since the exception occurred in the callee, the return pc 426 // from the point of view of Java is the exception pc. 427 // thread: Needed for method handles. 428 // 429 // Invalidate: 430 // 431 // volatile registers (except below). 432 // 433 // Update: 434 // 435 // R4_ARG2: exception 436 // 437 // (LR is unchanged and is live out). 438 // 439 address generate_forward_exception() { 440 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 441 address start = __ pc(); 442 443 #if !defined(PRODUCT) 444 if (VerifyOops) { 445 // Get pending exception oop. 446 __ ld(R3_ARG1, 447 in_bytes(Thread::pending_exception_offset()), 448 R16_thread); 449 // Make sure that this code is only executed if there is a pending exception. 450 { 451 Label L; 452 __ cmpdi(CCR0, R3_ARG1, 0); 453 __ bne(CCR0, L); 454 __ stop("StubRoutines::forward exception: no pending exception (1)"); 455 __ bind(L); 456 } 457 __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop"); 458 } 459 #endif 460 461 // Save LR/CR and copy exception pc (LR) into R4_ARG2. 462 __ save_LR_CR(R4_ARG2); 463 __ push_frame_reg_args(0, R0); 464 // Find exception handler. 465 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 466 SharedRuntime::exception_handler_for_return_address), 467 R16_thread, 468 R4_ARG2); 469 // Copy handler's address. 470 __ mtctr(R3_RET); 471 __ pop_frame(); 472 __ restore_LR_CR(R0); 473 474 // Set up the arguments for the exception handler: 475 // - R3_ARG1: exception oop 476 // - R4_ARG2: exception pc. 477 478 // Load pending exception oop. 479 __ ld(R3_ARG1, 480 in_bytes(Thread::pending_exception_offset()), 481 R16_thread); 482 483 // The exception pc is the return address in the caller. 484 // Must load it into R4_ARG2. 485 __ mflr(R4_ARG2); 486 487 #ifdef ASSERT 488 // Make sure exception is set. 489 { 490 Label L; 491 __ cmpdi(CCR0, R3_ARG1, 0); 492 __ bne(CCR0, L); 493 __ stop("StubRoutines::forward exception: no pending exception (2)"); 494 __ bind(L); 495 } 496 #endif 497 498 // Clear the pending exception. 499 __ li(R0, 0); 500 __ std(R0, 501 in_bytes(Thread::pending_exception_offset()), 502 R16_thread); 503 // Jump to exception handler. 
504 __ bctr(); 505 506 return start; 507 } 508 509 #undef __ 510 #define __ masm-> 511 // Continuation point for throwing of implicit exceptions that are 512 // not handled in the current activation. Fabricates an exception 513 // oop and initiates normal exception dispatching in this 514 // frame. Only callee-saved registers are preserved (through the 515 // normal register window / RegisterMap handling). If the compiler 516 // needs all registers to be preserved between the fault point and 517 // the exception handler then it must assume responsibility for that 518 // in AbstractCompiler::continuation_for_implicit_null_exception or 519 // continuation_for_implicit_division_by_zero_exception. All other 520 // implicit exceptions (e.g., NullPointerException or 521 // AbstractMethodError on entry) are either at call sites or 522 // otherwise assume that stack unwinding will be initiated, so 523 // caller saved registers were assumed volatile in the compiler. 524 // 525 // Note that we generate only this stub into a RuntimeStub, because 526 // it needs to be properly traversed and ignored during GC, so we 527 // change the meaning of the "__" macro within this method. 528 // 529 // Note: the routine set_pc_not_at_call_for_caller in 530 // SharedRuntime.cpp requires that this code be generated into a 531 // RuntimeStub. 532 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc, 533 Register arg1 = noreg, Register arg2 = noreg) { 534 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); 535 MacroAssembler* masm = new MacroAssembler(&code); 536 537 OopMapSet* oop_maps = new OopMapSet(); 538 int frame_size_in_bytes = frame::abi_reg_args_size; 539 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 540 541 address start = __ pc(); 542 543 __ save_LR_CR(R11_scratch1); 544 545 // Push a frame. 546 __ push_frame_reg_args(0, R11_scratch1); 547 548 address frame_complete_pc = __ pc(); 549 550 if (restore_saved_exception_pc) { 551 __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74); 552 } 553 554 // Note that we always have a runtime stub frame on the top of 555 // stack by this point. Remember the offset of the instruction 556 // whose address will be moved to R11_scratch1. 557 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 558 559 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 560 561 __ mr(R3_ARG1, R16_thread); 562 if (arg1 != noreg) { 563 __ mr(R4_ARG2, arg1); 564 } 565 if (arg2 != noreg) { 566 __ mr(R5_ARG3, arg2); 567 } 568 #if defined(ABI_ELFv2) 569 __ call_c(runtime_entry, relocInfo::none); 570 #else 571 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none); 572 #endif 573 574 // Set an oopmap for the call site. 575 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 576 577 __ reset_last_Java_frame(); 578 579 #ifdef ASSERT 580 // Make sure that this code is only executed if there is a pending 581 // exception. 582 { 583 Label L; 584 __ ld(R0, 585 in_bytes(Thread::pending_exception_offset()), 586 R16_thread); 587 __ cmpdi(CCR0, R0, 0); 588 __ bne(CCR0, L); 589 __ stop("StubRoutines::throw_exception: no pending exception"); 590 __ bind(L); 591 } 592 #endif 593 594 // Pop frame. 595 __ pop_frame(); 596 597 __ restore_LR_CR(R11_scratch1); 598 599 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); 600 __ mtctr(R11_scratch1); 601 __ bctr(); 602 603 // Create runtime stub with OopMap. 
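    // (frame_complete_pc was captured right after push_frame_reg_args() above;
    //  its offset is passed as "frame_complete" below so the runtime knows
    //  from which PC on this stub's frame is fully set up for stack walking.)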
604 RuntimeStub* stub = 605 RuntimeStub::new_runtime_stub(name, &code, 606 /*frame_complete=*/ (int)(frame_complete_pc - start), 607 frame_size_in_bytes/wordSize, 608 oop_maps, 609 false); 610 return stub->entry_point(); 611 } 612 #undef __ 613 #define __ _masm-> 614 615 616 // Support for void zero_words_aligned8(HeapWord* to, size_t count) 617 // 618 // Arguments: 619 // to: 620 // count: 621 // 622 // Destroys: 623 // 624 address generate_zero_words_aligned8() { 625 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8"); 626 627 // Implemented as in ClearArray. 628 address start = __ function_entry(); 629 630 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned) 631 Register cnt_dwords_reg = R4_ARG2; // count (in dwords) 632 Register tmp1_reg = R5_ARG3; 633 Register tmp2_reg = R6_ARG4; 634 Register zero_reg = R7_ARG5; 635 636 // Procedure for large arrays (uses data cache block zero instruction). 637 Label dwloop, fast, fastloop, restloop, lastdword, done; 638 int cl_size = VM_Version::L1_data_cache_line_size(); 639 int cl_dwords = cl_size >> 3; 640 int cl_dwordaddr_bits = exact_log2(cl_dwords); 641 int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines. 642 643 // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16. 644 __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ... 645 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if number of dwords is even. 646 __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords 647 __ load_const_optimized(zero_reg, 0L); // Use as zero register. 648 649 __ cmpdi(CCR1, tmp2_reg, 0); // cnt_dwords even? 650 __ beq(CCR0, lastdword); // size <= 1 651 __ mtctr(tmp1_reg); // Speculatively preload counter for rest loop (>0). 652 __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included? 653 __ neg(tmp1_reg, base_ptr_reg); // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000 654 655 __ blt(CCR0, restloop); // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.) 656 __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16. 657 658 __ beq(CCR0, fast); // already 128byte aligned 659 __ mtctr(tmp1_reg); // Set ctr to hit 128byte boundary (0<ctr<cnt). 660 __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8) 661 662 // Clear in first cache line dword-by-dword if not already 128byte aligned. 663 __ bind(dwloop); 664 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. 665 __ addi(base_ptr_reg, base_ptr_reg, 8); 666 __ bdnz(dwloop); 667 668 // clear 128byte blocks 669 __ bind(fast); 670 __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8) 671 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if rest even 672 673 __ mtctr(tmp1_reg); // load counter 674 __ cmpdi(CCR1, tmp2_reg, 0); // rest even? 675 __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords 676 677 __ bind(fastloop); 678 __ dcbz(base_ptr_reg); // Clear 128byte aligned block. 679 __ addi(base_ptr_reg, base_ptr_reg, cl_size); 680 __ bdnz(fastloop); 681 682 //__ dcbtst(base_ptr_reg); // Indicate write access to last cache line. 683 __ beq(CCR0, lastdword); // rest<=1 684 __ mtctr(tmp1_reg); // load counter 685 686 // Clear rest. 
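        // (Illustration, not generated code: rldimi replicates the fill value
        //  in place. With value = 0x41, the "8 bit -> 16 bit" step above
        //  yields 0x4141; the "16 bit -> 32 bit" step below yields 0x41414141;
        //  the later "32 bit -> 64 bit" step yields 0x4141414141414141, so a
        //  single std then stores eight filled bytes.)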
687 __ bind(restloop); 688 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. 689 __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block. 690 __ addi(base_ptr_reg, base_ptr_reg, 16); 691 __ bdnz(restloop); 692 693 __ bind(lastdword); 694 __ beq(CCR1, done); 695 __ std(zero_reg, 0, base_ptr_reg); 696 __ bind(done); 697 __ blr(); // return 698 699 return start; 700 } 701 702 #if !defined(PRODUCT) 703 // Wrapper which calls oopDesc::is_oop_or_null() 704 // Only called by MacroAssembler::verify_oop 705 static void verify_oop_helper(const char* message, oop o) { 706 if (!oopDesc::is_oop_or_null(o)) { 707 fatal("%s", message); 708 } 709 ++ StubRoutines::_verify_oop_count; 710 } 711 #endif 712 713 // Return address of code to be called from code generated by 714 // MacroAssembler::verify_oop. 715 // 716 // Don't generate, rather use C++ code. 717 address generate_verify_oop() { 718 // this is actually a `FunctionDescriptor*'. 719 address start = 0; 720 721 #if !defined(PRODUCT) 722 start = CAST_FROM_FN_PTR(address, verify_oop_helper); 723 #endif 724 725 return start; 726 } 727 728 // Fairer handling of safepoints for native methods. 729 // 730 // Generate code which reads from the polling page. This special handling is needed as the 731 // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode 732 // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try 733 // to read from the safepoint polling page. 734 address generate_load_from_poll() { 735 StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll"); 736 address start = __ function_entry(); 737 __ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port 738 return start; 739 } 740 741 // -XX:+OptimizeFill : convert fill/copy loops into intrinsic 742 // 743 // The code is implemented(ported from sparc) as we believe it benefits JVM98, however 744 // tracing(-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all! 745 // 746 // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition 747 // for turning on loop predication optimization, and hence the behavior of "array range check" 748 // and "loop invariant check" could be influenced, which potentially boosted JVM98. 749 // 750 // Generate stub for disjoint short fill. If "aligned" is true, the 751 // "to" address is assumed to be heapword aligned. 752 // 753 // Arguments for generated stub: 754 // to: R3_ARG1 755 // value: R4_ARG2 756 // count: R5_ARG3 treated as signed 757 // 758 address generate_fill(BasicType t, bool aligned, const char* name) { 759 StubCodeMark mark(this, "StubRoutines", name); 760 address start = __ function_entry(); 761 762 const Register to = R3_ARG1; // source array address 763 const Register value = R4_ARG2; // fill value 764 const Register count = R5_ARG3; // elements count 765 const Register temp = R6_ARG4; // temp register 766 767 //assert_clean_int(count, O3); // Make sure 'count' is clean int. 768 769 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte; 770 Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes; 771 772 int shift = -1; 773 switch (t) { 774 case T_BYTE: 775 shift = 2; 776 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes). 777 __ rldimi(value, value, 8, 48); // 8 bit -> 16 bit 778 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 
779 __ blt(CCR0, L_fill_elements); 780 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit 781 break; 782 case T_SHORT: 783 shift = 1; 784 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes). 785 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit 786 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 787 __ blt(CCR0, L_fill_elements); 788 break; 789 case T_INT: 790 shift = 0; 791 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 792 __ blt(CCR0, L_fill_4_bytes); 793 break; 794 default: ShouldNotReachHere(); 795 } 796 797 if (!aligned && (t == T_BYTE || t == T_SHORT)) { 798 // Align source address at 4 bytes address boundary. 799 if (t == T_BYTE) { 800 // One byte misalignment happens only for byte arrays. 801 __ andi_(temp, to, 1); 802 __ beq(CCR0, L_skip_align1); 803 __ stb(value, 0, to); 804 __ addi(to, to, 1); 805 __ addi(count, count, -1); 806 __ bind(L_skip_align1); 807 } 808 // Two bytes misalignment happens only for byte and short (char) arrays. 809 __ andi_(temp, to, 2); 810 __ beq(CCR0, L_skip_align2); 811 __ sth(value, 0, to); 812 __ addi(to, to, 2); 813 __ addi(count, count, -(1 << (shift - 1))); 814 __ bind(L_skip_align2); 815 } 816 817 if (!aligned) { 818 // Align to 8 bytes, we know we are 4 byte aligned to start. 819 __ andi_(temp, to, 7); 820 __ beq(CCR0, L_fill_32_bytes); 821 __ stw(value, 0, to); 822 __ addi(to, to, 4); 823 __ addi(count, count, -(1 << shift)); 824 __ bind(L_fill_32_bytes); 825 } 826 827 __ li(temp, 8<<shift); // Prepare for 32 byte loop. 828 // Clone bytes int->long as above. 829 __ rldimi(value, value, 32, 0); // 32 bit -> 64 bit 830 831 Label L_check_fill_8_bytes; 832 // Fill 32-byte chunks. 833 __ subf_(count, temp, count); 834 __ blt(CCR0, L_check_fill_8_bytes); 835 836 Label L_fill_32_bytes_loop; 837 __ align(32); 838 __ bind(L_fill_32_bytes_loop); 839 840 __ std(value, 0, to); 841 __ std(value, 8, to); 842 __ subf_(count, temp, count); // Update count. 843 __ std(value, 16, to); 844 __ std(value, 24, to); 845 846 __ addi(to, to, 32); 847 __ bge(CCR0, L_fill_32_bytes_loop); 848 849 __ bind(L_check_fill_8_bytes); 850 __ add_(count, temp, count); 851 __ beq(CCR0, L_exit); 852 __ addic_(count, count, -(2 << shift)); 853 __ blt(CCR0, L_fill_4_bytes); 854 855 // 856 // Length is too short, just fill 8 bytes at a time. 857 // 858 Label L_fill_8_bytes_loop; 859 __ bind(L_fill_8_bytes_loop); 860 __ std(value, 0, to); 861 __ addic_(count, count, -(2 << shift)); 862 __ addi(to, to, 8); 863 __ bge(CCR0, L_fill_8_bytes_loop); 864 865 // Fill trailing 4 bytes. 866 __ bind(L_fill_4_bytes); 867 __ andi_(temp, count, 1<<shift); 868 __ beq(CCR0, L_fill_2_bytes); 869 870 __ stw(value, 0, to); 871 if (t == T_BYTE || t == T_SHORT) { 872 __ addi(to, to, 4); 873 // Fill trailing 2 bytes. 874 __ bind(L_fill_2_bytes); 875 __ andi_(temp, count, 1<<(shift-1)); 876 __ beq(CCR0, L_fill_byte); 877 __ sth(value, 0, to); 878 if (t == T_BYTE) { 879 __ addi(to, to, 2); 880 // Fill trailing byte. 881 __ bind(L_fill_byte); 882 __ andi_(count, count, 1); 883 __ beq(CCR0, L_exit); 884 __ stb(value, 0, to); 885 } else { 886 __ bind(L_fill_byte); 887 } 888 } else { 889 __ bind(L_fill_2_bytes); 890 } 891 __ bind(L_exit); 892 __ blr(); 893 894 // Handle copies less than 8 bytes. Int is handled elsewhere. 
895 if (t == T_BYTE) { 896 __ bind(L_fill_elements); 897 Label L_fill_2, L_fill_4; 898 __ andi_(temp, count, 1); 899 __ beq(CCR0, L_fill_2); 900 __ stb(value, 0, to); 901 __ addi(to, to, 1); 902 __ bind(L_fill_2); 903 __ andi_(temp, count, 2); 904 __ beq(CCR0, L_fill_4); 905 __ stb(value, 0, to); 906 __ stb(value, 0, to); 907 __ addi(to, to, 2); 908 __ bind(L_fill_4); 909 __ andi_(temp, count, 4); 910 __ beq(CCR0, L_exit); 911 __ stb(value, 0, to); 912 __ stb(value, 1, to); 913 __ stb(value, 2, to); 914 __ stb(value, 3, to); 915 __ blr(); 916 } 917 918 if (t == T_SHORT) { 919 Label L_fill_2; 920 __ bind(L_fill_elements); 921 __ andi_(temp, count, 1); 922 __ beq(CCR0, L_fill_2); 923 __ sth(value, 0, to); 924 __ addi(to, to, 2); 925 __ bind(L_fill_2); 926 __ andi_(temp, count, 2); 927 __ beq(CCR0, L_exit); 928 __ sth(value, 0, to); 929 __ sth(value, 2, to); 930 __ blr(); 931 } 932 return start; 933 } 934 935 inline void assert_positive_int(Register count) { 936 #ifdef ASSERT 937 __ srdi_(R0, count, 31); 938 __ asm_assert_eq("missing zero extend", 0xAFFE); 939 #endif 940 } 941 942 // Generate overlap test for array copy stubs. 943 // 944 // Input: 945 // R3_ARG1 - from 946 // R4_ARG2 - to 947 // R5_ARG3 - element count 948 // 949 void array_overlap_test(address no_overlap_target, int log2_elem_size) { 950 Register tmp1 = R6_ARG4; 951 Register tmp2 = R7_ARG5; 952 953 assert_positive_int(R5_ARG3); 954 955 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes 956 __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes 957 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison! 958 __ cmpld(CCR1, tmp1, tmp2); 959 __ crnand(CCR0, Assembler::less, CCR1, Assembler::less); 960 // Overlaps if Src before dst and distance smaller than size. 961 // Branch to forward copy routine otherwise (within range of 32kB). 962 __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target); 963 964 // need to copy backwards 965 } 966 967 // The guideline in the implementations of generate_disjoint_xxx_copy 968 // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with 969 // single instructions, but to avoid alignment interrupts (see subsequent 970 // comment). Furthermore, we try to minimize misaligned access, even 971 // though they cause no alignment interrupt. 972 // 973 // In Big-Endian mode, the PowerPC architecture requires implementations to 974 // handle automatically misaligned integer halfword and word accesses, 975 // word-aligned integer doubleword accesses, and word-aligned floating-point 976 // accesses. Other accesses may or may not generate an Alignment interrupt 977 // depending on the implementation. 978 // Alignment interrupt handling may require on the order of hundreds of cycles, 979 // so every effort should be made to avoid misaligned memory values. 980 // 981 // 982 // Generate stub for disjoint byte copy. If "aligned" is true, the 983 // "from" and "to" addresses are assumed to be heapword aligned. 
984 // 985 // Arguments for generated stub: 986 // from: R3_ARG1 987 // to: R4_ARG2 988 // count: R5_ARG3 treated as signed 989 // 990 address generate_disjoint_byte_copy(bool aligned, const char * name) { 991 StubCodeMark mark(this, "StubRoutines", name); 992 address start = __ function_entry(); 993 assert_positive_int(R5_ARG3); 994 995 Register tmp1 = R6_ARG4; 996 Register tmp2 = R7_ARG5; 997 Register tmp3 = R8_ARG6; 998 Register tmp4 = R9_ARG7; 999 1000 VectorSRegister tmp_vsr1 = VSR1; 1001 VectorSRegister tmp_vsr2 = VSR2; 1002 1003 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10; 1004 1005 // Don't try anything fancy if arrays don't have many elements. 1006 __ li(tmp3, 0); 1007 __ cmpwi(CCR0, R5_ARG3, 17); 1008 __ ble(CCR0, l_6); // copy 4 at a time 1009 1010 if (!aligned) { 1011 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1012 __ andi_(tmp1, tmp1, 3); 1013 __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy. 1014 1015 // Copy elements if necessary to align to 4 bytes. 1016 __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary. 1017 __ andi_(tmp1, tmp1, 3); 1018 __ beq(CCR0, l_2); 1019 1020 __ subf(R5_ARG3, tmp1, R5_ARG3); 1021 __ bind(l_9); 1022 __ lbz(tmp2, 0, R3_ARG1); 1023 __ addic_(tmp1, tmp1, -1); 1024 __ stb(tmp2, 0, R4_ARG2); 1025 __ addi(R3_ARG1, R3_ARG1, 1); 1026 __ addi(R4_ARG2, R4_ARG2, 1); 1027 __ bne(CCR0, l_9); 1028 1029 __ bind(l_2); 1030 } 1031 1032 // copy 8 elements at a time 1033 __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8 1034 __ andi_(tmp1, tmp2, 7); 1035 __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8 1036 1037 // copy a 2-element word if necessary to align to 8 bytes 1038 __ andi_(R0, R3_ARG1, 7); 1039 __ beq(CCR0, l_7); 1040 1041 __ lwzx(tmp2, R3_ARG1, tmp3); 1042 __ addi(R5_ARG3, R5_ARG3, -4); 1043 __ stwx(tmp2, R4_ARG2, tmp3); 1044 { // FasterArrayCopy 1045 __ addi(R3_ARG1, R3_ARG1, 4); 1046 __ addi(R4_ARG2, R4_ARG2, 4); 1047 } 1048 __ bind(l_7); 1049 1050 { // FasterArrayCopy 1051 __ cmpwi(CCR0, R5_ARG3, 31); 1052 __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain 1053 1054 __ srdi(tmp1, R5_ARG3, 5); 1055 __ andi_(R5_ARG3, R5_ARG3, 31); 1056 __ mtctr(tmp1); 1057 1058 if (!VM_Version::has_vsx()) { 1059 1060 __ bind(l_8); 1061 // Use unrolled version for mass copying (copy 32 elements a time) 1062 // Load feeding store gets zero latency on Power6, however not on Power5. 1063 // Therefore, the following sequence is made for the good of both. 1064 __ ld(tmp1, 0, R3_ARG1); 1065 __ ld(tmp2, 8, R3_ARG1); 1066 __ ld(tmp3, 16, R3_ARG1); 1067 __ ld(tmp4, 24, R3_ARG1); 1068 __ std(tmp1, 0, R4_ARG2); 1069 __ std(tmp2, 8, R4_ARG2); 1070 __ std(tmp3, 16, R4_ARG2); 1071 __ std(tmp4, 24, R4_ARG2); 1072 __ addi(R3_ARG1, R3_ARG1, 32); 1073 __ addi(R4_ARG2, R4_ARG2, 32); 1074 __ bdnz(l_8); 1075 1076 } else { // Processor supports VSX, so use it to mass copy. 1077 1078 // Prefetch the data into the L2 cache. 1079 __ dcbt(R3_ARG1, 0); 1080 1081 // If supported set DSCR pre-fetch to deepest. 1082 if (VM_Version::has_mfdscr()) { 1083 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1084 __ mtdscr(tmp2); 1085 } 1086 1087 __ li(tmp1, 16); 1088 1089 // Backbranch target aligned to 32-byte. Not 16-byte align as 1090 // loop contains < 8 instructions that fit inside a single 1091 // i-cache sector. 1092 __ align(32); 1093 1094 __ bind(l_10); 1095 // Use loop with VSX load/store instructions to 1096 // copy 32 elements a time. 
1097 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1098 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1099 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16 1100 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16 1101 __ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32 1102 __ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32 1103 __ bdnz(l_10); // Dec CTR and loop if not zero. 1104 1105 // Restore DSCR pre-fetch value. 1106 if (VM_Version::has_mfdscr()) { 1107 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1108 __ mtdscr(tmp2); 1109 } 1110 1111 } // VSX 1112 } // FasterArrayCopy 1113 1114 __ bind(l_6); 1115 1116 // copy 4 elements at a time 1117 __ cmpwi(CCR0, R5_ARG3, 4); 1118 __ blt(CCR0, l_1); 1119 __ srdi(tmp1, R5_ARG3, 2); 1120 __ mtctr(tmp1); // is > 0 1121 __ andi_(R5_ARG3, R5_ARG3, 3); 1122 1123 { // FasterArrayCopy 1124 __ addi(R3_ARG1, R3_ARG1, -4); 1125 __ addi(R4_ARG2, R4_ARG2, -4); 1126 __ bind(l_3); 1127 __ lwzu(tmp2, 4, R3_ARG1); 1128 __ stwu(tmp2, 4, R4_ARG2); 1129 __ bdnz(l_3); 1130 __ addi(R3_ARG1, R3_ARG1, 4); 1131 __ addi(R4_ARG2, R4_ARG2, 4); 1132 } 1133 1134 // do single element copy 1135 __ bind(l_1); 1136 __ cmpwi(CCR0, R5_ARG3, 0); 1137 __ beq(CCR0, l_4); 1138 1139 { // FasterArrayCopy 1140 __ mtctr(R5_ARG3); 1141 __ addi(R3_ARG1, R3_ARG1, -1); 1142 __ addi(R4_ARG2, R4_ARG2, -1); 1143 1144 __ bind(l_5); 1145 __ lbzu(tmp2, 1, R3_ARG1); 1146 __ stbu(tmp2, 1, R4_ARG2); 1147 __ bdnz(l_5); 1148 } 1149 1150 __ bind(l_4); 1151 __ li(R3_RET, 0); // return 0 1152 __ blr(); 1153 1154 return start; 1155 } 1156 1157 // Generate stub for conjoint byte copy. If "aligned" is true, the 1158 // "from" and "to" addresses are assumed to be heapword aligned. 1159 // 1160 // Arguments for generated stub: 1161 // from: R3_ARG1 1162 // to: R4_ARG2 1163 // count: R5_ARG3 treated as signed 1164 // 1165 address generate_conjoint_byte_copy(bool aligned, const char * name) { 1166 StubCodeMark mark(this, "StubRoutines", name); 1167 address start = __ function_entry(); 1168 assert_positive_int(R5_ARG3); 1169 1170 Register tmp1 = R6_ARG4; 1171 Register tmp2 = R7_ARG5; 1172 Register tmp3 = R8_ARG6; 1173 1174 address nooverlap_target = aligned ? 1175 STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) : 1176 STUB_ENTRY(jbyte_disjoint_arraycopy); 1177 1178 array_overlap_test(nooverlap_target, 0); 1179 // Do reverse copy. We assume the case of actual overlap is rare enough 1180 // that we don't have to optimize it. 1181 Label l_1, l_2; 1182 1183 __ b(l_2); 1184 __ bind(l_1); 1185 __ stbx(tmp1, R4_ARG2, R5_ARG3); 1186 __ bind(l_2); 1187 __ addic_(R5_ARG3, R5_ARG3, -1); 1188 __ lbzx(tmp1, R3_ARG1, R5_ARG3); 1189 __ bge(CCR0, l_1); 1190 1191 __ li(R3_RET, 0); // return 0 1192 __ blr(); 1193 1194 return start; 1195 } 1196 1197 // Generate stub for disjoint short copy. If "aligned" is true, the 1198 // "from" and "to" addresses are assumed to be heapword aligned. 1199 // 1200 // Arguments for generated stub: 1201 // from: R3_ARG1 1202 // to: R4_ARG2 1203 // elm.count: R5_ARG3 treated as signed 1204 // 1205 // Strategy for aligned==true: 1206 // 1207 // If length <= 9: 1208 // 1. copy 2 elements at a time (l_6) 1209 // 2. copy last element if original element count was odd (l_1) 1210 // 1211 // If length > 9: 1212 // 1. copy 4 elements at a time until less than 4 elements are left (l_7) 1213 // 2. copy 2 elements at a time until less than 2 elements are left (l_6) 1214 // 3. copy last element if one was left in step 2. 
(l_1) 1215 // 1216 // 1217 // Strategy for aligned==false: 1218 // 1219 // If length <= 9: same as aligned==true case, but NOTE: load/stores 1220 // can be unaligned (see comment below) 1221 // 1222 // If length > 9: 1223 // 1. continue with step 6. if the alignment of from and to mod 4 1224 // is different. 1225 // 2. align from and to to 4 bytes by copying 1 element if necessary 1226 // 3. at l_2 from and to are 4 byte aligned; continue with 1227 // 5. if they cannot be aligned to 8 bytes because they have 1228 // got different alignment mod 8. 1229 // 4. at this point we know that both, from and to, have the same 1230 // alignment mod 8, now copy one element if necessary to get 1231 // 8 byte alignment of from and to. 1232 // 5. copy 4 elements at a time until less than 4 elements are 1233 // left; depending on step 3. all load/stores are aligned or 1234 // either all loads or all stores are unaligned. 1235 // 6. copy 2 elements at a time until less than 2 elements are 1236 // left (l_6); arriving here from step 1., there is a chance 1237 // that all accesses are unaligned. 1238 // 7. copy last element if one was left in step 6. (l_1) 1239 // 1240 // There are unaligned data accesses using integer load/store 1241 // instructions in this stub. POWER allows such accesses. 1242 // 1243 // According to the manuals (PowerISA_V2.06_PUBLIC, Book II, 1244 // Chapter 2: Effect of Operand Placement on Performance) unaligned 1245 // integer load/stores have good performance. Only unaligned 1246 // floating point load/stores can have poor performance. 1247 // 1248 // TODO: 1249 // 1250 // 1. check if aligning the backbranch target of loops is beneficial 1251 // 1252 address generate_disjoint_short_copy(bool aligned, const char * name) { 1253 StubCodeMark mark(this, "StubRoutines", name); 1254 1255 Register tmp1 = R6_ARG4; 1256 Register tmp2 = R7_ARG5; 1257 Register tmp3 = R8_ARG6; 1258 Register tmp4 = R9_ARG7; 1259 1260 VectorSRegister tmp_vsr1 = VSR1; 1261 VectorSRegister tmp_vsr2 = VSR2; 1262 1263 address start = __ function_entry(); 1264 assert_positive_int(R5_ARG3); 1265 1266 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9; 1267 1268 // don't try anything fancy if arrays don't have many elements 1269 __ li(tmp3, 0); 1270 __ cmpwi(CCR0, R5_ARG3, 9); 1271 __ ble(CCR0, l_6); // copy 2 at a time 1272 1273 if (!aligned) { 1274 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1275 __ andi_(tmp1, tmp1, 3); 1276 __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy 1277 1278 // At this point it is guaranteed that both, from and to have the same alignment mod 4. 1279 1280 // Copy 1 element if necessary to align to 4 bytes. 1281 __ andi_(tmp1, R3_ARG1, 3); 1282 __ beq(CCR0, l_2); 1283 1284 __ lhz(tmp2, 0, R3_ARG1); 1285 __ addi(R3_ARG1, R3_ARG1, 2); 1286 __ sth(tmp2, 0, R4_ARG2); 1287 __ addi(R4_ARG2, R4_ARG2, 2); 1288 __ addi(R5_ARG3, R5_ARG3, -1); 1289 __ bind(l_2); 1290 1291 // At this point the positions of both, from and to, are at least 4 byte aligned. 1292 1293 // Copy 4 elements at a time. 1294 // Align to 8 bytes, but only if both, from and to, have same alignment mod 8. 1295 __ xorr(tmp2, R3_ARG1, R4_ARG2); 1296 __ andi_(tmp1, tmp2, 7); 1297 __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned 1298 1299 // Copy a 2-element word if necessary to align to 8 bytes. 
1300 __ andi_(R0, R3_ARG1, 7); 1301 __ beq(CCR0, l_7); 1302 1303 __ lwzx(tmp2, R3_ARG1, tmp3); 1304 __ addi(R5_ARG3, R5_ARG3, -2); 1305 __ stwx(tmp2, R4_ARG2, tmp3); 1306 { // FasterArrayCopy 1307 __ addi(R3_ARG1, R3_ARG1, 4); 1308 __ addi(R4_ARG2, R4_ARG2, 4); 1309 } 1310 } 1311 1312 __ bind(l_7); 1313 1314 // Copy 4 elements at a time; either the loads or the stores can 1315 // be unaligned if aligned == false. 1316 1317 { // FasterArrayCopy 1318 __ cmpwi(CCR0, R5_ARG3, 15); 1319 __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain 1320 1321 __ srdi(tmp1, R5_ARG3, 4); 1322 __ andi_(R5_ARG3, R5_ARG3, 15); 1323 __ mtctr(tmp1); 1324 1325 if (!VM_Version::has_vsx()) { 1326 1327 __ bind(l_8); 1328 // Use unrolled version for mass copying (copy 16 elements a time). 1329 // Load feeding store gets zero latency on Power6, however not on Power5. 1330 // Therefore, the following sequence is made for the good of both. 1331 __ ld(tmp1, 0, R3_ARG1); 1332 __ ld(tmp2, 8, R3_ARG1); 1333 __ ld(tmp3, 16, R3_ARG1); 1334 __ ld(tmp4, 24, R3_ARG1); 1335 __ std(tmp1, 0, R4_ARG2); 1336 __ std(tmp2, 8, R4_ARG2); 1337 __ std(tmp3, 16, R4_ARG2); 1338 __ std(tmp4, 24, R4_ARG2); 1339 __ addi(R3_ARG1, R3_ARG1, 32); 1340 __ addi(R4_ARG2, R4_ARG2, 32); 1341 __ bdnz(l_8); 1342 1343 } else { // Processor supports VSX, so use it to mass copy. 1344 1345 // Prefetch src data into L2 cache. 1346 __ dcbt(R3_ARG1, 0); 1347 1348 // If supported set DSCR pre-fetch to deepest. 1349 if (VM_Version::has_mfdscr()) { 1350 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1351 __ mtdscr(tmp2); 1352 } 1353 __ li(tmp1, 16); 1354 1355 // Backbranch target aligned to 32-byte. It's not aligned 16-byte 1356 // as loop contains < 8 instructions that fit inside a single 1357 // i-cache sector. 1358 __ align(32); 1359 1360 __ bind(l_9); 1361 // Use loop with VSX load/store instructions to 1362 // copy 16 elements a time. 1363 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load from src. 1364 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst. 1365 __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1); // Load from src + 16. 1366 __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16. 1367 __ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32. 1368 __ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32. 1369 __ bdnz(l_9); // Dec CTR and loop if not zero. 1370 1371 // Restore DSCR pre-fetch value. 1372 if (VM_Version::has_mfdscr()) { 1373 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1374 __ mtdscr(tmp2); 1375 } 1376 1377 } 1378 } // FasterArrayCopy 1379 __ bind(l_6); 1380 1381 // copy 2 elements at a time 1382 { // FasterArrayCopy 1383 __ cmpwi(CCR0, R5_ARG3, 2); 1384 __ blt(CCR0, l_1); 1385 __ srdi(tmp1, R5_ARG3, 1); 1386 __ andi_(R5_ARG3, R5_ARG3, 1); 1387 1388 __ addi(R3_ARG1, R3_ARG1, -4); 1389 __ addi(R4_ARG2, R4_ARG2, -4); 1390 __ mtctr(tmp1); 1391 1392 __ bind(l_3); 1393 __ lwzu(tmp2, 4, R3_ARG1); 1394 __ stwu(tmp2, 4, R4_ARG2); 1395 __ bdnz(l_3); 1396 1397 __ addi(R3_ARG1, R3_ARG1, 4); 1398 __ addi(R4_ARG2, R4_ARG2, 4); 1399 } 1400 1401 // do single element copy 1402 __ bind(l_1); 1403 __ cmpwi(CCR0, R5_ARG3, 0); 1404 __ beq(CCR0, l_4); 1405 1406 { // FasterArrayCopy 1407 __ mtctr(R5_ARG3); 1408 __ addi(R3_ARG1, R3_ARG1, -2); 1409 __ addi(R4_ARG2, R4_ARG2, -2); 1410 1411 __ bind(l_5); 1412 __ lhzu(tmp2, 2, R3_ARG1); 1413 __ sthu(tmp2, 2, R4_ARG2); 1414 __ bdnz(l_5); 1415 } 1416 __ bind(l_4); 1417 __ li(R3_RET, 0); // return 0 1418 __ blr(); 1419 1420 return start; 1421 } 1422 1423 // Generate stub for conjoint short copy. 
If "aligned" is true, the 1424 // "from" and "to" addresses are assumed to be heapword aligned. 1425 // 1426 // Arguments for generated stub: 1427 // from: R3_ARG1 1428 // to: R4_ARG2 1429 // count: R5_ARG3 treated as signed 1430 // 1431 address generate_conjoint_short_copy(bool aligned, const char * name) { 1432 StubCodeMark mark(this, "StubRoutines", name); 1433 address start = __ function_entry(); 1434 assert_positive_int(R5_ARG3); 1435 1436 Register tmp1 = R6_ARG4; 1437 Register tmp2 = R7_ARG5; 1438 Register tmp3 = R8_ARG6; 1439 1440 address nooverlap_target = aligned ? 1441 STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) : 1442 STUB_ENTRY(jshort_disjoint_arraycopy); 1443 1444 array_overlap_test(nooverlap_target, 1); 1445 1446 Label l_1, l_2; 1447 __ sldi(tmp1, R5_ARG3, 1); 1448 __ b(l_2); 1449 __ bind(l_1); 1450 __ sthx(tmp2, R4_ARG2, tmp1); 1451 __ bind(l_2); 1452 __ addic_(tmp1, tmp1, -2); 1453 __ lhzx(tmp2, R3_ARG1, tmp1); 1454 __ bge(CCR0, l_1); 1455 1456 __ li(R3_RET, 0); // return 0 1457 __ blr(); 1458 1459 return start; 1460 } 1461 1462 // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned" 1463 // is true, the "from" and "to" addresses are assumed to be heapword aligned. 1464 // 1465 // Arguments: 1466 // from: R3_ARG1 1467 // to: R4_ARG2 1468 // count: R5_ARG3 treated as signed 1469 // 1470 void generate_disjoint_int_copy_core(bool aligned) { 1471 Register tmp1 = R6_ARG4; 1472 Register tmp2 = R7_ARG5; 1473 Register tmp3 = R8_ARG6; 1474 Register tmp4 = R0; 1475 1476 VectorSRegister tmp_vsr1 = VSR1; 1477 VectorSRegister tmp_vsr2 = VSR2; 1478 1479 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7; 1480 1481 // for short arrays, just do single element copy 1482 __ li(tmp3, 0); 1483 __ cmpwi(CCR0, R5_ARG3, 5); 1484 __ ble(CCR0, l_2); 1485 1486 if (!aligned) { 1487 // check if arrays have same alignment mod 8. 1488 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1489 __ andi_(R0, tmp1, 7); 1490 // Not the same alignment, but ld and std just need to be 4 byte aligned. 1491 __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time 1492 1493 // copy 1 element to align to and from on an 8 byte boundary 1494 __ andi_(R0, R3_ARG1, 7); 1495 __ beq(CCR0, l_4); 1496 1497 __ lwzx(tmp2, R3_ARG1, tmp3); 1498 __ addi(R5_ARG3, R5_ARG3, -1); 1499 __ stwx(tmp2, R4_ARG2, tmp3); 1500 { // FasterArrayCopy 1501 __ addi(R3_ARG1, R3_ARG1, 4); 1502 __ addi(R4_ARG2, R4_ARG2, 4); 1503 } 1504 __ bind(l_4); 1505 } 1506 1507 { // FasterArrayCopy 1508 __ cmpwi(CCR0, R5_ARG3, 7); 1509 __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain 1510 1511 __ srdi(tmp1, R5_ARG3, 3); 1512 __ andi_(R5_ARG3, R5_ARG3, 7); 1513 __ mtctr(tmp1); 1514 1515 if (!VM_Version::has_vsx()) { 1516 1517 __ bind(l_6); 1518 // Use unrolled version for mass copying (copy 8 elements a time). 1519 // Load feeding store gets zero latency on power6, however not on power 5. 1520 // Therefore, the following sequence is made for the good of both. 1521 __ ld(tmp1, 0, R3_ARG1); 1522 __ ld(tmp2, 8, R3_ARG1); 1523 __ ld(tmp3, 16, R3_ARG1); 1524 __ ld(tmp4, 24, R3_ARG1); 1525 __ std(tmp1, 0, R4_ARG2); 1526 __ std(tmp2, 8, R4_ARG2); 1527 __ std(tmp3, 16, R4_ARG2); 1528 __ std(tmp4, 24, R4_ARG2); 1529 __ addi(R3_ARG1, R3_ARG1, 32); 1530 __ addi(R4_ARG2, R4_ARG2, 32); 1531 __ bdnz(l_6); 1532 1533 } else { // Processor supports VSX, so use it to mass copy. 1534 1535 // Prefetch the data into the L2 cache. 1536 __ dcbt(R3_ARG1, 0); 1537 1538 // If supported set DSCR pre-fetch to deepest. 
1539 if (VM_Version::has_mfdscr()) { 1540 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1541 __ mtdscr(tmp2); 1542 } 1543 1544 __ li(tmp1, 16); 1545 1546 // Backbranch target aligned to 32-byte. Not 16-byte align as 1547 // loop contains < 8 instructions that fit inside a single 1548 // i-cache sector. 1549 __ align(32); 1550 1551 __ bind(l_7); 1552 // Use loop with VSX load/store instructions to 1553 // copy 8 elements a time. 1554 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1555 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1556 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16 1557 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16 1558 __ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32 1559 __ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32 1560 __ bdnz(l_7); // Dec CTR and loop if not zero. 1561 1562 // Restore DSCR pre-fetch value. 1563 if (VM_Version::has_mfdscr()) { 1564 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1565 __ mtdscr(tmp2); 1566 } 1567 1568 } // VSX 1569 } // FasterArrayCopy 1570 1571 // copy 1 element at a time 1572 __ bind(l_2); 1573 __ cmpwi(CCR0, R5_ARG3, 0); 1574 __ beq(CCR0, l_1); 1575 1576 { // FasterArrayCopy 1577 __ mtctr(R5_ARG3); 1578 __ addi(R3_ARG1, R3_ARG1, -4); 1579 __ addi(R4_ARG2, R4_ARG2, -4); 1580 1581 __ bind(l_3); 1582 __ lwzu(tmp2, 4, R3_ARG1); 1583 __ stwu(tmp2, 4, R4_ARG2); 1584 __ bdnz(l_3); 1585 } 1586 1587 __ bind(l_1); 1588 return; 1589 } 1590 1591 // Generate stub for disjoint int copy. If "aligned" is true, the 1592 // "from" and "to" addresses are assumed to be heapword aligned. 1593 // 1594 // Arguments for generated stub: 1595 // from: R3_ARG1 1596 // to: R4_ARG2 1597 // count: R5_ARG3 treated as signed 1598 // 1599 address generate_disjoint_int_copy(bool aligned, const char * name) { 1600 StubCodeMark mark(this, "StubRoutines", name); 1601 address start = __ function_entry(); 1602 assert_positive_int(R5_ARG3); 1603 generate_disjoint_int_copy_core(aligned); 1604 __ li(R3_RET, 0); // return 0 1605 __ blr(); 1606 return start; 1607 } 1608 1609 // Generate core code for conjoint int copy (and oop copy on 1610 // 32-bit). If "aligned" is true, the "from" and "to" addresses 1611 // are assumed to be heapword aligned. 1612 // 1613 // Arguments: 1614 // from: R3_ARG1 1615 // to: R4_ARG2 1616 // count: R5_ARG3 treated as signed 1617 // 1618 void generate_conjoint_int_copy_core(bool aligned) { 1619 // Do reverse copy. We assume the case of actual overlap is rare enough 1620 // that we don't have to optimize it. 1621 1622 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7; 1623 1624 Register tmp1 = R6_ARG4; 1625 Register tmp2 = R7_ARG5; 1626 Register tmp3 = R8_ARG6; 1627 Register tmp4 = R0; 1628 1629 VectorSRegister tmp_vsr1 = VSR1; 1630 VectorSRegister tmp_vsr2 = VSR2; 1631 1632 { // FasterArrayCopy 1633 __ cmpwi(CCR0, R5_ARG3, 0); 1634 __ beq(CCR0, l_6); 1635 1636 __ sldi(R5_ARG3, R5_ARG3, 2); 1637 __ add(R3_ARG1, R3_ARG1, R5_ARG3); 1638 __ add(R4_ARG2, R4_ARG2, R5_ARG3); 1639 __ srdi(R5_ARG3, R5_ARG3, 2); 1640 1641 if (!aligned) { 1642 // check if arrays have same alignment mod 8. 1643 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1644 __ andi_(R0, tmp1, 7); 1645 // Not the same alignment, but ld and std just need to be 4 byte aligned. 
1646 __ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time 1647 1648 // copy 1 element to align to and from on an 8 byte boundary 1649 __ andi_(R0, R3_ARG1, 7); 1650 __ beq(CCR0, l_7); 1651 1652 __ addi(R3_ARG1, R3_ARG1, -4); 1653 __ addi(R4_ARG2, R4_ARG2, -4); 1654 __ addi(R5_ARG3, R5_ARG3, -1); 1655 __ lwzx(tmp2, R3_ARG1); 1656 __ stwx(tmp2, R4_ARG2); 1657 __ bind(l_7); 1658 } 1659 1660 __ cmpwi(CCR0, R5_ARG3, 7); 1661 __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain 1662 1663 __ srdi(tmp1, R5_ARG3, 3); 1664 __ andi(R5_ARG3, R5_ARG3, 7); 1665 __ mtctr(tmp1); 1666 1667 if (!VM_Version::has_vsx()) { 1668 __ bind(l_4); 1669 // Use unrolled version for mass copying (copy 4 elements a time). 1670 // Load feeding store gets zero latency on Power6, however not on Power5. 1671 // Therefore, the following sequence is made for the good of both. 1672 __ addi(R3_ARG1, R3_ARG1, -32); 1673 __ addi(R4_ARG2, R4_ARG2, -32); 1674 __ ld(tmp4, 24, R3_ARG1); 1675 __ ld(tmp3, 16, R3_ARG1); 1676 __ ld(tmp2, 8, R3_ARG1); 1677 __ ld(tmp1, 0, R3_ARG1); 1678 __ std(tmp4, 24, R4_ARG2); 1679 __ std(tmp3, 16, R4_ARG2); 1680 __ std(tmp2, 8, R4_ARG2); 1681 __ std(tmp1, 0, R4_ARG2); 1682 __ bdnz(l_4); 1683 } else { // Processor supports VSX, so use it to mass copy. 1684 // Prefetch the data into the L2 cache. 1685 __ dcbt(R3_ARG1, 0); 1686 1687 // If supported set DSCR pre-fetch to deepest. 1688 if (VM_Version::has_mfdscr()) { 1689 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1690 __ mtdscr(tmp2); 1691 } 1692 1693 __ li(tmp1, 16); 1694 1695 // Backbranch target aligned to 32-byte. Not 16-byte align as 1696 // loop contains < 8 instructions that fit inside a single 1697 // i-cache sector. 1698 __ align(32); 1699 1700 __ bind(l_4); 1701 // Use loop with VSX load/store instructions to 1702 // copy 8 elements a time. 1703 __ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32 1704 __ addi(R4_ARG2, R4_ARG2, -32); // Update dsc-=32 1705 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16 1706 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1707 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16 1708 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1709 __ bdnz(l_4); 1710 1711 // Restore DSCR pre-fetch value. 1712 if (VM_Version::has_mfdscr()) { 1713 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1714 __ mtdscr(tmp2); 1715 } 1716 } 1717 1718 __ cmpwi(CCR0, R5_ARG3, 0); 1719 __ beq(CCR0, l_6); 1720 1721 __ bind(l_5); 1722 __ mtctr(R5_ARG3); 1723 __ bind(l_3); 1724 __ lwz(R0, -4, R3_ARG1); 1725 __ stw(R0, -4, R4_ARG2); 1726 __ addi(R3_ARG1, R3_ARG1, -4); 1727 __ addi(R4_ARG2, R4_ARG2, -4); 1728 __ bdnz(l_3); 1729 1730 __ bind(l_6); 1731 } 1732 } 1733 1734 // Generate stub for conjoint int copy. If "aligned" is true, the 1735 // "from" and "to" addresses are assumed to be heapword aligned. 1736 // 1737 // Arguments for generated stub: 1738 // from: R3_ARG1 1739 // to: R4_ARG2 1740 // count: R5_ARG3 treated as signed 1741 // 1742 address generate_conjoint_int_copy(bool aligned, const char * name) { 1743 StubCodeMark mark(this, "StubRoutines", name); 1744 address start = __ function_entry(); 1745 assert_positive_int(R5_ARG3); 1746 address nooverlap_target = aligned ? 
1747 STUB_ENTRY(arrayof_jint_disjoint_arraycopy) : 1748 STUB_ENTRY(jint_disjoint_arraycopy); 1749 1750 array_overlap_test(nooverlap_target, 2); 1751 1752 generate_conjoint_int_copy_core(aligned); 1753 1754 __ li(R3_RET, 0); // return 0 1755 __ blr(); 1756 1757 return start; 1758 } 1759 1760 // Generate core code for disjoint long copy (and oop copy on 1761 // 64-bit). If "aligned" is true, the "from" and "to" addresses 1762 // are assumed to be heapword aligned. 1763 // 1764 // Arguments: 1765 // from: R3_ARG1 1766 // to: R4_ARG2 1767 // count: R5_ARG3 treated as signed 1768 // 1769 void generate_disjoint_long_copy_core(bool aligned) { 1770 Register tmp1 = R6_ARG4; 1771 Register tmp2 = R7_ARG5; 1772 Register tmp3 = R8_ARG6; 1773 Register tmp4 = R0; 1774 1775 Label l_1, l_2, l_3, l_4, l_5; 1776 1777 VectorSRegister tmp_vsr1 = VSR1; 1778 VectorSRegister tmp_vsr2 = VSR2; 1779 1780 { // FasterArrayCopy 1781 __ cmpwi(CCR0, R5_ARG3, 3); 1782 __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain 1783 1784 __ srdi(tmp1, R5_ARG3, 2); 1785 __ andi_(R5_ARG3, R5_ARG3, 3); 1786 __ mtctr(tmp1); 1787 1788 if (!VM_Version::has_vsx()) { 1789 __ bind(l_4); 1790 // Use unrolled version for mass copying (copy 4 elements a time). 1791 // Load feeding store gets zero latency on Power6, however not on Power5. 1792 // Therefore, the following sequence is made for the good of both. 1793 __ ld(tmp1, 0, R3_ARG1); 1794 __ ld(tmp2, 8, R3_ARG1); 1795 __ ld(tmp3, 16, R3_ARG1); 1796 __ ld(tmp4, 24, R3_ARG1); 1797 __ std(tmp1, 0, R4_ARG2); 1798 __ std(tmp2, 8, R4_ARG2); 1799 __ std(tmp3, 16, R4_ARG2); 1800 __ std(tmp4, 24, R4_ARG2); 1801 __ addi(R3_ARG1, R3_ARG1, 32); 1802 __ addi(R4_ARG2, R4_ARG2, 32); 1803 __ bdnz(l_4); 1804 1805 } else { // Processor supports VSX, so use it to mass copy. 1806 1807 // Prefetch the data into the L2 cache. 1808 __ dcbt(R3_ARG1, 0); 1809 1810 // If supported set DSCR pre-fetch to deepest. 1811 if (VM_Version::has_mfdscr()) { 1812 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1813 __ mtdscr(tmp2); 1814 } 1815 1816 __ li(tmp1, 16); 1817 1818 // Backbranch target aligned to 32-byte. Not 16-byte align as 1819 // loop contains < 8 instructions that fit inside a single 1820 // i-cache sector. 1821 __ align(32); 1822 1823 __ bind(l_5); 1824 // Use loop with VSX load/store instructions to 1825 // copy 4 elements a time. 1826 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1827 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1828 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16 1829 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16 1830 __ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32 1831 __ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32 1832 __ bdnz(l_5); // Dec CTR and loop if not zero. 1833 1834 // Restore DSCR pre-fetch value. 1835 if (VM_Version::has_mfdscr()) { 1836 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1837 __ mtdscr(tmp2); 1838 } 1839 1840 } // VSX 1841 } // FasterArrayCopy 1842 1843 // copy 1 element at a time 1844 __ bind(l_3); 1845 __ cmpwi(CCR0, R5_ARG3, 0); 1846 __ beq(CCR0, l_1); 1847 1848 { // FasterArrayCopy 1849 __ mtctr(R5_ARG3); 1850 __ addi(R3_ARG1, R3_ARG1, -8); 1851 __ addi(R4_ARG2, R4_ARG2, -8); 1852 1853 __ bind(l_2); 1854 __ ldu(R0, 8, R3_ARG1); 1855 __ stdu(R0, 8, R4_ARG2); 1856 __ bdnz(l_2); 1857 1858 } 1859 __ bind(l_1); 1860 } 1861 1862 // Generate stub for disjoint long copy. If "aligned" is true, the 1863 // "from" and "to" addresses are assumed to be heapword aligned. 
1864 // 1865 // Arguments for generated stub: 1866 // from: R3_ARG1 1867 // to: R4_ARG2 1868 // count: R5_ARG3 treated as signed 1869 // 1870 address generate_disjoint_long_copy(bool aligned, const char * name) { 1871 StubCodeMark mark(this, "StubRoutines", name); 1872 address start = __ function_entry(); 1873 assert_positive_int(R5_ARG3); 1874 generate_disjoint_long_copy_core(aligned); 1875 __ li(R3_RET, 0); // return 0 1876 __ blr(); 1877 1878 return start; 1879 } 1880 1881 // Generate core code for conjoint long copy (and oop copy on 1882 // 64-bit). If "aligned" is true, the "from" and "to" addresses 1883 // are assumed to be heapword aligned. 1884 // 1885 // Arguments: 1886 // from: R3_ARG1 1887 // to: R4_ARG2 1888 // count: R5_ARG3 treated as signed 1889 // 1890 void generate_conjoint_long_copy_core(bool aligned) { 1891 Register tmp1 = R6_ARG4; 1892 Register tmp2 = R7_ARG5; 1893 Register tmp3 = R8_ARG6; 1894 Register tmp4 = R0; 1895 1896 VectorSRegister tmp_vsr1 = VSR1; 1897 VectorSRegister tmp_vsr2 = VSR2; 1898 1899 Label l_1, l_2, l_3, l_4, l_5; 1900 1901 __ cmpwi(CCR0, R5_ARG3, 0); 1902 __ beq(CCR0, l_1); 1903 1904 { // FasterArrayCopy 1905 __ sldi(R5_ARG3, R5_ARG3, 3); 1906 __ add(R3_ARG1, R3_ARG1, R5_ARG3); 1907 __ add(R4_ARG2, R4_ARG2, R5_ARG3); 1908 __ srdi(R5_ARG3, R5_ARG3, 3); 1909 1910 __ cmpwi(CCR0, R5_ARG3, 3); 1911 __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain 1912 1913 __ srdi(tmp1, R5_ARG3, 2); 1914 __ andi(R5_ARG3, R5_ARG3, 3); 1915 __ mtctr(tmp1); 1916 1917 if (!VM_Version::has_vsx()) { 1918 __ bind(l_4); 1919 // Use unrolled version for mass copying (copy 4 elements a time). 1920 // Load feeding store gets zero latency on Power6, however not on Power5. 1921 // Therefore, the following sequence is made for the good of both. 1922 __ addi(R3_ARG1, R3_ARG1, -32); 1923 __ addi(R4_ARG2, R4_ARG2, -32); 1924 __ ld(tmp4, 24, R3_ARG1); 1925 __ ld(tmp3, 16, R3_ARG1); 1926 __ ld(tmp2, 8, R3_ARG1); 1927 __ ld(tmp1, 0, R3_ARG1); 1928 __ std(tmp4, 24, R4_ARG2); 1929 __ std(tmp3, 16, R4_ARG2); 1930 __ std(tmp2, 8, R4_ARG2); 1931 __ std(tmp1, 0, R4_ARG2); 1932 __ bdnz(l_4); 1933 } else { // Processor supports VSX, so use it to mass copy. 1934 // Prefetch the data into the L2 cache. 1935 __ dcbt(R3_ARG1, 0); 1936 1937 // If supported set DSCR pre-fetch to deepest. 1938 if (VM_Version::has_mfdscr()) { 1939 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1940 __ mtdscr(tmp2); 1941 } 1942 1943 __ li(tmp1, 16); 1944 1945 // Backbranch target aligned to 32-byte. Not 16-byte align as 1946 // loop contains < 8 instructions that fit inside a single 1947 // i-cache sector. 1948 __ align(32); 1949 1950 __ bind(l_4); 1951 // Use loop with VSX load/store instructions to 1952 // copy 4 elements a time. 1953 __ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32 1954 __ addi(R4_ARG2, R4_ARG2, -32); // Update dsc-=32 1955 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16 1956 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1957 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16 1958 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1959 __ bdnz(l_4); 1960 1961 // Restore DSCR pre-fetch value. 
1962 if (VM_Version::has_mfdscr()) { 1963 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1964 __ mtdscr(tmp2); 1965 } 1966 } 1967 1968 __ cmpwi(CCR0, R5_ARG3, 0); 1969 __ beq(CCR0, l_1); 1970 1971 __ bind(l_5); 1972 __ mtctr(R5_ARG3); 1973 __ bind(l_3); 1974 __ ld(R0, -8, R3_ARG1); 1975 __ std(R0, -8, R4_ARG2); 1976 __ addi(R3_ARG1, R3_ARG1, -8); 1977 __ addi(R4_ARG2, R4_ARG2, -8); 1978 __ bdnz(l_3); 1979 1980 } 1981 __ bind(l_1); 1982 } 1983 1984 // Generate stub for conjoint long copy. If "aligned" is true, the 1985 // "from" and "to" addresses are assumed to be heapword aligned. 1986 // 1987 // Arguments for generated stub: 1988 // from: R3_ARG1 1989 // to: R4_ARG2 1990 // count: R5_ARG3 treated as signed 1991 // 1992 address generate_conjoint_long_copy(bool aligned, const char * name) { 1993 StubCodeMark mark(this, "StubRoutines", name); 1994 address start = __ function_entry(); 1995 assert_positive_int(R5_ARG3); 1996 address nooverlap_target = aligned ? 1997 STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) : 1998 STUB_ENTRY(jlong_disjoint_arraycopy); 1999 2000 array_overlap_test(nooverlap_target, 3); 2001 generate_conjoint_long_copy_core(aligned); 2002 2003 __ li(R3_RET, 0); // return 0 2004 __ blr(); 2005 2006 return start; 2007 } 2008 2009 // Generate stub for conjoint oop copy. If "aligned" is true, the 2010 // "from" and "to" addresses are assumed to be heapword aligned. 2011 // 2012 // Arguments for generated stub: 2013 // from: R3_ARG1 2014 // to: R4_ARG2 2015 // count: R5_ARG3 treated as signed 2016 // dest_uninitialized: G1 support 2017 // 2018 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 2019 StubCodeMark mark(this, "StubRoutines", name); 2020 2021 address start = __ function_entry(); 2022 assert_positive_int(R5_ARG3); 2023 address nooverlap_target = aligned ? 2024 STUB_ENTRY(arrayof_oop_disjoint_arraycopy) : 2025 STUB_ENTRY(oop_disjoint_arraycopy); 2026 2027 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2028 if (dest_uninitialized) { 2029 decorators |= IS_DEST_UNINITIALIZED; 2030 } 2031 if (aligned) { 2032 decorators |= ARRAYCOPY_ALIGNED; 2033 } 2034 2035 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2036 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); 2037 2038 if (UseCompressedOops) { 2039 array_overlap_test(nooverlap_target, 2); 2040 generate_conjoint_int_copy_core(aligned); 2041 } else { 2042 array_overlap_test(nooverlap_target, 3); 2043 generate_conjoint_long_copy_core(aligned); 2044 } 2045 2046 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); 2047 __ li(R3_RET, 0); // return 0 2048 __ blr(); 2049 return start; 2050 } 2051 2052 // Generate stub for disjoint oop copy. If "aligned" is true, the 2053 // "from" and "to" addresses are assumed to be heapword aligned. 
2054 // 2055 // Arguments for generated stub: 2056 // from: R3_ARG1 2057 // to: R4_ARG2 2058 // count: R5_ARG3 treated as signed 2059 // dest_uninitialized: G1 support 2060 // 2061 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 2062 StubCodeMark mark(this, "StubRoutines", name); 2063 address start = __ function_entry(); 2064 assert_positive_int(R5_ARG3); 2065 2066 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2067 if (dest_uninitialized) { 2068 decorators |= IS_DEST_UNINITIALIZED; 2069 } 2070 if (aligned) { 2071 decorators |= ARRAYCOPY_ALIGNED; 2072 } 2073 2074 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2075 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); 2076 2077 if (UseCompressedOops) { 2078 generate_disjoint_int_copy_core(aligned); 2079 } else { 2080 generate_disjoint_long_copy_core(aligned); 2081 } 2082 2083 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); 2084 __ li(R3_RET, 0); // return 0 2085 __ blr(); 2086 2087 return start; 2088 } 2089 2090 2091 // Helper for generating a dynamic type check. 2092 // Smashes only the given temp registers. 2093 void generate_type_check(Register sub_klass, 2094 Register super_check_offset, 2095 Register super_klass, 2096 Register temp, 2097 Label& L_success) { 2098 assert_different_registers(sub_klass, super_check_offset, super_klass); 2099 2100 BLOCK_COMMENT("type_check:"); 2101 2102 Label L_miss; 2103 2104 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL, 2105 super_check_offset); 2106 __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL); 2107 2108 // Fall through on failure! 2109 __ bind(L_miss); 2110 } 2111 2112 2113 // Generate stub for checked oop copy. 2114 // 2115 // Arguments for generated stub: 2116 // from: R3 2117 // to: R4 2118 // count: R5 treated as signed 2119 // ckoff: R6 (super_check_offset) 2120 // ckval: R7 (super_klass) 2121 // ret: R3 zero for success; (-1^K) where K is partial transfer count 2122 // 2123 address generate_checkcast_copy(const char *name, bool dest_uninitialized) { 2124 2125 const Register R3_from = R3_ARG1; // source array address 2126 const Register R4_to = R4_ARG2; // destination array address 2127 const Register R5_count = R5_ARG3; // elements count 2128 const Register R6_ckoff = R6_ARG4; // super_check_offset 2129 const Register R7_ckval = R7_ARG5; // super_klass 2130 2131 const Register R8_offset = R8_ARG6; // loop var, with stride wordSize 2132 const Register R9_remain = R9_ARG7; // loop var, with stride -1 2133 const Register R10_oop = R10_ARG8; // actual oop copied 2134 const Register R11_klass = R11_scratch1; // oop._klass 2135 const Register R12_tmp = R12_scratch2; 2136 2137 const Register R2_minus1 = R2; 2138 2139 //__ align(CodeEntryAlignment); 2140 StubCodeMark mark(this, "StubRoutines", name); 2141 address start = __ function_entry(); 2142 2143 // Assert that int is 64 bit sign extended and arrays are not conjoint. 2144 #ifdef ASSERT 2145 { 2146 assert_positive_int(R5_ARG3); 2147 const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2; 2148 Label no_overlap; 2149 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes 2150 __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes 2151 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison! 
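// CCR0 holds src < dst (unsigned); CCR1, set below, holds (dst - src) < size
// in bytes. The crnand collapses this to CCR0.lt = !(both), so the blt skips
// the stop() exactly when the regions do not overlap.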
2152 __ cmpld(CCR1, tmp1, tmp2); 2153 __ crnand(CCR0, Assembler::less, CCR1, Assembler::less); 2154 // Overlaps if Src before dst and distance smaller than size. 2155 // Branch to forward copy routine otherwise. 2156 __ blt(CCR0, no_overlap); 2157 __ stop("overlap in checkcast_copy", 0x9543); 2158 __ bind(no_overlap); 2159 } 2160 #endif 2161 2162 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST; 2163 if (dest_uninitialized) { 2164 decorators |= IS_DEST_UNINITIALIZED; 2165 } 2166 2167 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2168 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval); 2169 2170 //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET); 2171 2172 Label load_element, store_element, store_null, success, do_epilogue; 2173 __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it. 2174 __ li(R8_offset, 0); // Offset from start of arrays. 2175 __ li(R2_minus1, -1); 2176 __ bne(CCR0, load_element); 2177 2178 // Empty array: Nothing to do. 2179 __ li(R3_RET, 0); // Return 0 on (trivial) success. 2180 __ blr(); 2181 2182 // ======== begin loop ======== 2183 // (Entry is load_element.) 2184 __ align(OptoLoopAlignment); 2185 __ bind(store_element); 2186 if (UseCompressedOops) { 2187 __ encode_heap_oop_not_null(R10_oop); 2188 __ bind(store_null); 2189 __ stw(R10_oop, R8_offset, R4_to); 2190 } else { 2191 __ bind(store_null); 2192 __ std(R10_oop, R8_offset, R4_to); 2193 } 2194 2195 __ addi(R8_offset, R8_offset, heapOopSize); // Step to next offset. 2196 __ add_(R9_remain, R2_minus1, R9_remain); // Decrement the count. 2197 __ beq(CCR0, success); 2198 2199 // ======== loop entry is here ======== 2200 __ bind(load_element); 2201 __ load_heap_oop(R10_oop, R8_offset, R3_from, R12_tmp, noreg, false, AS_RAW, &store_null); 2202 2203 __ load_klass(R11_klass, R10_oop); // Query the object klass. 2204 2205 generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp, 2206 // Branch to this on success: 2207 store_element); 2208 // ======== end loop ======== 2209 2210 // It was a real error; we must depend on the caller to finish the job. 2211 // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops. 2212 // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain), 2213 // and report their number to the caller. 2214 __ subf_(R5_count, R9_remain, R5_count); 2215 __ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller 2216 __ bne(CCR0, do_epilogue); 2217 __ blr(); 2218 2219 __ bind(success); 2220 __ li(R3_RET, 0); 2221 2222 __ bind(do_epilogue); 2223 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET); 2224 2225 __ blr(); 2226 return start; 2227 } 2228 2229 2230 // Generate 'unsafe' array copy stub. 2231 // Though just as safe as the other stubs, it takes an unscaled 2232 // size_t argument instead of an element count. 2233 // 2234 // Arguments for generated stub: 2235 // from: R3 2236 // to: R4 2237 // count: R5 byte count, treated as ssize_t, can be zero 2238 // 2239 // Examines the alignment of the operands and dispatches 2240 // to a long, int, short, or byte copy loop. 
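// The dispatch below ORs both addresses and the byte count together, so the
// widest copy loop is chosen only when all three share its alignment; the byte
// count is shifted down to the chosen stub's element count before branching
// (the byte path needs no scaling).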
2241 // 2242 address generate_unsafe_copy(const char* name, 2243 address byte_copy_entry, 2244 address short_copy_entry, 2245 address int_copy_entry, 2246 address long_copy_entry) { 2247 2248 const Register R3_from = R3_ARG1; // source array address 2249 const Register R4_to = R4_ARG2; // destination array address 2250 const Register R5_count = R5_ARG3; // elements count (as long on PPC64) 2251 2252 const Register R6_bits = R6_ARG4; // test copy of low bits 2253 const Register R7_tmp = R7_ARG5; 2254 2255 //__ align(CodeEntryAlignment); 2256 StubCodeMark mark(this, "StubRoutines", name); 2257 address start = __ function_entry(); 2258 2259 // Bump this on entry, not on exit: 2260 //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp); 2261 2262 Label short_copy, int_copy, long_copy; 2263 2264 __ orr(R6_bits, R3_from, R4_to); 2265 __ orr(R6_bits, R6_bits, R5_count); 2266 __ andi_(R0, R6_bits, (BytesPerLong-1)); 2267 __ beq(CCR0, long_copy); 2268 2269 __ andi_(R0, R6_bits, (BytesPerInt-1)); 2270 __ beq(CCR0, int_copy); 2271 2272 __ andi_(R0, R6_bits, (BytesPerShort-1)); 2273 __ beq(CCR0, short_copy); 2274 2275 // byte_copy: 2276 __ b(byte_copy_entry); 2277 2278 __ bind(short_copy); 2279 __ srwi(R5_count, R5_count, LogBytesPerShort); 2280 __ b(short_copy_entry); 2281 2282 __ bind(int_copy); 2283 __ srwi(R5_count, R5_count, LogBytesPerInt); 2284 __ b(int_copy_entry); 2285 2286 __ bind(long_copy); 2287 __ srwi(R5_count, R5_count, LogBytesPerLong); 2288 __ b(long_copy_entry); 2289 2290 return start; 2291 } 2292 2293 2294 // Perform range checks on the proposed arraycopy. 2295 // Kills the two temps, but nothing else. 2296 // Also, clean the sign bits of src_pos and dst_pos. 2297 void arraycopy_range_checks(Register src, // source array oop 2298 Register src_pos, // source position 2299 Register dst, // destination array oop 2300 Register dst_pos, // destination position 2301 Register length, // length of copy 2302 Register temp1, Register temp2, 2303 Label& L_failed) { 2304 BLOCK_COMMENT("arraycopy_range_checks:"); 2305 2306 const Register array_length = temp1; // scratch 2307 const Register end_pos = temp2; // scratch 2308 2309 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2310 __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src); 2311 __ add(end_pos, src_pos, length); // src_pos + length 2312 __ cmpd(CCR0, end_pos, array_length); 2313 __ bgt(CCR0, L_failed); 2314 2315 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2316 __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst); 2317 __ add(end_pos, dst_pos, length); // src_pos + length 2318 __ cmpd(CCR0, end_pos, array_length); 2319 __ bgt(CCR0, L_failed); 2320 2321 BLOCK_COMMENT("arraycopy_range_checks done"); 2322 } 2323 2324 2325 // 2326 // Generate generic array copy stubs 2327 // 2328 // Input: 2329 // R3 - src oop 2330 // R4 - src_pos 2331 // R5 - dst oop 2332 // R6 - dst_pos 2333 // R7 - element count 2334 // 2335 // Output: 2336 // R3 == 0 - success 2337 // R3 == -1 - need to call System.arraycopy 2338 // 2339 address generate_generic_copy(const char *name, 2340 address entry_jbyte_arraycopy, 2341 address entry_jshort_arraycopy, 2342 address entry_jint_arraycopy, 2343 address entry_oop_arraycopy, 2344 address entry_disjoint_oop_arraycopy, 2345 address entry_jlong_arraycopy, 2346 address entry_checkcast_arraycopy) { 2347 Label L_failed, L_objArray; 2348 2349 // Input registers 2350 const Register src = R3_ARG1; // source array oop 2351 const Register src_pos = R4_ARG2; // source 
position 2352 const Register dst = R5_ARG3; // destination array oop 2353 const Register dst_pos = R6_ARG4; // destination position 2354 const Register length = R7_ARG5; // elements count 2355 2356 // registers used as temp 2357 const Register src_klass = R8_ARG6; // source array klass 2358 const Register dst_klass = R9_ARG7; // destination array klass 2359 const Register lh = R10_ARG8; // layout handler 2360 const Register temp = R2; 2361 2362 //__ align(CodeEntryAlignment); 2363 StubCodeMark mark(this, "StubRoutines", name); 2364 address start = __ function_entry(); 2365 2366 // Bump this on entry, not on exit: 2367 //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp); 2368 2369 // In principle, the int arguments could be dirty. 2370 2371 //----------------------------------------------------------------------- 2372 // Assembler stubs will be used for this call to arraycopy 2373 // if the following conditions are met: 2374 // 2375 // (1) src and dst must not be null. 2376 // (2) src_pos must not be negative. 2377 // (3) dst_pos must not be negative. 2378 // (4) length must not be negative. 2379 // (5) src klass and dst klass should be the same and not NULL. 2380 // (6) src and dst should be arrays. 2381 // (7) src_pos + length must not exceed length of src. 2382 // (8) dst_pos + length must not exceed length of dst. 2383 BLOCK_COMMENT("arraycopy initial argument checks"); 2384 2385 __ cmpdi(CCR1, src, 0); // if (src == NULL) return -1; 2386 __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1; 2387 __ cmpdi(CCR5, dst, 0); // if (dst == NULL) return -1; 2388 __ cror(CCR1, Assembler::equal, CCR0, Assembler::less); 2389 __ extsw_(dst_pos, dst_pos); // if (src_pos < 0) return -1; 2390 __ cror(CCR5, Assembler::equal, CCR0, Assembler::less); 2391 __ extsw_(length, length); // if (length < 0) return -1; 2392 __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal); 2393 __ cror(CCR1, Assembler::equal, CCR0, Assembler::less); 2394 __ beq(CCR1, L_failed); 2395 2396 BLOCK_COMMENT("arraycopy argument klass checks"); 2397 __ load_klass(src_klass, src); 2398 __ load_klass(dst_klass, dst); 2399 2400 // Load layout helper 2401 // 2402 // |array_tag| | header_size | element_type | |log2_element_size| 2403 // 32 30 24 16 8 2 0 2404 // 2405 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2406 // 2407 2408 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2409 2410 // Load 32-bits signed value. Use br() instruction with it to check icc. 2411 __ lwz(lh, lh_offset, src_klass); 2412 2413 // Handle objArrays completely differently... 2414 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2415 __ load_const_optimized(temp, objArray_lh, R0); 2416 __ cmpw(CCR0, lh, temp); 2417 __ beq(CCR0, L_objArray); 2418 2419 __ cmpd(CCR5, src_klass, dst_klass); // if (src->klass() != dst->klass()) return -1; 2420 __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1; 2421 2422 __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less); 2423 __ beq(CCR5, L_failed); 2424 2425 // At this point, it is known to be a typeArray (array_tag 0x3). 
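// The debug-only check below guards this assumption; afterwards the layout
// helper in 'lh' is decoded into the array header size and log2 element size,
// which are used to form the raw source and destination addresses.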
2426 #ifdef ASSERT 2427 { Label L; 2428 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift); 2429 __ load_const_optimized(temp, lh_prim_tag_in_place, R0); 2430 __ cmpw(CCR0, lh, temp); 2431 __ bge(CCR0, L); 2432 __ stop("must be a primitive array"); 2433 __ bind(L); 2434 } 2435 #endif 2436 2437 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2438 temp, dst_klass, L_failed); 2439 2440 // TypeArrayKlass 2441 // 2442 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2443 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2444 // 2445 2446 const Register offset = dst_klass; // array offset 2447 const Register elsize = src_klass; // log2 element size 2448 2449 __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1)); 2450 __ andi(elsize, lh, Klass::_lh_log2_element_size_mask); 2451 __ add(src, offset, src); // src array offset 2452 __ add(dst, offset, dst); // dst array offset 2453 2454 // Next registers should be set before the jump to corresponding stub. 2455 const Register from = R3_ARG1; // source array address 2456 const Register to = R4_ARG2; // destination array address 2457 const Register count = R5_ARG3; // elements count 2458 2459 // 'from', 'to', 'count' registers should be set in this order 2460 // since they are the same as 'src', 'src_pos', 'dst'. 2461 2462 BLOCK_COMMENT("scale indexes to element size"); 2463 __ sld(src_pos, src_pos, elsize); 2464 __ sld(dst_pos, dst_pos, elsize); 2465 __ add(from, src_pos, src); // src_addr 2466 __ add(to, dst_pos, dst); // dst_addr 2467 __ mr(count, length); // length 2468 2469 BLOCK_COMMENT("choose copy loop based on element size"); 2470 // Using conditional branches with range 32kB. 2471 const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal); 2472 __ cmpwi(CCR0, elsize, 0); 2473 __ bc(bo, bi, entry_jbyte_arraycopy); 2474 __ cmpwi(CCR0, elsize, LogBytesPerShort); 2475 __ bc(bo, bi, entry_jshort_arraycopy); 2476 __ cmpwi(CCR0, elsize, LogBytesPerInt); 2477 __ bc(bo, bi, entry_jint_arraycopy); 2478 #ifdef ASSERT 2479 { Label L; 2480 __ cmpwi(CCR0, elsize, LogBytesPerLong); 2481 __ beq(CCR0, L); 2482 __ stop("must be long copy, but elsize is wrong"); 2483 __ bind(L); 2484 } 2485 #endif 2486 __ b(entry_jlong_arraycopy); 2487 2488 // ObjArrayKlass 2489 __ bind(L_objArray); 2490 // live at this point: src_klass, dst_klass, src[_pos], dst[_pos], length 2491 2492 Label L_disjoint_plain_copy, L_checkcast_copy; 2493 // test array classes for subtyping 2494 __ cmpd(CCR0, src_klass, dst_klass); // usual case is exact equality 2495 __ bne(CCR0, L_checkcast_copy); 2496 2497 // Identically typed arrays can be copied without element-wise checks. 2498 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2499 temp, lh, L_failed); 2500 2501 __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset 2502 __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset 2503 __ sldi(src_pos, src_pos, LogBytesPerHeapOop); 2504 __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop); 2505 __ add(from, src_pos, src); // src_addr 2506 __ add(to, dst_pos, dst); // dst_addr 2507 __ mr(count, length); // length 2508 __ b(entry_oop_arraycopy); 2509 2510 __ bind(L_checkcast_copy); 2511 // live at this point: src_klass, dst_klass 2512 { 2513 // Before looking at dst.length, make sure dst is also an objArray. 
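// 'lh' still holds the objArray layout helper loaded from src_klass, so an
// equality compare against dst_klass' layout helper below is sufficient.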
2514 __ lwz(temp, lh_offset, dst_klass); 2515 __ cmpw(CCR0, lh, temp); 2516 __ bne(CCR0, L_failed); 2517 2518 // It is safe to examine both src.length and dst.length. 2519 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2520 temp, lh, L_failed); 2521 2522 // Marshal the base address arguments now, freeing registers. 2523 __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset 2524 __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset 2525 __ sldi(src_pos, src_pos, LogBytesPerHeapOop); 2526 __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop); 2527 __ add(from, src_pos, src); // src_addr 2528 __ add(to, dst_pos, dst); // dst_addr 2529 __ mr(count, length); // length 2530 2531 Register sco_temp = R6_ARG4; // This register is free now. 2532 assert_different_registers(from, to, count, sco_temp, 2533 dst_klass, src_klass); 2534 2535 // Generate the type check. 2536 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2537 __ lwz(sco_temp, sco_offset, dst_klass); 2538 generate_type_check(src_klass, sco_temp, dst_klass, 2539 temp, L_disjoint_plain_copy); 2540 2541 // Fetch destination element klass from the ObjArrayKlass header. 2542 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2543 2544 // The checkcast_copy loop needs two extra arguments: 2545 __ ld(R7_ARG5, ek_offset, dst_klass); // dest elem klass 2546 __ lwz(R6_ARG4, sco_offset, R7_ARG5); // sco of elem klass 2547 __ b(entry_checkcast_arraycopy); 2548 } 2549 2550 __ bind(L_disjoint_plain_copy); 2551 __ b(entry_disjoint_oop_arraycopy); 2552 2553 __ bind(L_failed); 2554 __ li(R3_RET, -1); // return -1 2555 __ blr(); 2556 return start; 2557 } 2558 2559 // Arguments for generated stub: 2560 // R3_ARG1 - source byte array address 2561 // R4_ARG2 - destination byte array address 2562 // R5_ARG3 - round key array 2563 address generate_aescrypt_encryptBlock() { 2564 assert(UseAES, "need AES instructions and misaligned SSE support"); 2565 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2566 2567 address start = __ function_entry(); 2568 2569 Label L_doLast; 2570 2571 Register from = R3_ARG1; // source array address 2572 Register to = R4_ARG2; // destination array address 2573 Register key = R5_ARG3; // round key array 2574 2575 Register keylen = R8; 2576 Register temp = R9; 2577 Register keypos = R10; 2578 Register fifteen = R12; 2579 2580 VectorRegister vRet = VR0; 2581 2582 VectorRegister vKey1 = VR1; 2583 VectorRegister vKey2 = VR2; 2584 VectorRegister vKey3 = VR3; 2585 VectorRegister vKey4 = VR4; 2586 2587 VectorRegister fromPerm = VR5; 2588 VectorRegister keyPerm = VR6; 2589 VectorRegister toPerm = VR7; 2590 VectorRegister fSplt = VR8; 2591 2592 VectorRegister vTmp1 = VR9; 2593 VectorRegister vTmp2 = VR10; 2594 VectorRegister vTmp3 = VR11; 2595 VectorRegister vTmp4 = VR12; 2596 2597 __ li (fifteen, 15); 2598 2599 // load unaligned from[0-15] to vsRet 2600 __ lvx (vRet, from); 2601 __ lvx (vTmp1, fifteen, from); 2602 __ lvsl (fromPerm, from); 2603 #ifdef VM_LITTLE_ENDIAN 2604 __ vspltisb (fSplt, 0x0f); 2605 __ vxor (fromPerm, fromPerm, fSplt); 2606 #endif 2607 __ vperm (vRet, vRet, vTmp1, fromPerm); 2608 2609 // load keylen (44 or 52 or 60) 2610 __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); 2611 2612 // to load keys 2613 __ load_perm (keyPerm, key); 2614 #ifdef VM_LITTLE_ENDIAN 2615 __ vspltisb (vTmp2, -16); 2616 __ vrld (keyPerm, keyPerm, vTmp2); 2617 __ vrld (keyPerm, keyPerm, vTmp2); 2618 
__ vsldoi (keyPerm, keyPerm, keyPerm, 8); 2619 #endif 2620 2621 // load the 1st round key to vTmp1 2622 __ lvx (vTmp1, key); 2623 __ li (keypos, 16); 2624 __ lvx (vKey1, keypos, key); 2625 __ vec_perm (vTmp1, vKey1, keyPerm); 2626 2627 // 1st round 2628 __ vxor (vRet, vRet, vTmp1); 2629 2630 // load the 2nd round key to vKey1 2631 __ li (keypos, 32); 2632 __ lvx (vKey2, keypos, key); 2633 __ vec_perm (vKey1, vKey2, keyPerm); 2634 2635 // load the 3rd round key to vKey2 2636 __ li (keypos, 48); 2637 __ lvx (vKey3, keypos, key); 2638 __ vec_perm (vKey2, vKey3, keyPerm); 2639 2640 // load the 4th round key to vKey3 2641 __ li (keypos, 64); 2642 __ lvx (vKey4, keypos, key); 2643 __ vec_perm (vKey3, vKey4, keyPerm); 2644 2645 // load the 5th round key to vKey4 2646 __ li (keypos, 80); 2647 __ lvx (vTmp1, keypos, key); 2648 __ vec_perm (vKey4, vTmp1, keyPerm); 2649 2650 // 2nd - 5th rounds 2651 __ vcipher (vRet, vRet, vKey1); 2652 __ vcipher (vRet, vRet, vKey2); 2653 __ vcipher (vRet, vRet, vKey3); 2654 __ vcipher (vRet, vRet, vKey4); 2655 2656 // load the 6th round key to vKey1 2657 __ li (keypos, 96); 2658 __ lvx (vKey2, keypos, key); 2659 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2660 2661 // load the 7th round key to vKey2 2662 __ li (keypos, 112); 2663 __ lvx (vKey3, keypos, key); 2664 __ vec_perm (vKey2, vKey3, keyPerm); 2665 2666 // load the 8th round key to vKey3 2667 __ li (keypos, 128); 2668 __ lvx (vKey4, keypos, key); 2669 __ vec_perm (vKey3, vKey4, keyPerm); 2670 2671 // load the 9th round key to vKey4 2672 __ li (keypos, 144); 2673 __ lvx (vTmp1, keypos, key); 2674 __ vec_perm (vKey4, vTmp1, keyPerm); 2675 2676 // 6th - 9th rounds 2677 __ vcipher (vRet, vRet, vKey1); 2678 __ vcipher (vRet, vRet, vKey2); 2679 __ vcipher (vRet, vRet, vKey3); 2680 __ vcipher (vRet, vRet, vKey4); 2681 2682 // load the 10th round key to vKey1 2683 __ li (keypos, 160); 2684 __ lvx (vKey2, keypos, key); 2685 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2686 2687 // load the 11th round key to vKey2 2688 __ li (keypos, 176); 2689 __ lvx (vTmp1, keypos, key); 2690 __ vec_perm (vKey2, vTmp1, keyPerm); 2691 2692 // if all round keys are loaded, skip next 4 rounds 2693 __ cmpwi (CCR0, keylen, 44); 2694 __ beq (CCR0, L_doLast); 2695 2696 // 10th - 11th rounds 2697 __ vcipher (vRet, vRet, vKey1); 2698 __ vcipher (vRet, vRet, vKey2); 2699 2700 // load the 12th round key to vKey1 2701 __ li (keypos, 192); 2702 __ lvx (vKey2, keypos, key); 2703 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2704 2705 // load the 13th round key to vKey2 2706 __ li (keypos, 208); 2707 __ lvx (vTmp1, keypos, key); 2708 __ vec_perm (vKey2, vTmp1, keyPerm); 2709 2710 // if all round keys are loaded, skip next 2 rounds 2711 __ cmpwi (CCR0, keylen, 52); 2712 __ beq (CCR0, L_doLast); 2713 2714 // 12th - 13th rounds 2715 __ vcipher (vRet, vRet, vKey1); 2716 __ vcipher (vRet, vRet, vKey2); 2717 2718 // load the 14th round key to vKey1 2719 __ li (keypos, 224); 2720 __ lvx (vKey2, keypos, key); 2721 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2722 2723 // load the 15th round key to vKey2 2724 __ li (keypos, 240); 2725 __ lvx (vTmp1, keypos, key); 2726 __ vec_perm (vKey2, vTmp1, keyPerm); 2727 2728 __ bind(L_doLast); 2729 2730 // last two rounds 2731 __ vcipher (vRet, vRet, vKey1); 2732 __ vcipherlast (vRet, vRet, vKey2); 2733 2734 // store result (unaligned) 2735 #ifdef VM_LITTLE_ENDIAN 2736 __ lvsl (toPerm, to); 2737 #else 2738 __ lvsr (toPerm, to); 2739 #endif 2740 __ vspltisb (vTmp3, -1); 2741 __ vspltisb (vTmp4, 0); 2742 __ lvx (vTmp1, to); 
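// vTmp1/vTmp2 pick up the two destination quadwords touched by the unaligned
// 16-byte result; the vsel below merges the encrypted block into them so bytes
// outside the target range are stored back unchanged.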
2743 __ lvx (vTmp2, fifteen, to); 2744 #ifdef VM_LITTLE_ENDIAN 2745 __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask 2746 __ vxor (toPerm, toPerm, fSplt); // swap bytes 2747 #else 2748 __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask 2749 #endif 2750 __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data 2751 __ vsel (vTmp2, vTmp4, vTmp2, vTmp3); 2752 __ vsel (vTmp1, vTmp1, vTmp4, vTmp3); 2753 __ stvx (vTmp2, fifteen, to); // store this one first (may alias) 2754 __ stvx (vTmp1, to); 2755 2756 __ blr(); 2757 return start; 2758 } 2759 2760 // Arguments for generated stub: 2761 // R3_ARG1 - source byte array address 2762 // R4_ARG2 - destination byte array address 2763 // R5_ARG3 - K (key) in little endian int array 2764 address generate_aescrypt_decryptBlock() { 2765 assert(UseAES, "need AES instructions and misaligned SSE support"); 2766 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 2767 2768 address start = __ function_entry(); 2769 2770 Label L_doLast; 2771 Label L_do44; 2772 Label L_do52; 2773 Label L_do60; 2774 2775 Register from = R3_ARG1; // source array address 2776 Register to = R4_ARG2; // destination array address 2777 Register key = R5_ARG3; // round key array 2778 2779 Register keylen = R8; 2780 Register temp = R9; 2781 Register keypos = R10; 2782 Register fifteen = R12; 2783 2784 VectorRegister vRet = VR0; 2785 2786 VectorRegister vKey1 = VR1; 2787 VectorRegister vKey2 = VR2; 2788 VectorRegister vKey3 = VR3; 2789 VectorRegister vKey4 = VR4; 2790 VectorRegister vKey5 = VR5; 2791 2792 VectorRegister fromPerm = VR6; 2793 VectorRegister keyPerm = VR7; 2794 VectorRegister toPerm = VR8; 2795 VectorRegister fSplt = VR9; 2796 2797 VectorRegister vTmp1 = VR10; 2798 VectorRegister vTmp2 = VR11; 2799 VectorRegister vTmp3 = VR12; 2800 VectorRegister vTmp4 = VR13; 2801 2802 __ li (fifteen, 15); 2803 2804 // load unaligned from[0-15] to vsRet 2805 __ lvx (vRet, from); 2806 __ lvx (vTmp1, fifteen, from); 2807 __ lvsl (fromPerm, from); 2808 #ifdef VM_LITTLE_ENDIAN 2809 __ vspltisb (fSplt, 0x0f); 2810 __ vxor (fromPerm, fromPerm, fSplt); 2811 #endif 2812 __ vperm (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE] 2813 2814 // load keylen (44 or 52 or 60) 2815 __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); 2816 2817 // to load keys 2818 __ load_perm (keyPerm, key); 2819 #ifdef VM_LITTLE_ENDIAN 2820 __ vxor (vTmp2, vTmp2, vTmp2); 2821 __ vspltisb (vTmp2, -16); 2822 __ vrld (keyPerm, keyPerm, vTmp2); 2823 __ vrld (keyPerm, keyPerm, vTmp2); 2824 __ vsldoi (keyPerm, keyPerm, keyPerm, 8); 2825 #endif 2826 2827 __ cmpwi (CCR0, keylen, 44); 2828 __ beq (CCR0, L_do44); 2829 2830 __ cmpwi (CCR0, keylen, 52); 2831 __ beq (CCR0, L_do52); 2832 2833 // load the 15th round key to vKey1 2834 __ li (keypos, 240); 2835 __ lvx (vKey1, keypos, key); 2836 __ li (keypos, 224); 2837 __ lvx (vKey2, keypos, key); 2838 __ vec_perm (vKey1, vKey2, vKey1, keyPerm); 2839 2840 // load the 14th round key to vKey2 2841 __ li (keypos, 208); 2842 __ lvx (vKey3, keypos, key); 2843 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2844 2845 // load the 13th round key to vKey3 2846 __ li (keypos, 192); 2847 __ lvx (vKey4, keypos, key); 2848 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2849 2850 // load the 12th round key to vKey4 2851 __ li (keypos, 176); 2852 __ lvx (vKey5, keypos, key); 2853 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2854 2855 // load the 11th round key to vKey5 2856 __ li (keypos, 160); 2857 __ lvx 
(vTmp1, keypos, key); 2858 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2859 2860 // 1st - 5th rounds 2861 __ vxor (vRet, vRet, vKey1); 2862 __ vncipher (vRet, vRet, vKey2); 2863 __ vncipher (vRet, vRet, vKey3); 2864 __ vncipher (vRet, vRet, vKey4); 2865 __ vncipher (vRet, vRet, vKey5); 2866 2867 __ b (L_doLast); 2868 2869 __ bind (L_do52); 2870 2871 // load the 13th round key to vKey1 2872 __ li (keypos, 208); 2873 __ lvx (vKey1, keypos, key); 2874 __ li (keypos, 192); 2875 __ lvx (vKey2, keypos, key); 2876 __ vec_perm (vKey1, vKey2, vKey1, keyPerm); 2877 2878 // load the 12th round key to vKey2 2879 __ li (keypos, 176); 2880 __ lvx (vKey3, keypos, key); 2881 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2882 2883 // load the 11th round key to vKey3 2884 __ li (keypos, 160); 2885 __ lvx (vTmp1, keypos, key); 2886 __ vec_perm (vKey3, vTmp1, vKey3, keyPerm); 2887 2888 // 1st - 3rd rounds 2889 __ vxor (vRet, vRet, vKey1); 2890 __ vncipher (vRet, vRet, vKey2); 2891 __ vncipher (vRet, vRet, vKey3); 2892 2893 __ b (L_doLast); 2894 2895 __ bind (L_do44); 2896 2897 // load the 11th round key to vKey1 2898 __ li (keypos, 176); 2899 __ lvx (vKey1, keypos, key); 2900 __ li (keypos, 160); 2901 __ lvx (vTmp1, keypos, key); 2902 __ vec_perm (vKey1, vTmp1, vKey1, keyPerm); 2903 2904 // 1st round 2905 __ vxor (vRet, vRet, vKey1); 2906 2907 __ bind (L_doLast); 2908 2909 // load the 10th round key to vKey1 2910 __ li (keypos, 144); 2911 __ lvx (vKey2, keypos, key); 2912 __ vec_perm (vKey1, vKey2, vTmp1, keyPerm); 2913 2914 // load the 9th round key to vKey2 2915 __ li (keypos, 128); 2916 __ lvx (vKey3, keypos, key); 2917 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2918 2919 // load the 8th round key to vKey3 2920 __ li (keypos, 112); 2921 __ lvx (vKey4, keypos, key); 2922 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2923 2924 // load the 7th round key to vKey4 2925 __ li (keypos, 96); 2926 __ lvx (vKey5, keypos, key); 2927 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2928 2929 // load the 6th round key to vKey5 2930 __ li (keypos, 80); 2931 __ lvx (vTmp1, keypos, key); 2932 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2933 2934 // last 10th - 6th rounds 2935 __ vncipher (vRet, vRet, vKey1); 2936 __ vncipher (vRet, vRet, vKey2); 2937 __ vncipher (vRet, vRet, vKey3); 2938 __ vncipher (vRet, vRet, vKey4); 2939 __ vncipher (vRet, vRet, vKey5); 2940 2941 // load the 5th round key to vKey1 2942 __ li (keypos, 64); 2943 __ lvx (vKey2, keypos, key); 2944 __ vec_perm (vKey1, vKey2, vTmp1, keyPerm); 2945 2946 // load the 4th round key to vKey2 2947 __ li (keypos, 48); 2948 __ lvx (vKey3, keypos, key); 2949 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2950 2951 // load the 3rd round key to vKey3 2952 __ li (keypos, 32); 2953 __ lvx (vKey4, keypos, key); 2954 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2955 2956 // load the 2nd round key to vKey4 2957 __ li (keypos, 16); 2958 __ lvx (vKey5, keypos, key); 2959 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2960 2961 // load the 1st round key to vKey5 2962 __ lvx (vTmp1, key); 2963 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2964 2965 // last 5th - 1th rounds 2966 __ vncipher (vRet, vRet, vKey1); 2967 __ vncipher (vRet, vRet, vKey2); 2968 __ vncipher (vRet, vRet, vKey3); 2969 __ vncipher (vRet, vRet, vKey4); 2970 __ vncipherlast (vRet, vRet, vKey5); 2971 2972 // store result (unaligned) 2973 #ifdef VM_LITTLE_ENDIAN 2974 __ lvsl (toPerm, to); 2975 #else 2976 __ lvsr (toPerm, to); 2977 #endif 2978 __ vspltisb (vTmp3, -1); 2979 __ vspltisb (vTmp4, 0); 2980 __ lvx (vTmp1, to); 2981 
__ lvx (vTmp2, fifteen, to); 2982 #ifdef VM_LITTLE_ENDIAN 2983 __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask 2984 __ vxor (toPerm, toPerm, fSplt); // swap bytes 2985 #else 2986 __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask 2987 #endif 2988 __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data 2989 __ vsel (vTmp2, vTmp4, vTmp2, vTmp3); 2990 __ vsel (vTmp1, vTmp1, vTmp4, vTmp3); 2991 __ stvx (vTmp2, fifteen, to); // store this one first (may alias) 2992 __ stvx (vTmp1, to); 2993 2994 __ blr(); 2995 return start; 2996 } 2997 2998 address generate_sha256_implCompress(bool multi_block, const char *name) { 2999 assert(UseSHA, "need SHA instructions"); 3000 StubCodeMark mark(this, "StubRoutines", name); 3001 address start = __ function_entry(); 3002 3003 __ sha256 (multi_block); 3004 3005 __ blr(); 3006 return start; 3007 } 3008 3009 address generate_sha512_implCompress(bool multi_block, const char *name) { 3010 assert(UseSHA, "need SHA instructions"); 3011 StubCodeMark mark(this, "StubRoutines", name); 3012 address start = __ function_entry(); 3013 3014 __ sha512 (multi_block); 3015 3016 __ blr(); 3017 return start; 3018 } 3019 3020 void generate_arraycopy_stubs() { 3021 // Note: the disjoint stubs must be generated first, some of 3022 // the conjoint stubs use them. 3023 3024 // non-aligned disjoint versions 3025 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); 3026 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 3027 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); 3028 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy"); 3029 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false); 3030 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true); 3031 3032 // aligned disjoint versions 3033 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy"); 3034 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); 3035 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy"); 3036 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy"); 3037 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false); 3038 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "oop_disjoint_arraycopy_uninit", true); 3039 3040 // non-aligned conjoint versions 3041 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); 3042 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 3043 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); 3044 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy"); 3045 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy", false); 3046 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true); 3047 3048 // aligned conjoint versions 3049 StubRoutines::_arrayof_jbyte_arraycopy = 
generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy"); 3050 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 3051 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy"); 3052 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy"); 3053 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false); 3054 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true); 3055 3056 // special/generic versions 3057 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", false); 3058 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true); 3059 3060 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3061 STUB_ENTRY(jbyte_arraycopy), 3062 STUB_ENTRY(jshort_arraycopy), 3063 STUB_ENTRY(jint_arraycopy), 3064 STUB_ENTRY(jlong_arraycopy)); 3065 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3066 STUB_ENTRY(jbyte_arraycopy), 3067 STUB_ENTRY(jshort_arraycopy), 3068 STUB_ENTRY(jint_arraycopy), 3069 STUB_ENTRY(oop_arraycopy), 3070 STUB_ENTRY(oop_disjoint_arraycopy), 3071 STUB_ENTRY(jlong_arraycopy), 3072 STUB_ENTRY(checkcast_arraycopy)); 3073 3074 // fill routines 3075 if (OptimizeFill) { 3076 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3077 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3078 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3079 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3080 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3081 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3082 } 3083 } 3084 3085 // Safefetch stubs. 3086 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) { 3087 // safefetch signatures: 3088 // int SafeFetch32(int* adr, int errValue); 3089 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3090 // 3091 // arguments: 3092 // R3_ARG1 = adr 3093 // R4_ARG2 = errValue 3094 // 3095 // result: 3096 // R3_RET = *adr or errValue 3097 3098 StubCodeMark mark(this, "StubRoutines", name); 3099 3100 // Entry point, pc or function descriptor. 3101 *entry = __ function_entry(); 3102 3103 // Load *adr into R4_ARG2, may fault. 
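// A fault here is expected to be caught by the VM's signal handler, which
// matches *fault_pc and resumes at *continuation_pc; since the load never
// completed, R4_ARG2 still holds errValue at that point and is returned.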
3104 *fault_pc = __ pc(); 3105 switch (size) { 3106 case 4: 3107 // int32_t, signed extended 3108 __ lwa(R4_ARG2, 0, R3_ARG1); 3109 break; 3110 case 8: 3111 // int64_t 3112 __ ld(R4_ARG2, 0, R3_ARG1); 3113 break; 3114 default: 3115 ShouldNotReachHere(); 3116 } 3117 3118 // return errValue or *adr 3119 *continuation_pc = __ pc(); 3120 __ mr(R3_RET, R4_ARG2); 3121 __ blr(); 3122 } 3123 3124 // Stub for BigInteger::multiplyToLen() 3125 // 3126 // Arguments: 3127 // 3128 // Input: 3129 // R3 - x address 3130 // R4 - x length 3131 // R5 - y address 3132 // R6 - y length 3133 // R7 - z address 3134 // R8 - z length 3135 // 3136 address generate_multiplyToLen() { 3137 3138 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 3139 3140 address start = __ function_entry(); 3141 3142 const Register x = R3; 3143 const Register xlen = R4; 3144 const Register y = R5; 3145 const Register ylen = R6; 3146 const Register z = R7; 3147 const Register zlen = R8; 3148 3149 const Register tmp1 = R2; // TOC not used. 3150 const Register tmp2 = R9; 3151 const Register tmp3 = R10; 3152 const Register tmp4 = R11; 3153 const Register tmp5 = R12; 3154 3155 // non-volatile regs 3156 const Register tmp6 = R31; 3157 const Register tmp7 = R30; 3158 const Register tmp8 = R29; 3159 const Register tmp9 = R28; 3160 const Register tmp10 = R27; 3161 const Register tmp11 = R26; 3162 const Register tmp12 = R25; 3163 const Register tmp13 = R24; 3164 3165 BLOCK_COMMENT("Entry:"); 3166 3167 // C2 does not respect int to long conversion for stub calls. 3168 __ clrldi(xlen, xlen, 32); 3169 __ clrldi(ylen, ylen, 32); 3170 __ clrldi(zlen, zlen, 32); 3171 3172 // Save non-volatile regs (frameless). 3173 int current_offs = 8; 3174 __ std(R24, -current_offs, R1_SP); current_offs += 8; 3175 __ std(R25, -current_offs, R1_SP); current_offs += 8; 3176 __ std(R26, -current_offs, R1_SP); current_offs += 8; 3177 __ std(R27, -current_offs, R1_SP); current_offs += 8; 3178 __ std(R28, -current_offs, R1_SP); current_offs += 8; 3179 __ std(R29, -current_offs, R1_SP); current_offs += 8; 3180 __ std(R30, -current_offs, R1_SP); current_offs += 8; 3181 __ std(R31, -current_offs, R1_SP); 3182 3183 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, 3184 tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13); 3185 3186 // Restore non-volatile regs. 3187 current_offs = 8; 3188 __ ld(R24, -current_offs, R1_SP); current_offs += 8; 3189 __ ld(R25, -current_offs, R1_SP); current_offs += 8; 3190 __ ld(R26, -current_offs, R1_SP); current_offs += 8; 3191 __ ld(R27, -current_offs, R1_SP); current_offs += 8; 3192 __ ld(R28, -current_offs, R1_SP); current_offs += 8; 3193 __ ld(R29, -current_offs, R1_SP); current_offs += 8; 3194 __ ld(R30, -current_offs, R1_SP); current_offs += 8; 3195 __ ld(R31, -current_offs, R1_SP); 3196 3197 __ blr(); // Return to caller. 3198 3199 return start; 3200 } 3201 3202 3203 // Compute CRC32/CRC32C function. 3204 void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) { 3205 3206 // arguments to kernel_crc32: 3207 const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call. 
3208 const Register data = R4_ARG2; // source byte array 3209 const Register dataLen = R5_ARG3; // #bytes to process 3210 3211 const Register t0 = R2; 3212 const Register t1 = R7; 3213 const Register t2 = R8; 3214 const Register t3 = R9; 3215 const Register tc0 = R10; 3216 const Register tc1 = R11; 3217 const Register tc2 = R12; 3218 3219 BLOCK_COMMENT("Stub body {"); 3220 assert_different_registers(crc, data, dataLen, table); 3221 3222 __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC); 3223 3224 BLOCK_COMMENT("return"); 3225 __ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET). 3226 __ blr(); 3227 3228 BLOCK_COMMENT("} Stub body"); 3229 } 3230 3231 /** 3232 * Arguments: 3233 * 3234 * Input: 3235 * R3_ARG1 - out address 3236 * R4_ARG2 - in address 3237 * R5_ARG3 - offset 3238 * R6_ARG4 - len 3239 * R7_ARG5 - k 3240 * Output: 3241 * R3_RET - carry 3242 */ 3243 address generate_mulAdd() { 3244 __ align(CodeEntryAlignment); 3245 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 3246 3247 address start = __ function_entry(); 3248 3249 // C2 does not sign extend signed parameters to full 64 bits registers: 3250 __ rldic (R5_ARG3, R5_ARG3, 2, 32); // always positive 3251 __ clrldi(R6_ARG4, R6_ARG4, 32); // force zero bits on higher word 3252 __ clrldi(R7_ARG5, R7_ARG5, 32); // force zero bits on higher word 3253 3254 __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10); 3255 3256 // Moves output carry to return register 3257 __ mr (R3_RET, R10); 3258 3259 __ blr(); 3260 3261 return start; 3262 } 3263 3264 /** 3265 * Arguments: 3266 * 3267 * Input: 3268 * R3_ARG1 - in address 3269 * R4_ARG2 - in length 3270 * R5_ARG3 - out address 3271 * R6_ARG4 - out length 3272 */ 3273 address generate_squareToLen() { 3274 __ align(CodeEntryAlignment); 3275 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 3276 3277 address start = __ function_entry(); 3278 3279 // args - higher word is cleaned (unsignedly) due to int to long casting 3280 const Register in = R3_ARG1; 3281 const Register in_len = R4_ARG2; 3282 __ clrldi(in_len, in_len, 32); 3283 const Register out = R5_ARG3; 3284 const Register out_len = R6_ARG4; 3285 __ clrldi(out_len, out_len, 32); 3286 3287 // output 3288 const Register ret = R3_RET; 3289 3290 // temporaries 3291 const Register lplw_s = R7; 3292 const Register in_aux = R8; 3293 const Register out_aux = R9; 3294 const Register piece = R10; 3295 const Register product = R14; 3296 const Register lplw = R15; 3297 const Register i_minus1 = R16; 3298 const Register carry = R17; 3299 const Register offset = R18; 3300 const Register off_aux = R19; 3301 const Register t = R20; 3302 const Register mlen = R21; 3303 const Register len = R22; 3304 const Register a = R23; 3305 const Register b = R24; 3306 const Register i = R25; 3307 const Register c = R26; 3308 const Register cs = R27; 3309 3310 // Labels 3311 Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_MULADD, SKIP_LOOP_SQUARE; 3312 Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, LOOP_MULADD, LOOP_SQUARE; 3313 3314 // Save non-volatile regs (frameless). 
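// The stores below use negative offsets from R1_SP without pushing a frame;
// the matching reloads appear just before the final blr, so this stub never
// establishes a stack frame of its own.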
3315 int current_offs = -8; 3316 __ std(R28, current_offs, R1_SP); current_offs -= 8; 3317 __ std(R27, current_offs, R1_SP); current_offs -= 8; 3318 __ std(R26, current_offs, R1_SP); current_offs -= 8; 3319 __ std(R25, current_offs, R1_SP); current_offs -= 8; 3320 __ std(R24, current_offs, R1_SP); current_offs -= 8; 3321 __ std(R23, current_offs, R1_SP); current_offs -= 8; 3322 __ std(R22, current_offs, R1_SP); current_offs -= 8; 3323 __ std(R21, current_offs, R1_SP); current_offs -= 8; 3324 __ std(R20, current_offs, R1_SP); current_offs -= 8; 3325 __ std(R19, current_offs, R1_SP); current_offs -= 8; 3326 __ std(R18, current_offs, R1_SP); current_offs -= 8; 3327 __ std(R17, current_offs, R1_SP); current_offs -= 8; 3328 __ std(R16, current_offs, R1_SP); current_offs -= 8; 3329 __ std(R15, current_offs, R1_SP); current_offs -= 8; 3330 __ std(R14, current_offs, R1_SP); 3331 3332 // Store the squares, right shifted one bit (i.e., divided by 2) 3333 __ subi (out_aux, out, 8); 3334 __ subi (in_aux, in, 4); 3335 __ cmpwi (CCR0, in_len, 0); 3336 // Initialize lplw outside of the loop 3337 __ xorr (lplw, lplw, lplw); 3338 __ ble (CCR0, SKIP_LOOP_SQUARE); // in_len <= 0 3339 __ mtctr (in_len); 3340 3341 __ bind(LOOP_SQUARE); 3342 __ lwzu (piece, 4, in_aux); 3343 __ mulld (product, piece, piece); 3344 // shift left 63 bits and only keep the MSB 3345 __ rldic (lplw_s, lplw, 63, 0); 3346 __ mr (lplw, product); 3347 // shift right 1 bit without sign extension 3348 __ srdi (product, product, 1); 3349 // join them to the same register and store it 3350 __ orr (product, lplw_s, product); 3351 #ifdef VM_LITTLE_ENDIAN 3352 // Swap low and high words for little endian 3353 __ rldicl (product, product, 32, 0); 3354 #endif 3355 __ stdu (product, 8, out_aux); 3356 __ bdnz (LOOP_SQUARE); 3357 3358 __ bind(SKIP_LOOP_SQUARE); 3359 3360 // Add in off-diagonal sums 3361 __ cmpwi (CCR0, in_len, 0); 3362 __ ble (CCR0, SKIP_DIAGONAL_SUM); 3363 // Avoid CTR usage here in order to use it at mulAdd 3364 __ subi (i_minus1, in_len, 1); 3365 __ li (offset, 4); 3366 3367 __ bind(LOOP_DIAGONAL_SUM); 3368 3369 __ sldi (off_aux, out_len, 2); 3370 __ sub (off_aux, off_aux, offset); 3371 3372 __ mr (len, i_minus1); 3373 __ sldi (mlen, i_minus1, 2); 3374 __ lwzx (t, in, mlen); 3375 3376 __ muladd (out, in, off_aux, len, t, a, b, carry); 3377 3378 // begin<addOne> 3379 // off_aux = out_len*4 - 4 - mlen - offset*4 - 4; 3380 __ addi (mlen, mlen, 4); 3381 __ sldi (a, out_len, 2); 3382 __ subi (a, a, 4); 3383 __ sub (a, a, mlen); 3384 __ subi (off_aux, offset, 4); 3385 __ sub (off_aux, a, off_aux); 3386 3387 __ lwzx (b, off_aux, out); 3388 __ add (b, b, carry); 3389 __ stwx (b, off_aux, out); 3390 3391 // if (((uint64_t)s >> 32) != 0) { 3392 __ srdi_ (a, b, 32); 3393 __ beq (CCR0, SKIP_ADDONE); 3394 3395 // while (--mlen >= 0) { 3396 __ bind(LOOP_ADDONE); 3397 __ subi (mlen, mlen, 4); 3398 __ cmpwi (CCR0, mlen, 0); 3399 __ beq (CCR0, SKIP_ADDONE); 3400 3401 // if (--offset_aux < 0) { // Carry out of number 3402 __ subi (off_aux, off_aux, 4); 3403 __ cmpwi (CCR0, off_aux, 0); 3404 __ blt (CCR0, SKIP_ADDONE); 3405 3406 // } else { 3407 __ lwzx (b, off_aux, out); 3408 __ addi (b, b, 1); 3409 __ stwx (b, off_aux, out); 3410 __ cmpwi (CCR0, b, 0); 3411 __ bne (CCR0, SKIP_ADDONE); 3412 __ b (LOOP_ADDONE); 3413 3414 __ bind(SKIP_ADDONE); 3415 // } } } end<addOne> 3416 3417 __ addi (offset, offset, 8); 3418 __ subi (i_minus1, i_minus1, 1); 3419 __ cmpwi (CCR0, i_minus1, 0); 3420 __ bge (CCR0, LOOP_DIAGONAL_SUM); 3421 3422 __ 
bind(SKIP_DIAGONAL_SUM); 3423 3424 // Shift back up and set low bit 3425 // Shifts 1 bit left up to len positions. Assumes no leading zeros 3426 // begin<primitiveLeftShift> 3427 __ cmpwi (CCR0, out_len, 0); 3428 __ ble (CCR0, SKIP_LSHIFT); 3429 __ li (i, 0); 3430 __ lwz (c, 0, out); 3431 __ subi (b, out_len, 1); 3432 __ mtctr (b); 3433 3434 __ bind(LOOP_LSHIFT); 3435 __ mr (b, c); 3436 __ addi (cs, i, 4); 3437 __ lwzx (c, out, cs); 3438 3439 __ sldi (b, b, 1); 3440 __ srwi (cs, c, 31); 3441 __ orr (b, b, cs); 3442 __ stwx (b, i, out); 3443 3444 __ addi (i, i, 4); 3445 __ bdnz (LOOP_LSHIFT); 3446 3447 __ sldi (c, out_len, 2); 3448 __ subi (c, c, 4); 3449 __ lwzx (b, out, c); 3450 __ sldi (b, b, 1); 3451 __ stwx (b, out, c); 3452 3453 __ bind(SKIP_LSHIFT); 3454 // end<primitiveLeftShift> 3455 3456 // Set low bit 3457 __ sldi (i, in_len, 2); 3458 __ subi (i, i, 4); 3459 __ lwzx (i, in, i); 3460 __ sldi (c, out_len, 2); 3461 __ subi (c, c, 4); 3462 __ lwzx (b, out, c); 3463 3464 __ andi (i, i, 1); 3465 __ orr (i, b, i); 3466 3467 __ stwx (i, out, c); 3468 3469 // Restore non-volatile regs. 3470 current_offs = -8; 3471 __ ld(R28, current_offs, R1_SP); current_offs -= 8; 3472 __ ld(R27, current_offs, R1_SP); current_offs -= 8; 3473 __ ld(R26, current_offs, R1_SP); current_offs -= 8; 3474 __ ld(R25, current_offs, R1_SP); current_offs -= 8; 3475 __ ld(R24, current_offs, R1_SP); current_offs -= 8; 3476 __ ld(R23, current_offs, R1_SP); current_offs -= 8; 3477 __ ld(R22, current_offs, R1_SP); current_offs -= 8; 3478 __ ld(R21, current_offs, R1_SP); current_offs -= 8; 3479 __ ld(R20, current_offs, R1_SP); current_offs -= 8; 3480 __ ld(R19, current_offs, R1_SP); current_offs -= 8; 3481 __ ld(R18, current_offs, R1_SP); current_offs -= 8; 3482 __ ld(R17, current_offs, R1_SP); current_offs -= 8; 3483 __ ld(R16, current_offs, R1_SP); current_offs -= 8; 3484 __ ld(R15, current_offs, R1_SP); current_offs -= 8; 3485 __ ld(R14, current_offs, R1_SP); 3486 3487 __ mr(ret, out); 3488 __ blr(); 3489 3490 return start; 3491 } 3492 3493 /** 3494 * Arguments: 3495 * 3496 * Inputs: 3497 * R3_ARG1 - int crc 3498 * R4_ARG2 - byte* buf 3499 * R5_ARG3 - int length (of buffer) 3500 * 3501 * scratch: 3502 * R2, R6-R12 3503 * 3504 * Ouput: 3505 * R3_RET - int crc result 3506 */ 3507 // Compute CRC32 function. 3508 address generate_CRC32_updateBytes(const char* name) { 3509 __ align(CodeEntryAlignment); 3510 StubCodeMark mark(this, "StubRoutines", name); 3511 address start = __ function_entry(); // Remember stub start address (is rtn value). 3512 3513 const Register table = R6; // crc table address 3514 3515 // arguments to kernel_crc32: 3516 const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call. 
  /**
   * Arguments:
   *
   * Inputs:
   *   R3_ARG1    - int   crc
   *   R4_ARG2    - byte* buf
   *   R5_ARG3    - int   length (of buffer)
   *
   * Scratch:
   *   R2, R6-R12
   *
   * Output:
   *   R3_RET     - int   crc result
   */
  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();  // Remember stub start address (is rtn value).

    const Register table   = R6;       // crc table address

    // arguments to kernel_crc32:
    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
    const Register data    = R4_ARG2;  // source byte array
    const Register dataLen = R5_ARG3;  // #bytes to process

    if (VM_Version::has_vpmsumb()) {
      const Register constants  = R2;  // constants address
      const Register bconstants = R8;  // Barrett constants address

      const Register t0 = R9;
      const Register t1 = R10;
      const Register t2 = R11;
      const Register t3 = R12;
      const Register t4 = R7;

      BLOCK_COMMENT("Stub body {");
      assert_different_registers(crc, data, dataLen, table);

      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
      StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
      StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);

      __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);

      BLOCK_COMMENT("return");
      __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
      __ blr();

      BLOCK_COMMENT("} Stub body");
    } else {
      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
      generate_CRC_updateBytes(name, table, true);
    }

    return start;
  }


  /**
   * Arguments:
   *
   * Inputs:
   *   R3_ARG1    - int   crc
   *   R4_ARG2    - byte* buf
   *   R5_ARG3    - int   length (of buffer)
   *
   * Scratch:
   *   R2, R6-R12
   *
   * Output:
   *   R3_RET     - int   crc result
   */
  // Compute CRC32C function.
  address generate_CRC32C_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();  // Remember stub start address (is rtn value).

    const Register table   = R6;       // crc table address

    // arguments to kernel_crc32:
    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
    const Register data    = R4_ARG2;  // source byte array
    const Register dataLen = R5_ARG3;  // #bytes to process

    if (VM_Version::has_vpmsumb()) {
      const Register constants  = R2;  // constants address
      const Register bconstants = R8;  // Barrett constants address

      const Register t0 = R9;
      const Register t1 = R10;
      const Register t2 = R11;
      const Register t3 = R12;
      const Register t4 = R7;

      BLOCK_COMMENT("Stub body {");
      assert_different_registers(crc, data, dataLen, table);

      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
      StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
      StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);

      __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);

      BLOCK_COMMENT("return");
      __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
      __ blr();

      BLOCK_COMMENT("} Stub body");
    } else {
      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
      generate_CRC_updateBytes(name, table, false);
    }

    return start;
  }

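  // Note: the two CRC stubs above are structurally identical. They differ only
  // in the lookup tables and constants they load and in the flag passed to
  // kernel_crc32_1word_vpmsum / generate_CRC_updateBytes (true for CRC32,
  // false for CRC32C).
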
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // Entry points that exist in all platforms.
    // Note: This is code that could be shared among different platforms - however the
    //       benefit seems to be smaller than the disadvantage of having a
    //       much more complicated generator structure. See also comment in
    //       stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();
    StubRoutines::_call_stub_entry         = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry   = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);

    // CRC32 Intrinsics.
    if (UseCRC32Intrinsics) {
      StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
    }

    // CRC32C Intrinsics.
    if (UseCRC32CIntrinsics) {
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds.
    StubRoutines::_throw_AbstractMethodError_entry           = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    // Handle IncompatibleClassChangeError in itable stubs.
    StubRoutines::_throw_IncompatibleClassChangeError_entry  = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry  = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    // Support for verify_oop (must happen after universe_init).
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // Arraycopy stubs used by compilers.
    generate_arraycopy_stubs();

    // Safefetch stubs.
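    // A SafeFetch stub performs a single load from a possibly invalid address.
    // If the load faults, the signal handler resumes execution at the registered
    // continuation pc instead of crashing, which lets the VM probe memory safely
    // (e.g. during error reporting).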
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
#endif

    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }

    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
    }

    if (UseSHA256Intrinsics) {
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // Replace the standard masm with a special one.
    _masm = new MacroAssembler(code);
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
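
// Note: stubs are generated in two phases. generate_initial() provides the entry
// points needed early in startup (e.g. the call stub and the StackOverflowError
// throwers used by the interpreter), while generate_all() adds the remaining
// stubs, some of which require a more fully initialized VM (e.g. verify_oop must
// happen after universe_init). See also the comment in stubRoutines.hpp.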