1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2012, 2014 SAP AG. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_ppc.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/method.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "utilities/top.hpp" 41 #include "runtime/thread.inline.hpp" 42 43 #define __ _masm-> 44 45 #ifdef PRODUCT 46 #define BLOCK_COMMENT(str) // nothing 47 #else 48 #define BLOCK_COMMENT(str) __ block_comment(str) 49 #endif 50 51 class StubGenerator: public StubCodeGenerator { 52 private: 53 54 // Call stubs are used to call Java from C 55 // 56 // Arguments: 57 // 58 // R3 - call wrapper address : address 59 // R4 - result : intptr_t* 60 // R5 - result type : BasicType 61 // R6 - method : Method 62 // R7 - frame mgr entry point : address 63 // R8 - parameter block : intptr_t* 64 // R9 - parameter count in words : int 65 // R10 - thread : Thread* 66 // 67 address generate_call_stub(address& return_address) { 68 // Setup a new c frame, copy java arguments, call frame manager or 69 // native_entry, and process result. 70 71 StubCodeMark mark(this, "StubRoutines", "call_stub"); 72 73 address start = __ function_entry(); 74 75 // some sanity checks 76 assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned"); 77 assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned"); 78 assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned"); 79 assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned"); 80 assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned"); 81 82 Register r_arg_call_wrapper_addr = R3; 83 Register r_arg_result_addr = R4; 84 Register r_arg_result_type = R5; 85 Register r_arg_method = R6; 86 Register r_arg_entry = R7; 87 Register r_arg_thread = R10; 88 89 Register r_temp = R24; 90 Register r_top_of_arguments_addr = R25; 91 Register r_entryframe_fp = R26; 92 93 { 94 // Stack on entry to call_stub: 95 // 96 // F1 [C_FRAME] 97 // ... 
98 99 Register r_arg_argument_addr = R8; 100 Register r_arg_argument_count = R9; 101 Register r_frame_alignment_in_bytes = R27; 102 Register r_argument_addr = R28; 103 Register r_argumentcopy_addr = R29; 104 Register r_argument_size_in_bytes = R30; 105 Register r_frame_size = R23; 106 107 Label arguments_copied; 108 109 // Save LR/CR to caller's C_FRAME. 110 __ save_LR_CR(R0); 111 112 // Zero extend arg_argument_count. 113 __ clrldi(r_arg_argument_count, r_arg_argument_count, 32); 114 115 // Save non-volatiles GPRs to ENTRY_FRAME (not yet pushed, but it's safe). 116 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); 117 118 // Keep copy of our frame pointer (caller's SP). 119 __ mr(r_entryframe_fp, R1_SP); 120 121 BLOCK_COMMENT("Push ENTRY_FRAME including arguments"); 122 // Push ENTRY_FRAME including arguments: 123 // 124 // F0 [TOP_IJAVA_FRAME_ABI] 125 // alignment (optional) 126 // [outgoing Java arguments] 127 // [ENTRY_FRAME_LOCALS] 128 // F1 [C_FRAME] 129 // ... 130 131 // calculate frame size 132 133 // unaligned size of arguments 134 __ sldi(r_argument_size_in_bytes, 135 r_arg_argument_count, Interpreter::logStackElementSize); 136 // arguments alignment (max 1 slot) 137 // FIXME: use round_to() here 138 __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1); 139 __ sldi(r_frame_alignment_in_bytes, 140 r_frame_alignment_in_bytes, Interpreter::logStackElementSize); 141 142 // size = unaligned size of arguments + top abi's size 143 __ addi(r_frame_size, r_argument_size_in_bytes, 144 frame::top_ijava_frame_abi_size); 145 // size += arguments alignment 146 __ add(r_frame_size, 147 r_frame_size, r_frame_alignment_in_bytes); 148 // size += size of call_stub locals 149 __ addi(r_frame_size, 150 r_frame_size, frame::entry_frame_locals_size); 151 152 // push ENTRY_FRAME 153 __ push_frame(r_frame_size, r_temp); 154 155 // initialize call_stub locals (step 1) 156 __ std(r_arg_call_wrapper_addr, 157 _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp); 158 __ std(r_arg_result_addr, 159 _entry_frame_locals_neg(result_address), r_entryframe_fp); 160 __ std(r_arg_result_type, 161 _entry_frame_locals_neg(result_type), r_entryframe_fp); 162 // we will save arguments_tos_address later 163 164 165 BLOCK_COMMENT("Copy Java arguments"); 166 // copy Java arguments 167 168 // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later. 169 // FIXME: why not simply use SP+frame::top_ijava_frame_size? 170 __ addi(r_top_of_arguments_addr, 171 R1_SP, frame::top_ijava_frame_abi_size); 172 __ add(r_top_of_arguments_addr, 173 r_top_of_arguments_addr, r_frame_alignment_in_bytes); 174 175 // any arguments to copy? 
176 __ cmpdi(CCR0, r_arg_argument_count, 0); 177 __ beq(CCR0, arguments_copied); 178 179 // prepare loop and copy arguments in reverse order 180 { 181 // init CTR with arg_argument_count 182 __ mtctr(r_arg_argument_count); 183 184 // let r_argumentcopy_addr point to last outgoing Java arguments P 185 __ mr(r_argumentcopy_addr, r_top_of_arguments_addr); 186 187 // let r_argument_addr point to last incoming java argument 188 __ add(r_argument_addr, 189 r_arg_argument_addr, r_argument_size_in_bytes); 190 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord); 191 192 // now loop while CTR > 0 and copy arguments 193 { 194 Label next_argument; 195 __ bind(next_argument); 196 197 __ ld(r_temp, 0, r_argument_addr); 198 // argument_addr--; 199 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord); 200 __ std(r_temp, 0, r_argumentcopy_addr); 201 // argumentcopy_addr++; 202 __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord); 203 204 __ bdnz(next_argument); 205 } 206 } 207 208 // Arguments copied, continue. 209 __ bind(arguments_copied); 210 } 211 212 { 213 BLOCK_COMMENT("Call frame manager or native entry."); 214 // Call frame manager or native entry. 215 Register r_new_arg_entry = R14; 216 assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr, 217 r_arg_method, r_arg_thread); 218 219 __ mr(r_new_arg_entry, r_arg_entry); 220 221 // Register state on entry to frame manager / native entry: 222 // 223 // tos - intptr_t* sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8 224 // R19_method - Method 225 // R16_thread - JavaThread* 226 227 // Tos must point to last argument - element_size. 228 #ifdef CC_INTERP 229 const Register tos = R17_tos; 230 #else 231 const Register tos = R15_esp; 232 #endif 233 __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize); 234 235 // initialize call_stub locals (step 2) 236 // now save tos as arguments_tos_address 237 __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp); 238 239 // load argument registers for call 240 __ mr(R19_method, r_arg_method); 241 __ mr(R16_thread, r_arg_thread); 242 assert(tos != r_arg_method, "trashed r_arg_method"); 243 assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread"); 244 245 // Set R15_prev_state to 0 for simplifying checks in callee. 246 #ifdef CC_INTERP 247 __ li(R15_prev_state, 0); 248 #else 249 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 250 #endif 251 // Stack on entry to frame manager / native entry: 252 // 253 // F0 [TOP_IJAVA_FRAME_ABI] 254 // alignment (optional) 255 // [outgoing Java arguments] 256 // [ENTRY_FRAME_LOCALS] 257 // F1 [C_FRAME] 258 // ... 259 // 260 261 // global toc register 262 __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1); 263 264 // Load narrow oop base. 265 __ reinit_heapbase(R30, R11_scratch1); 266 267 // Remember the senderSP so we interpreter can pop c2i arguments off of the stack 268 // when called via a c2i. 269 270 // Pass initial_caller_sp to framemanager. 271 __ mr(R21_tmp1, R1_SP); 272 273 // Do a light-weight C-call here, r_new_arg_entry holds the address 274 // of the interpreter entry point (frame manager or native entry) 275 // and save runtime-value of LR in return_address. 
276 assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread, 277 "trashed r_new_arg_entry"); 278 return_address = __ call_stub(r_new_arg_entry); 279 } 280 281 { 282 BLOCK_COMMENT("Returned from frame manager or native entry."); 283 // Returned from frame manager or native entry. 284 // Now pop frame, process result, and return to caller. 285 286 // Stack on exit from frame manager / native entry: 287 // 288 // F0 [ABI] 289 // ... 290 // [ENTRY_FRAME_LOCALS] 291 // F1 [C_FRAME] 292 // ... 293 // 294 // Just pop the topmost frame ... 295 // 296 297 Label ret_is_object; 298 Label ret_is_long; 299 Label ret_is_float; 300 Label ret_is_double; 301 302 Register r_entryframe_fp = R30; 303 Register r_lr = R7_ARG5; 304 Register r_cr = R8_ARG6; 305 306 // Reload some volatile registers which we've spilled before the call 307 // to frame manager / native entry. 308 // Access all locals via frame pointer, because we know nothing about 309 // the topmost frame's size. 310 __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP); 311 assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr); 312 __ ld(r_arg_result_addr, 313 _entry_frame_locals_neg(result_address), r_entryframe_fp); 314 __ ld(r_arg_result_type, 315 _entry_frame_locals_neg(result_type), r_entryframe_fp); 316 __ ld(r_cr, _abi(cr), r_entryframe_fp); 317 __ ld(r_lr, _abi(lr), r_entryframe_fp); 318 319 // pop frame and restore non-volatiles, LR and CR 320 __ mr(R1_SP, r_entryframe_fp); 321 __ mtcr(r_cr); 322 __ mtlr(r_lr); 323 324 // Store result depending on type. Everything that is not 325 // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT. 326 __ cmpwi(CCR0, r_arg_result_type, T_OBJECT); 327 __ cmpwi(CCR1, r_arg_result_type, T_LONG); 328 __ cmpwi(CCR5, r_arg_result_type, T_FLOAT); 329 __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE); 330 331 // restore non-volatile registers 332 __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); 333 334 335 // Stack on exit from call_stub: 336 // 337 // 0 [C_FRAME] 338 // ... 339 // 340 // no call_stub frames left. 341 342 // All non-volatiles have been restored at this point!! 343 assert(R3_RET == R3, "R3_RET should be R3"); 344 345 __ beq(CCR0, ret_is_object); 346 __ beq(CCR1, ret_is_long); 347 __ beq(CCR5, ret_is_float); 348 __ beq(CCR6, ret_is_double); 349 350 // default: 351 __ stw(R3_RET, 0, r_arg_result_addr); 352 __ blr(); // return to caller 353 354 // case T_OBJECT: 355 __ bind(ret_is_object); 356 __ std(R3_RET, 0, r_arg_result_addr); 357 __ blr(); // return to caller 358 359 // case T_LONG: 360 __ bind(ret_is_long); 361 __ std(R3_RET, 0, r_arg_result_addr); 362 __ blr(); // return to caller 363 364 // case T_FLOAT: 365 __ bind(ret_is_float); 366 __ stfs(F1_RET, 0, r_arg_result_addr); 367 __ blr(); // return to caller 368 369 // case T_DOUBLE: 370 __ bind(ret_is_double); 371 __ stfd(F1_RET, 0, r_arg_result_addr); 372 __ blr(); // return to caller 373 } 374 375 return start; 376 } 377 378 // Return point for a Java call if there's an exception thrown in 379 // Java code. The exception is caught and transformed into a 380 // pending exception stored in JavaThread that can be tested from 381 // within the VM. 
382 // 383 address generate_catch_exception() { 384 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 385 386 address start = __ pc(); 387 388 // Registers alive 389 // 390 // R16_thread 391 // R3_ARG1 - address of pending exception 392 // R4_ARG2 - return address in call stub 393 394 const Register exception_file = R21_tmp1; 395 const Register exception_line = R22_tmp2; 396 397 __ load_const(exception_file, (void*)__FILE__); 398 __ load_const(exception_line, (void*)__LINE__); 399 400 __ std(R3_ARG1, thread_(pending_exception)); 401 // store into `char *' 402 __ std(exception_file, thread_(exception_file)); 403 // store into `int' 404 __ stw(exception_line, thread_(exception_line)); 405 406 // complete return to VM 407 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); 408 409 __ mtlr(R4_ARG2); 410 // continue in call stub 411 __ blr(); 412 413 return start; 414 } 415 416 // Continuation point for runtime calls returning with a pending 417 // exception. The pending exception check happened in the runtime 418 // or native call stub. The pending exception in Thread is 419 // converted into a Java-level exception. 420 // 421 address generate_forward_exception() { 422 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 423 address start = __ pc(); 424 425 #if !defined(PRODUCT) 426 if (VerifyOops) { 427 // Get pending exception oop. 428 __ ld(R3_ARG1, 429 in_bytes(Thread::pending_exception_offset()), 430 R16_thread); 431 // Make sure that this code is only executed if there is a pending exception. 432 { 433 Label L; 434 __ cmpdi(CCR0, R3_ARG1, 0); 435 __ bne(CCR0, L); 436 __ stop("StubRoutines::forward exception: no pending exception (1)"); 437 __ bind(L); 438 } 439 __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop"); 440 } 441 #endif 442 443 // Save LR/CR and copy exception pc (LR) into R4_ARG2. 444 __ save_LR_CR(R4_ARG2); 445 __ push_frame_reg_args(0, R0); 446 // Find exception handler. 447 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 448 SharedRuntime::exception_handler_for_return_address), 449 R16_thread, 450 R4_ARG2); 451 // Copy handler's address. 452 __ mtctr(R3_RET); 453 __ pop_frame(); 454 __ restore_LR_CR(R0); 455 456 // Set up the arguments for the exception handler: 457 // - R3_ARG1: exception oop 458 // - R4_ARG2: exception pc. 459 460 // Load pending exception oop. 461 __ ld(R3_ARG1, 462 in_bytes(Thread::pending_exception_offset()), 463 R16_thread); 464 465 // The exception pc is the return address in the caller. 466 // Must load it into R4_ARG2. 467 __ mflr(R4_ARG2); 468 469 #ifdef ASSERT 470 // Make sure exception is set. 471 { 472 Label L; 473 __ cmpdi(CCR0, R3_ARG1, 0); 474 __ bne(CCR0, L); 475 __ stop("StubRoutines::forward exception: no pending exception (2)"); 476 __ bind(L); 477 } 478 #endif 479 480 // Clear the pending exception. 481 __ li(R0, 0); 482 __ std(R0, 483 in_bytes(Thread::pending_exception_offset()), 484 R16_thread); 485 // Jump to exception handler. 486 __ bctr(); 487 488 return start; 489 } 490 491 #undef __ 492 #define __ masm-> 493 // Continuation point for throwing of implicit exceptions that are 494 // not handled in the current activation. Fabricates an exception 495 // oop and initiates normal exception dispatching in this 496 // frame. Only callee-saved registers are preserved (through the 497 // normal register window / RegisterMap handling). 
If the compiler 498 // needs all registers to be preserved between the fault point and 499 // the exception handler then it must assume responsibility for that 500 // in AbstractCompiler::continuation_for_implicit_null_exception or 501 // continuation_for_implicit_division_by_zero_exception. All other 502 // implicit exceptions (e.g., NullPointerException or 503 // AbstractMethodError on entry) are either at call sites or 504 // otherwise assume that stack unwinding will be initiated, so 505 // caller saved registers were assumed volatile in the compiler. 506 // 507 // Note that we generate only this stub into a RuntimeStub, because 508 // it needs to be properly traversed and ignored during GC, so we 509 // change the meaning of the "__" macro within this method. 510 // 511 // Note: the routine set_pc_not_at_call_for_caller in 512 // SharedRuntime.cpp requires that this code be generated into a 513 // RuntimeStub. 514 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc, 515 Register arg1 = noreg, Register arg2 = noreg) { 516 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); 517 MacroAssembler* masm = new MacroAssembler(&code); 518 519 OopMapSet* oop_maps = new OopMapSet(); 520 int frame_size_in_bytes = frame::abi_reg_args_size; 521 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 522 523 StubCodeMark mark(this, "StubRoutines", "throw_exception"); 524 525 address start = __ pc(); 526 527 __ save_LR_CR(R11_scratch1); 528 529 // Push a frame. 530 __ push_frame_reg_args(0, R11_scratch1); 531 532 address frame_complete_pc = __ pc(); 533 534 if (restore_saved_exception_pc) { 535 __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74); 536 } 537 538 // Note that we always have a runtime stub frame on the top of 539 // stack by this point. Remember the offset of the instruction 540 // whose address will be moved to R11_scratch1. 541 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 542 543 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 544 545 __ mr(R3_ARG1, R16_thread); 546 if (arg1 != noreg) { 547 __ mr(R4_ARG2, arg1); 548 } 549 if (arg2 != noreg) { 550 __ mr(R5_ARG3, arg2); 551 } 552 #if defined(ABI_ELFv2) 553 __ call_c(runtime_entry, relocInfo::none); 554 #else 555 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none); 556 #endif 557 558 // Set an oopmap for the call site. 559 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 560 561 __ reset_last_Java_frame(); 562 563 #ifdef ASSERT 564 // Make sure that this code is only executed if there is a pending 565 // exception. 566 { 567 Label L; 568 __ ld(R0, 569 in_bytes(Thread::pending_exception_offset()), 570 R16_thread); 571 __ cmpdi(CCR0, R0, 0); 572 __ bne(CCR0, L); 573 __ stop("StubRoutines::throw_exception: no pending exception"); 574 __ bind(L); 575 } 576 #endif 577 578 // Pop frame. 579 __ pop_frame(); 580 581 __ restore_LR_CR(R11_scratch1); 582 583 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); 584 __ mtctr(R11_scratch1); 585 __ bctr(); 586 587 // Create runtime stub with OopMap. 588 RuntimeStub* stub = 589 RuntimeStub::new_runtime_stub(name, &code, 590 /*frame_complete=*/ (int)(frame_complete_pc - start), 591 frame_size_in_bytes/wordSize, 592 oop_maps, 593 false); 594 return stub->entry_point(); 595 } 596 #undef __ 597 #define __ _masm-> 598 599 // Generate G1 pre-write barrier for array. 
600 // 601 // Input: 602 // from - register containing src address (only needed for spilling) 603 // to - register containing starting address 604 // count - register containing element count 605 // tmp - scratch register 606 // 607 // Kills: 608 // nothing 609 // 610 void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) { 611 BarrierSet* const bs = Universe::heap()->barrier_set(); 612 switch (bs->kind()) { 613 case BarrierSet::G1SATBCT: 614 case BarrierSet::G1SATBCTLogging: 615 // With G1, don't generate the call if we statically know that the target in uninitialized 616 if (!dest_uninitialized) { 617 const int spill_slots = 4 * wordSize; 618 const int frame_size = frame::abi_reg_args_size + spill_slots; 619 Label filtered; 620 621 // Is marking active? 622 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { 623 __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread); 624 } else { 625 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); 626 __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread); 627 } 628 __ cmpdi(CCR0, Rtmp1, 0); 629 __ beq(CCR0, filtered); 630 631 __ save_LR_CR(R0); 632 __ push_frame_reg_args(spill_slots, R0); 633 __ std(from, frame_size - 1 * wordSize, R1_SP); 634 __ std(to, frame_size - 2 * wordSize, R1_SP); 635 __ std(count, frame_size - 3 * wordSize, R1_SP); 636 637 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count); 638 639 __ ld(from, frame_size - 1 * wordSize, R1_SP); 640 __ ld(to, frame_size - 2 * wordSize, R1_SP); 641 __ ld(count, frame_size - 3 * wordSize, R1_SP); 642 __ pop_frame(); 643 __ restore_LR_CR(R0); 644 645 __ bind(filtered); 646 } 647 break; 648 case BarrierSet::CardTableModRef: 649 case BarrierSet::CardTableExtension: 650 case BarrierSet::ModRef: 651 case BarrierSet::Epsilon: 652 break; 653 default: 654 ShouldNotReachHere(); 655 } 656 } 657 658 // Generate CMS/G1 post-write barrier for array. 659 // 660 // Input: 661 // addr - register containing starting address 662 // count - register containing element count 663 // tmp - scratch register 664 // 665 // The input registers and R0 are overwritten. 666 // 667 void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) { 668 BarrierSet* const bs = Universe::heap()->barrier_set(); 669 670 switch (bs->kind()) { 671 case BarrierSet::G1SATBCT: 672 case BarrierSet::G1SATBCTLogging: 673 { 674 if (branchToEnd) { 675 __ save_LR_CR(R0); 676 // We need this frame only to spill LR. 677 __ push_frame_reg_args(0, R0); 678 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); 679 __ pop_frame(); 680 __ restore_LR_CR(R0); 681 } else { 682 // Tail call: fake call from stub caller by branching without linking. 
683 address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post); 684 __ mr_if_needed(R3_ARG1, addr); 685 __ mr_if_needed(R4_ARG2, count); 686 __ load_const(R11, entry_point, R0); 687 __ call_c_and_return_to_caller(R11); 688 } 689 } 690 break; 691 case BarrierSet::CardTableModRef: 692 case BarrierSet::CardTableExtension: 693 { 694 Label Lskip_loop, Lstore_loop; 695 if (UseConcMarkSweepGC) { 696 // TODO PPC port: contribute optimization / requires shared changes 697 __ release(); 698 } 699 700 CardTableModRefBS* const ct = (CardTableModRefBS*)bs; 701 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 702 assert_different_registers(addr, count, tmp); 703 704 __ sldi(count, count, LogBytesPerHeapOop); 705 __ addi(count, count, -BytesPerHeapOop); 706 __ add(count, addr, count); 707 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) 708 __ srdi(addr, addr, CardTableModRefBS::card_shift); 709 __ srdi(count, count, CardTableModRefBS::card_shift); 710 __ subf(count, addr, count); 711 assert_different_registers(R0, addr, count, tmp); 712 __ load_const(tmp, (address)ct->byte_map_base); 713 __ addic_(count, count, 1); 714 __ beq(CCR0, Lskip_loop); 715 __ li(R0, 0); 716 __ mtctr(count); 717 // Byte store loop 718 __ bind(Lstore_loop); 719 __ stbx(R0, tmp, addr); 720 __ addi(addr, addr, 1); 721 __ bdnz(Lstore_loop); 722 __ bind(Lskip_loop); 723 724 if (!branchToEnd) __ blr(); 725 } 726 break; 727 case BarrierSet::ModRef: 728 case BarrierSet::Epsilon: 729 if (!branchToEnd) __ blr(); 730 break; 731 default: 732 ShouldNotReachHere(); 733 } 734 } 735 736 // Support for void zero_words_aligned8(HeapWord* to, size_t count) 737 // 738 // Arguments: 739 // to: 740 // count: 741 // 742 // Destroys: 743 // 744 address generate_zero_words_aligned8() { 745 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8"); 746 747 // Implemented as in ClearArray. 748 address start = __ function_entry(); 749 750 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned) 751 Register cnt_dwords_reg = R4_ARG2; // count (in dwords) 752 Register tmp1_reg = R5_ARG3; 753 Register tmp2_reg = R6_ARG4; 754 Register zero_reg = R7_ARG5; 755 756 // Procedure for large arrays (uses data cache block zero instruction). 757 Label dwloop, fast, fastloop, restloop, lastdword, done; 758 int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords); 759 int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines. 760 761 // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16. 762 __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ... 763 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if number of dwords is even. 764 __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords 765 __ load_const_optimized(zero_reg, 0L); // Use as zero register. 766 767 __ cmpdi(CCR1, tmp2_reg, 0); // cnt_dwords even? 768 __ beq(CCR0, lastdword); // size <= 1 769 __ mtctr(tmp1_reg); // Speculatively preload counter for rest loop (>0). 770 __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included? 771 __ neg(tmp1_reg, base_ptr_reg); // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000 772 773 __ blt(CCR0, restloop); // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.) 
774 __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16. 775 776 __ beq(CCR0, fast); // already 128byte aligned 777 __ mtctr(tmp1_reg); // Set ctr to hit 128byte boundary (0<ctr<cnt). 778 __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8) 779 780 // Clear in first cache line dword-by-dword if not already 128byte aligned. 781 __ bind(dwloop); 782 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. 783 __ addi(base_ptr_reg, base_ptr_reg, 8); 784 __ bdnz(dwloop); 785 786 // clear 128byte blocks 787 __ bind(fast); 788 __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8) 789 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if rest even 790 791 __ mtctr(tmp1_reg); // load counter 792 __ cmpdi(CCR1, tmp2_reg, 0); // rest even? 793 __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords 794 795 __ bind(fastloop); 796 __ dcbz(base_ptr_reg); // Clear 128byte aligned block. 797 __ addi(base_ptr_reg, base_ptr_reg, cl_size); 798 __ bdnz(fastloop); 799 800 //__ dcbtst(base_ptr_reg); // Indicate write access to last cache line. 801 __ beq(CCR0, lastdword); // rest<=1 802 __ mtctr(tmp1_reg); // load counter 803 804 // Clear rest. 805 __ bind(restloop); 806 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. 807 __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block. 808 __ addi(base_ptr_reg, base_ptr_reg, 16); 809 __ bdnz(restloop); 810 811 __ bind(lastdword); 812 __ beq(CCR1, done); 813 __ std(zero_reg, 0, base_ptr_reg); 814 __ bind(done); 815 __ blr(); // return 816 817 return start; 818 } 819 820 // The following routine generates a subroutine to throw an asynchronous 821 // UnknownError when an unsafe access gets a fault that could not be 822 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) 823 // 824 address generate_handler_for_unsafe_access() { 825 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 826 address start = __ function_entry(); 827 __ unimplemented("StubRoutines::handler_for_unsafe_access", 93); 828 return start; 829 } 830 831 #if !defined(PRODUCT) 832 // Wrapper which calls oopDesc::is_oop_or_null() 833 // Only called by MacroAssembler::verify_oop 834 static void verify_oop_helper(const char* message, oop o) { 835 if (!o->is_oop_or_null()) { 836 fatal(message); 837 } 838 ++ StubRoutines::_verify_oop_count; 839 } 840 #endif 841 842 // Return address of code to be called from code generated by 843 // MacroAssembler::verify_oop. 844 // 845 // Don't generate, rather use C++ code. 846 address generate_verify_oop() { 847 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 848 849 // this is actually a `FunctionDescriptor*'. 850 address start = 0; 851 852 #if !defined(PRODUCT) 853 start = CAST_FROM_FN_PTR(address, verify_oop_helper); 854 #endif 855 856 return start; 857 } 858 859 // Fairer handling of safepoints for native methods. 860 // 861 // Generate code which reads from the polling page. This special handling is needed as the 862 // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode 863 // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try 864 // to read from the safepoint polling page. 
865 address generate_load_from_poll() { 866 StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll"); 867 address start = __ function_entry(); 868 __ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port 869 return start; 870 } 871 872 // -XX:+OptimizeFill : convert fill/copy loops into intrinsic 873 // 874 // The code is implemented(ported from sparc) as we believe it benefits JVM98, however 875 // tracing(-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all! 876 // 877 // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition 878 // for turning on loop predication optimization, and hence the behavior of "array range check" 879 // and "loop invariant check" could be influenced, which potentially boosted JVM98. 880 // 881 // Generate stub for disjoint short fill. If "aligned" is true, the 882 // "to" address is assumed to be heapword aligned. 883 // 884 // Arguments for generated stub: 885 // to: R3_ARG1 886 // value: R4_ARG2 887 // count: R5_ARG3 treated as signed 888 // 889 address generate_fill(BasicType t, bool aligned, const char* name) { 890 StubCodeMark mark(this, "StubRoutines", name); 891 address start = __ function_entry(); 892 893 const Register to = R3_ARG1; // source array address 894 const Register value = R4_ARG2; // fill value 895 const Register count = R5_ARG3; // elements count 896 const Register temp = R6_ARG4; // temp register 897 898 //assert_clean_int(count, O3); // Make sure 'count' is clean int. 899 900 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte; 901 Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes; 902 903 int shift = -1; 904 switch (t) { 905 case T_BYTE: 906 shift = 2; 907 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes). 908 __ rldimi(value, value, 8, 48); // 8 bit -> 16 bit 909 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 910 __ blt(CCR0, L_fill_elements); 911 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit 912 break; 913 case T_SHORT: 914 shift = 1; 915 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes). 916 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit 917 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 918 __ blt(CCR0, L_fill_elements); 919 break; 920 case T_INT: 921 shift = 0; 922 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element. 923 __ blt(CCR0, L_fill_4_bytes); 924 break; 925 default: ShouldNotReachHere(); 926 } 927 928 if (!aligned && (t == T_BYTE || t == T_SHORT)) { 929 // Align source address at 4 bytes address boundary. 930 if (t == T_BYTE) { 931 // One byte misalignment happens only for byte arrays. 932 __ andi_(temp, to, 1); 933 __ beq(CCR0, L_skip_align1); 934 __ stb(value, 0, to); 935 __ addi(to, to, 1); 936 __ addi(count, count, -1); 937 __ bind(L_skip_align1); 938 } 939 // Two bytes misalignment happens only for byte and short (char) arrays. 940 __ andi_(temp, to, 2); 941 __ beq(CCR0, L_skip_align2); 942 __ sth(value, 0, to); 943 __ addi(to, to, 2); 944 __ addi(count, count, -(1 << (shift - 1))); 945 __ bind(L_skip_align2); 946 } 947 948 if (!aligned) { 949 // Align to 8 bytes, we know we are 4 byte aligned to start. 
950 __ andi_(temp, to, 7); 951 __ beq(CCR0, L_fill_32_bytes); 952 __ stw(value, 0, to); 953 __ addi(to, to, 4); 954 __ addi(count, count, -(1 << shift)); 955 __ bind(L_fill_32_bytes); 956 } 957 958 __ li(temp, 8<<shift); // Prepare for 32 byte loop. 959 // Clone bytes int->long as above. 960 __ rldimi(value, value, 32, 0); // 32 bit -> 64 bit 961 962 Label L_check_fill_8_bytes; 963 // Fill 32-byte chunks. 964 __ subf_(count, temp, count); 965 __ blt(CCR0, L_check_fill_8_bytes); 966 967 Label L_fill_32_bytes_loop; 968 __ align(32); 969 __ bind(L_fill_32_bytes_loop); 970 971 __ std(value, 0, to); 972 __ std(value, 8, to); 973 __ subf_(count, temp, count); // Update count. 974 __ std(value, 16, to); 975 __ std(value, 24, to); 976 977 __ addi(to, to, 32); 978 __ bge(CCR0, L_fill_32_bytes_loop); 979 980 __ bind(L_check_fill_8_bytes); 981 __ add_(count, temp, count); 982 __ beq(CCR0, L_exit); 983 __ addic_(count, count, -(2 << shift)); 984 __ blt(CCR0, L_fill_4_bytes); 985 986 // 987 // Length is too short, just fill 8 bytes at a time. 988 // 989 Label L_fill_8_bytes_loop; 990 __ bind(L_fill_8_bytes_loop); 991 __ std(value, 0, to); 992 __ addic_(count, count, -(2 << shift)); 993 __ addi(to, to, 8); 994 __ bge(CCR0, L_fill_8_bytes_loop); 995 996 // Fill trailing 4 bytes. 997 __ bind(L_fill_4_bytes); 998 __ andi_(temp, count, 1<<shift); 999 __ beq(CCR0, L_fill_2_bytes); 1000 1001 __ stw(value, 0, to); 1002 if (t == T_BYTE || t == T_SHORT) { 1003 __ addi(to, to, 4); 1004 // Fill trailing 2 bytes. 1005 __ bind(L_fill_2_bytes); 1006 __ andi_(temp, count, 1<<(shift-1)); 1007 __ beq(CCR0, L_fill_byte); 1008 __ sth(value, 0, to); 1009 if (t == T_BYTE) { 1010 __ addi(to, to, 2); 1011 // Fill trailing byte. 1012 __ bind(L_fill_byte); 1013 __ andi_(count, count, 1); 1014 __ beq(CCR0, L_exit); 1015 __ stb(value, 0, to); 1016 } else { 1017 __ bind(L_fill_byte); 1018 } 1019 } else { 1020 __ bind(L_fill_2_bytes); 1021 } 1022 __ bind(L_exit); 1023 __ blr(); 1024 1025 // Handle copies less than 8 bytes. Int is handled elsewhere. 1026 if (t == T_BYTE) { 1027 __ bind(L_fill_elements); 1028 Label L_fill_2, L_fill_4; 1029 __ andi_(temp, count, 1); 1030 __ beq(CCR0, L_fill_2); 1031 __ stb(value, 0, to); 1032 __ addi(to, to, 1); 1033 __ bind(L_fill_2); 1034 __ andi_(temp, count, 2); 1035 __ beq(CCR0, L_fill_4); 1036 __ stb(value, 0, to); 1037 __ stb(value, 0, to); 1038 __ addi(to, to, 2); 1039 __ bind(L_fill_4); 1040 __ andi_(temp, count, 4); 1041 __ beq(CCR0, L_exit); 1042 __ stb(value, 0, to); 1043 __ stb(value, 1, to); 1044 __ stb(value, 2, to); 1045 __ stb(value, 3, to); 1046 __ blr(); 1047 } 1048 1049 if (t == T_SHORT) { 1050 Label L_fill_2; 1051 __ bind(L_fill_elements); 1052 __ andi_(temp, count, 1); 1053 __ beq(CCR0, L_fill_2); 1054 __ sth(value, 0, to); 1055 __ addi(to, to, 2); 1056 __ bind(L_fill_2); 1057 __ andi_(temp, count, 2); 1058 __ beq(CCR0, L_exit); 1059 __ sth(value, 0, to); 1060 __ sth(value, 2, to); 1061 __ blr(); 1062 } 1063 return start; 1064 } 1065 1066 1067 // Generate overlap test for array copy stubs. 
1068 // 1069 // Input: 1070 // R3_ARG1 - from 1071 // R4_ARG2 - to 1072 // R5_ARG3 - element count 1073 // 1074 void array_overlap_test(address no_overlap_target, int log2_elem_size) { 1075 Register tmp1 = R6_ARG4; 1076 Register tmp2 = R7_ARG5; 1077 1078 Label l_overlap; 1079 #ifdef ASSERT 1080 __ srdi_(tmp2, R5_ARG3, 31); 1081 __ asm_assert_eq("missing zero extend", 0xAFFE); 1082 #endif 1083 1084 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes 1085 __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes 1086 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison! 1087 __ cmpld(CCR1, tmp1, tmp2); 1088 __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0); 1089 __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size. 1090 1091 // need to copy forwards 1092 if (__ is_within_range_of_b(no_overlap_target, __ pc())) { 1093 __ b(no_overlap_target); 1094 } else { 1095 __ load_const(tmp1, no_overlap_target, tmp2); 1096 __ mtctr(tmp1); 1097 __ bctr(); 1098 } 1099 1100 __ bind(l_overlap); 1101 // need to copy backwards 1102 } 1103 1104 // The guideline in the implementations of generate_disjoint_xxx_copy 1105 // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with 1106 // single instructions, but to avoid alignment interrupts (see subsequent 1107 // comment). Furthermore, we try to minimize misaligned access, even 1108 // though they cause no alignment interrupt. 1109 // 1110 // In Big-Endian mode, the PowerPC architecture requires implementations to 1111 // handle automatically misaligned integer halfword and word accesses, 1112 // word-aligned integer doubleword accesses, and word-aligned floating-point 1113 // accesses. Other accesses may or may not generate an Alignment interrupt 1114 // depending on the implementation. 1115 // Alignment interrupt handling may require on the order of hundreds of cycles, 1116 // so every effort should be made to avoid misaligned memory values. 1117 // 1118 // 1119 // Generate stub for disjoint byte copy. If "aligned" is true, the 1120 // "from" and "to" addresses are assumed to be heapword aligned. 1121 // 1122 // Arguments for generated stub: 1123 // from: R3_ARG1 1124 // to: R4_ARG2 1125 // count: R5_ARG3 treated as signed 1126 // 1127 address generate_disjoint_byte_copy(bool aligned, const char * name) { 1128 StubCodeMark mark(this, "StubRoutines", name); 1129 address start = __ function_entry(); 1130 1131 Register tmp1 = R6_ARG4; 1132 Register tmp2 = R7_ARG5; 1133 Register tmp3 = R8_ARG6; 1134 Register tmp4 = R9_ARG7; 1135 1136 1137 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9; 1138 // Don't try anything fancy if arrays don't have many elements. 1139 __ li(tmp3, 0); 1140 __ cmpwi(CCR0, R5_ARG3, 17); 1141 __ ble(CCR0, l_6); // copy 4 at a time 1142 1143 if (!aligned) { 1144 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1145 __ andi_(tmp1, tmp1, 3); 1146 __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy. 1147 1148 // Copy elements if necessary to align to 4 bytes. 1149 __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary. 
1150 __ andi_(tmp1, tmp1, 3); 1151 __ beq(CCR0, l_2); 1152 1153 __ subf(R5_ARG3, tmp1, R5_ARG3); 1154 __ bind(l_9); 1155 __ lbz(tmp2, 0, R3_ARG1); 1156 __ addic_(tmp1, tmp1, -1); 1157 __ stb(tmp2, 0, R4_ARG2); 1158 __ addi(R3_ARG1, R3_ARG1, 1); 1159 __ addi(R4_ARG2, R4_ARG2, 1); 1160 __ bne(CCR0, l_9); 1161 1162 __ bind(l_2); 1163 } 1164 1165 // copy 8 elements at a time 1166 __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8 1167 __ andi_(tmp1, tmp2, 7); 1168 __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8 1169 1170 // copy a 2-element word if necessary to align to 8 bytes 1171 __ andi_(R0, R3_ARG1, 7); 1172 __ beq(CCR0, l_7); 1173 1174 __ lwzx(tmp2, R3_ARG1, tmp3); 1175 __ addi(R5_ARG3, R5_ARG3, -4); 1176 __ stwx(tmp2, R4_ARG2, tmp3); 1177 { // FasterArrayCopy 1178 __ addi(R3_ARG1, R3_ARG1, 4); 1179 __ addi(R4_ARG2, R4_ARG2, 4); 1180 } 1181 __ bind(l_7); 1182 1183 { // FasterArrayCopy 1184 __ cmpwi(CCR0, R5_ARG3, 31); 1185 __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain 1186 1187 __ srdi(tmp1, R5_ARG3, 5); 1188 __ andi_(R5_ARG3, R5_ARG3, 31); 1189 __ mtctr(tmp1); 1190 1191 __ bind(l_8); 1192 // Use unrolled version for mass copying (copy 32 elements a time) 1193 // Load feeding store gets zero latency on Power6, however not on Power5. 1194 // Therefore, the following sequence is made for the good of both. 1195 __ ld(tmp1, 0, R3_ARG1); 1196 __ ld(tmp2, 8, R3_ARG1); 1197 __ ld(tmp3, 16, R3_ARG1); 1198 __ ld(tmp4, 24, R3_ARG1); 1199 __ std(tmp1, 0, R4_ARG2); 1200 __ std(tmp2, 8, R4_ARG2); 1201 __ std(tmp3, 16, R4_ARG2); 1202 __ std(tmp4, 24, R4_ARG2); 1203 __ addi(R3_ARG1, R3_ARG1, 32); 1204 __ addi(R4_ARG2, R4_ARG2, 32); 1205 __ bdnz(l_8); 1206 } 1207 1208 __ bind(l_6); 1209 1210 // copy 4 elements at a time 1211 __ cmpwi(CCR0, R5_ARG3, 4); 1212 __ blt(CCR0, l_1); 1213 __ srdi(tmp1, R5_ARG3, 2); 1214 __ mtctr(tmp1); // is > 0 1215 __ andi_(R5_ARG3, R5_ARG3, 3); 1216 1217 { // FasterArrayCopy 1218 __ addi(R3_ARG1, R3_ARG1, -4); 1219 __ addi(R4_ARG2, R4_ARG2, -4); 1220 __ bind(l_3); 1221 __ lwzu(tmp2, 4, R3_ARG1); 1222 __ stwu(tmp2, 4, R4_ARG2); 1223 __ bdnz(l_3); 1224 __ addi(R3_ARG1, R3_ARG1, 4); 1225 __ addi(R4_ARG2, R4_ARG2, 4); 1226 } 1227 1228 // do single element copy 1229 __ bind(l_1); 1230 __ cmpwi(CCR0, R5_ARG3, 0); 1231 __ beq(CCR0, l_4); 1232 1233 { // FasterArrayCopy 1234 __ mtctr(R5_ARG3); 1235 __ addi(R3_ARG1, R3_ARG1, -1); 1236 __ addi(R4_ARG2, R4_ARG2, -1); 1237 1238 __ bind(l_5); 1239 __ lbzu(tmp2, 1, R3_ARG1); 1240 __ stbu(tmp2, 1, R4_ARG2); 1241 __ bdnz(l_5); 1242 } 1243 1244 __ bind(l_4); 1245 __ blr(); 1246 1247 return start; 1248 } 1249 1250 // Generate stub for conjoint byte copy. If "aligned" is true, the 1251 // "from" and "to" addresses are assumed to be heapword aligned. 1252 // 1253 // Arguments for generated stub: 1254 // from: R3_ARG1 1255 // to: R4_ARG2 1256 // count: R5_ARG3 treated as signed 1257 // 1258 address generate_conjoint_byte_copy(bool aligned, const char * name) { 1259 StubCodeMark mark(this, "StubRoutines", name); 1260 address start = __ function_entry(); 1261 1262 Register tmp1 = R6_ARG4; 1263 Register tmp2 = R7_ARG5; 1264 Register tmp3 = R8_ARG6; 1265 1266 #if defined(ABI_ELFv2) 1267 address nooverlap_target = aligned ? 1268 StubRoutines::arrayof_jbyte_disjoint_arraycopy() : 1269 StubRoutines::jbyte_disjoint_arraycopy(); 1270 #else 1271 address nooverlap_target = aligned ? 
1272 ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() : 1273 ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry(); 1274 #endif 1275 1276 array_overlap_test(nooverlap_target, 0); 1277 // Do reverse copy. We assume the case of actual overlap is rare enough 1278 // that we don't have to optimize it. 1279 Label l_1, l_2; 1280 1281 __ b(l_2); 1282 __ bind(l_1); 1283 __ stbx(tmp1, R4_ARG2, R5_ARG3); 1284 __ bind(l_2); 1285 __ addic_(R5_ARG3, R5_ARG3, -1); 1286 __ lbzx(tmp1, R3_ARG1, R5_ARG3); 1287 __ bge(CCR0, l_1); 1288 1289 __ blr(); 1290 1291 return start; 1292 } 1293 1294 // Generate stub for disjoint short copy. If "aligned" is true, the 1295 // "from" and "to" addresses are assumed to be heapword aligned. 1296 // 1297 // Arguments for generated stub: 1298 // from: R3_ARG1 1299 // to: R4_ARG2 1300 // elm.count: R5_ARG3 treated as signed 1301 // 1302 // Strategy for aligned==true: 1303 // 1304 // If length <= 9: 1305 // 1. copy 2 elements at a time (l_6) 1306 // 2. copy last element if original element count was odd (l_1) 1307 // 1308 // If length > 9: 1309 // 1. copy 4 elements at a time until less than 4 elements are left (l_7) 1310 // 2. copy 2 elements at a time until less than 2 elements are left (l_6) 1311 // 3. copy last element if one was left in step 2. (l_1) 1312 // 1313 // 1314 // Strategy for aligned==false: 1315 // 1316 // If length <= 9: same as aligned==true case, but NOTE: load/stores 1317 // can be unaligned (see comment below) 1318 // 1319 // If length > 9: 1320 // 1. continue with step 6. if the alignment of from and to mod 4 1321 // is different. 1322 // 2. align from and to to 4 bytes by copying 1 element if necessary 1323 // 3. at l_2 from and to are 4 byte aligned; continue with 1324 // 5. if they cannot be aligned to 8 bytes because they have 1325 // got different alignment mod 8. 1326 // 4. at this point we know that both, from and to, have the same 1327 // alignment mod 8, now copy one element if necessary to get 1328 // 8 byte alignment of from and to. 1329 // 5. copy 4 elements at a time until less than 4 elements are 1330 // left; depending on step 3. all load/stores are aligned or 1331 // either all loads or all stores are unaligned. 1332 // 6. copy 2 elements at a time until less than 2 elements are 1333 // left (l_6); arriving here from step 1., there is a chance 1334 // that all accesses are unaligned. 1335 // 7. copy last element if one was left in step 6. (l_1) 1336 // 1337 // There are unaligned data accesses using integer load/store 1338 // instructions in this stub. POWER allows such accesses. 1339 // 1340 // According to the manuals (PowerISA_V2.06_PUBLIC, Book II, 1341 // Chapter 2: Effect of Operand Placement on Performance) unaligned 1342 // integer load/stores have good performance. Only unaligned 1343 // floating point load/stores can have poor performance. 1344 // 1345 // TODO: 1346 // 1347 // 1. 
check if aligning the backbranch target of loops is beneficial 1348 // 1349 address generate_disjoint_short_copy(bool aligned, const char * name) { 1350 StubCodeMark mark(this, "StubRoutines", name); 1351 1352 Register tmp1 = R6_ARG4; 1353 Register tmp2 = R7_ARG5; 1354 Register tmp3 = R8_ARG6; 1355 Register tmp4 = R9_ARG7; 1356 1357 address start = __ function_entry(); 1358 1359 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8; 1360 // don't try anything fancy if arrays don't have many elements 1361 __ li(tmp3, 0); 1362 __ cmpwi(CCR0, R5_ARG3, 9); 1363 __ ble(CCR0, l_6); // copy 2 at a time 1364 1365 if (!aligned) { 1366 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1367 __ andi_(tmp1, tmp1, 3); 1368 __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy 1369 1370 // At this point it is guaranteed that both, from and to have the same alignment mod 4. 1371 1372 // Copy 1 element if necessary to align to 4 bytes. 1373 __ andi_(tmp1, R3_ARG1, 3); 1374 __ beq(CCR0, l_2); 1375 1376 __ lhz(tmp2, 0, R3_ARG1); 1377 __ addi(R3_ARG1, R3_ARG1, 2); 1378 __ sth(tmp2, 0, R4_ARG2); 1379 __ addi(R4_ARG2, R4_ARG2, 2); 1380 __ addi(R5_ARG3, R5_ARG3, -1); 1381 __ bind(l_2); 1382 1383 // At this point the positions of both, from and to, are at least 4 byte aligned. 1384 1385 // Copy 4 elements at a time. 1386 // Align to 8 bytes, but only if both, from and to, have same alignment mod 8. 1387 __ xorr(tmp2, R3_ARG1, R4_ARG2); 1388 __ andi_(tmp1, tmp2, 7); 1389 __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned 1390 1391 // Copy a 2-element word if necessary to align to 8 bytes. 1392 __ andi_(R0, R3_ARG1, 7); 1393 __ beq(CCR0, l_7); 1394 1395 __ lwzx(tmp2, R3_ARG1, tmp3); 1396 __ addi(R5_ARG3, R5_ARG3, -2); 1397 __ stwx(tmp2, R4_ARG2, tmp3); 1398 { // FasterArrayCopy 1399 __ addi(R3_ARG1, R3_ARG1, 4); 1400 __ addi(R4_ARG2, R4_ARG2, 4); 1401 } 1402 } 1403 1404 __ bind(l_7); 1405 1406 // Copy 4 elements at a time; either the loads or the stores can 1407 // be unaligned if aligned == false. 1408 1409 { // FasterArrayCopy 1410 __ cmpwi(CCR0, R5_ARG3, 15); 1411 __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain 1412 1413 __ srdi(tmp1, R5_ARG3, 4); 1414 __ andi_(R5_ARG3, R5_ARG3, 15); 1415 __ mtctr(tmp1); 1416 1417 __ bind(l_8); 1418 // Use unrolled version for mass copying (copy 16 elements a time). 1419 // Load feeding store gets zero latency on Power6, however not on Power5. 1420 // Therefore, the following sequence is made for the good of both. 
1421 __ ld(tmp1, 0, R3_ARG1); 1422 __ ld(tmp2, 8, R3_ARG1); 1423 __ ld(tmp3, 16, R3_ARG1); 1424 __ ld(tmp4, 24, R3_ARG1); 1425 __ std(tmp1, 0, R4_ARG2); 1426 __ std(tmp2, 8, R4_ARG2); 1427 __ std(tmp3, 16, R4_ARG2); 1428 __ std(tmp4, 24, R4_ARG2); 1429 __ addi(R3_ARG1, R3_ARG1, 32); 1430 __ addi(R4_ARG2, R4_ARG2, 32); 1431 __ bdnz(l_8); 1432 } 1433 __ bind(l_6); 1434 1435 // copy 2 elements at a time 1436 { // FasterArrayCopy 1437 __ cmpwi(CCR0, R5_ARG3, 2); 1438 __ blt(CCR0, l_1); 1439 __ srdi(tmp1, R5_ARG3, 1); 1440 __ andi_(R5_ARG3, R5_ARG3, 1); 1441 1442 __ addi(R3_ARG1, R3_ARG1, -4); 1443 __ addi(R4_ARG2, R4_ARG2, -4); 1444 __ mtctr(tmp1); 1445 1446 __ bind(l_3); 1447 __ lwzu(tmp2, 4, R3_ARG1); 1448 __ stwu(tmp2, 4, R4_ARG2); 1449 __ bdnz(l_3); 1450 1451 __ addi(R3_ARG1, R3_ARG1, 4); 1452 __ addi(R4_ARG2, R4_ARG2, 4); 1453 } 1454 1455 // do single element copy 1456 __ bind(l_1); 1457 __ cmpwi(CCR0, R5_ARG3, 0); 1458 __ beq(CCR0, l_4); 1459 1460 { // FasterArrayCopy 1461 __ mtctr(R5_ARG3); 1462 __ addi(R3_ARG1, R3_ARG1, -2); 1463 __ addi(R4_ARG2, R4_ARG2, -2); 1464 1465 __ bind(l_5); 1466 __ lhzu(tmp2, 2, R3_ARG1); 1467 __ sthu(tmp2, 2, R4_ARG2); 1468 __ bdnz(l_5); 1469 } 1470 __ bind(l_4); 1471 __ blr(); 1472 1473 return start; 1474 } 1475 1476 // Generate stub for conjoint short copy. If "aligned" is true, the 1477 // "from" and "to" addresses are assumed to be heapword aligned. 1478 // 1479 // Arguments for generated stub: 1480 // from: R3_ARG1 1481 // to: R4_ARG2 1482 // count: R5_ARG3 treated as signed 1483 // 1484 address generate_conjoint_short_copy(bool aligned, const char * name) { 1485 StubCodeMark mark(this, "StubRoutines", name); 1486 address start = __ function_entry(); 1487 1488 Register tmp1 = R6_ARG4; 1489 Register tmp2 = R7_ARG5; 1490 Register tmp3 = R8_ARG6; 1491 1492 #if defined(ABI_ELFv2) 1493 address nooverlap_target = aligned ? 1494 StubRoutines::arrayof_jshort_disjoint_arraycopy() : 1495 StubRoutines::jshort_disjoint_arraycopy(); 1496 #else 1497 address nooverlap_target = aligned ? 1498 ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() : 1499 ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry(); 1500 #endif 1501 1502 array_overlap_test(nooverlap_target, 1); 1503 1504 Label l_1, l_2; 1505 __ sldi(tmp1, R5_ARG3, 1); 1506 __ b(l_2); 1507 __ bind(l_1); 1508 __ sthx(tmp2, R4_ARG2, tmp1); 1509 __ bind(l_2); 1510 __ addic_(tmp1, tmp1, -2); 1511 __ lhzx(tmp2, R3_ARG1, tmp1); 1512 __ bge(CCR0, l_1); 1513 1514 __ blr(); 1515 1516 return start; 1517 } 1518 1519 // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned" 1520 // is true, the "from" and "to" addresses are assumed to be heapword aligned. 1521 // 1522 // Arguments: 1523 // from: R3_ARG1 1524 // to: R4_ARG2 1525 // count: R5_ARG3 treated as signed 1526 // 1527 void generate_disjoint_int_copy_core(bool aligned) { 1528 Register tmp1 = R6_ARG4; 1529 Register tmp2 = R7_ARG5; 1530 Register tmp3 = R8_ARG6; 1531 Register tmp4 = R0; 1532 1533 Label l_1, l_2, l_3, l_4, l_5, l_6; 1534 // for short arrays, just do single element copy 1535 __ li(tmp3, 0); 1536 __ cmpwi(CCR0, R5_ARG3, 5); 1537 __ ble(CCR0, l_2); 1538 1539 if (!aligned) { 1540 // check if arrays have same alignment mod 8. 1541 __ xorr(tmp1, R3_ARG1, R4_ARG2); 1542 __ andi_(R0, tmp1, 7); 1543 // Not the same alignment, but ld and std just need to be 4 byte aligned. 
1544 __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time 1545 1546 // copy 1 element to align to and from on an 8 byte boundary 1547 __ andi_(R0, R3_ARG1, 7); 1548 __ beq(CCR0, l_4); 1549 1550 __ lwzx(tmp2, R3_ARG1, tmp3); 1551 __ addi(R5_ARG3, R5_ARG3, -1); 1552 __ stwx(tmp2, R4_ARG2, tmp3); 1553 { // FasterArrayCopy 1554 __ addi(R3_ARG1, R3_ARG1, 4); 1555 __ addi(R4_ARG2, R4_ARG2, 4); 1556 } 1557 __ bind(l_4); 1558 } 1559 1560 { // FasterArrayCopy 1561 __ cmpwi(CCR0, R5_ARG3, 7); 1562 __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain 1563 1564 __ srdi(tmp1, R5_ARG3, 3); 1565 __ andi_(R5_ARG3, R5_ARG3, 7); 1566 __ mtctr(tmp1); 1567 1568 __ bind(l_6); 1569 // Use unrolled version for mass copying (copy 8 elements a time). 1570 // Load feeding store gets zero latency on power6, however not on power 5. 1571 // Therefore, the following sequence is made for the good of both. 1572 __ ld(tmp1, 0, R3_ARG1); 1573 __ ld(tmp2, 8, R3_ARG1); 1574 __ ld(tmp3, 16, R3_ARG1); 1575 __ ld(tmp4, 24, R3_ARG1); 1576 __ std(tmp1, 0, R4_ARG2); 1577 __ std(tmp2, 8, R4_ARG2); 1578 __ std(tmp3, 16, R4_ARG2); 1579 __ std(tmp4, 24, R4_ARG2); 1580 __ addi(R3_ARG1, R3_ARG1, 32); 1581 __ addi(R4_ARG2, R4_ARG2, 32); 1582 __ bdnz(l_6); 1583 } 1584 1585 // copy 1 element at a time 1586 __ bind(l_2); 1587 __ cmpwi(CCR0, R5_ARG3, 0); 1588 __ beq(CCR0, l_1); 1589 1590 { // FasterArrayCopy 1591 __ mtctr(R5_ARG3); 1592 __ addi(R3_ARG1, R3_ARG1, -4); 1593 __ addi(R4_ARG2, R4_ARG2, -4); 1594 1595 __ bind(l_3); 1596 __ lwzu(tmp2, 4, R3_ARG1); 1597 __ stwu(tmp2, 4, R4_ARG2); 1598 __ bdnz(l_3); 1599 } 1600 1601 __ bind(l_1); 1602 return; 1603 } 1604 1605 // Generate stub for disjoint int copy. If "aligned" is true, the 1606 // "from" and "to" addresses are assumed to be heapword aligned. 1607 // 1608 // Arguments for generated stub: 1609 // from: R3_ARG1 1610 // to: R4_ARG2 1611 // count: R5_ARG3 treated as signed 1612 // 1613 address generate_disjoint_int_copy(bool aligned, const char * name) { 1614 StubCodeMark mark(this, "StubRoutines", name); 1615 address start = __ function_entry(); 1616 generate_disjoint_int_copy_core(aligned); 1617 __ blr(); 1618 return start; 1619 } 1620 1621 // Generate core code for conjoint int copy (and oop copy on 1622 // 32-bit). If "aligned" is true, the "from" and "to" addresses 1623 // are assumed to be heapword aligned. 1624 // 1625 // Arguments: 1626 // from: R3_ARG1 1627 // to: R4_ARG2 1628 // count: R5_ARG3 treated as signed 1629 // 1630 void generate_conjoint_int_copy_core(bool aligned) { 1631 // Do reverse copy. We assume the case of actual overlap is rare enough 1632 // that we don't have to optimize it. 1633 1634 Label l_1, l_2, l_3, l_4, l_5, l_6; 1635 1636 Register tmp1 = R6_ARG4; 1637 Register tmp2 = R7_ARG5; 1638 Register tmp3 = R8_ARG6; 1639 Register tmp4 = R0; 1640 1641 { // FasterArrayCopy 1642 __ cmpwi(CCR0, R5_ARG3, 0); 1643 __ beq(CCR0, l_6); 1644 1645 __ sldi(R5_ARG3, R5_ARG3, 2); 1646 __ add(R3_ARG1, R3_ARG1, R5_ARG3); 1647 __ add(R4_ARG2, R4_ARG2, R5_ARG3); 1648 __ srdi(R5_ARG3, R5_ARG3, 2); 1649 1650 __ cmpwi(CCR0, R5_ARG3, 7); 1651 __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain 1652 1653 __ srdi(tmp1, R5_ARG3, 3); 1654 __ andi(R5_ARG3, R5_ARG3, 7); 1655 __ mtctr(tmp1); 1656 1657 __ bind(l_4); 1658 // Use unrolled version for mass copying (copy 4 elements a time). 1659 // Load feeding store gets zero latency on Power6, however not on Power5. 1660 // Therefore, the following sequence is made for the good of both. 
1661 __ addi(R3_ARG1, R3_ARG1, -32); 1662 __ addi(R4_ARG2, R4_ARG2, -32); 1663 __ ld(tmp4, 24, R3_ARG1); 1664 __ ld(tmp3, 16, R3_ARG1); 1665 __ ld(tmp2, 8, R3_ARG1); 1666 __ ld(tmp1, 0, R3_ARG1); 1667 __ std(tmp4, 24, R4_ARG2); 1668 __ std(tmp3, 16, R4_ARG2); 1669 __ std(tmp2, 8, R4_ARG2); 1670 __ std(tmp1, 0, R4_ARG2); 1671 __ bdnz(l_4); 1672 1673 __ cmpwi(CCR0, R5_ARG3, 0); 1674 __ beq(CCR0, l_6); 1675 1676 __ bind(l_5); 1677 __ mtctr(R5_ARG3); 1678 __ bind(l_3); 1679 __ lwz(R0, -4, R3_ARG1); 1680 __ stw(R0, -4, R4_ARG2); 1681 __ addi(R3_ARG1, R3_ARG1, -4); 1682 __ addi(R4_ARG2, R4_ARG2, -4); 1683 __ bdnz(l_3); 1684 1685 __ bind(l_6); 1686 } 1687 } 1688 1689 // Generate stub for conjoint int copy. If "aligned" is true, the 1690 // "from" and "to" addresses are assumed to be heapword aligned. 1691 // 1692 // Arguments for generated stub: 1693 // from: R3_ARG1 1694 // to: R4_ARG2 1695 // count: R5_ARG3 treated as signed 1696 // 1697 address generate_conjoint_int_copy(bool aligned, const char * name) { 1698 StubCodeMark mark(this, "StubRoutines", name); 1699 address start = __ function_entry(); 1700 1701 #if defined(ABI_ELFv2) 1702 address nooverlap_target = aligned ? 1703 StubRoutines::arrayof_jint_disjoint_arraycopy() : 1704 StubRoutines::jint_disjoint_arraycopy(); 1705 #else 1706 address nooverlap_target = aligned ? 1707 ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() : 1708 ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry(); 1709 #endif 1710 1711 array_overlap_test(nooverlap_target, 2); 1712 1713 generate_conjoint_int_copy_core(aligned); 1714 1715 __ blr(); 1716 1717 return start; 1718 } 1719 1720 // Generate core code for disjoint long copy (and oop copy on 1721 // 64-bit). If "aligned" is true, the "from" and "to" addresses 1722 // are assumed to be heapword aligned. 1723 // 1724 // Arguments: 1725 // from: R3_ARG1 1726 // to: R4_ARG2 1727 // count: R5_ARG3 treated as signed 1728 // 1729 void generate_disjoint_long_copy_core(bool aligned) { 1730 Register tmp1 = R6_ARG4; 1731 Register tmp2 = R7_ARG5; 1732 Register tmp3 = R8_ARG6; 1733 Register tmp4 = R0; 1734 1735 Label l_1, l_2, l_3, l_4; 1736 1737 { // FasterArrayCopy 1738 __ cmpwi(CCR0, R5_ARG3, 3); 1739 __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain 1740 1741 __ srdi(tmp1, R5_ARG3, 2); 1742 __ andi_(R5_ARG3, R5_ARG3, 3); 1743 __ mtctr(tmp1); 1744 1745 __ bind(l_4); 1746 // Use unrolled version for mass copying (copy 4 elements a time). 1747 // Load feeding store gets zero latency on Power6, however not on Power5. 1748 // Therefore, the following sequence is made for the good of both. 1749 __ ld(tmp1, 0, R3_ARG1); 1750 __ ld(tmp2, 8, R3_ARG1); 1751 __ ld(tmp3, 16, R3_ARG1); 1752 __ ld(tmp4, 24, R3_ARG1); 1753 __ std(tmp1, 0, R4_ARG2); 1754 __ std(tmp2, 8, R4_ARG2); 1755 __ std(tmp3, 16, R4_ARG2); 1756 __ std(tmp4, 24, R4_ARG2); 1757 __ addi(R3_ARG1, R3_ARG1, 32); 1758 __ addi(R4_ARG2, R4_ARG2, 32); 1759 __ bdnz(l_4); 1760 } 1761 1762 // copy 1 element at a time 1763 __ bind(l_3); 1764 __ cmpwi(CCR0, R5_ARG3, 0); 1765 __ beq(CCR0, l_1); 1766 1767 { // FasterArrayCopy 1768 __ mtctr(R5_ARG3); 1769 __ addi(R3_ARG1, R3_ARG1, -8); 1770 __ addi(R4_ARG2, R4_ARG2, -8); 1771 1772 __ bind(l_2); 1773 __ ldu(R0, 8, R3_ARG1); 1774 __ stdu(R0, 8, R4_ARG2); 1775 __ bdnz(l_2); 1776 1777 } 1778 __ bind(l_1); 1779 } 1780 1781 // Generate stub for disjoint long copy. If "aligned" is true, the 1782 // "from" and "to" addresses are assumed to be heapword aligned. 
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_disjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    generate_disjoint_long_copy_core(aligned);
    __ blr();

    return start;
  }

  // Generate core code for conjoint long copy (and oop copy on
  // 64-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_conjoint_long_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    Label l_1, l_2, l_3, l_4, l_5;

    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_1);

    { // FasterArrayCopy
      __ sldi(R5_ARG3, R5_ARG3, 3);
      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
      __ srdi(R5_ARG3, R5_ARG3, 3);

      __ cmpwi(CCR0, R5_ARG3, 3);
      __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain

      __ srdi(tmp1, R5_ARG3, 2);
      __ andi(R5_ARG3, R5_ARG3, 3);
      __ mtctr(tmp1);

      __ bind(l_4);
      // Use unrolled version for mass copying (copy 4 elements at a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ addi(R3_ARG1, R3_ARG1, -32);
      __ addi(R4_ARG2, R4_ARG2, -32);
      __ ld(tmp4, 24, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp1, 0, R3_ARG1);
      __ std(tmp4, 24, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp1, 0, R4_ARG2);
      __ bdnz(l_4);

      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_1);

      __ bind(l_5);
      __ mtctr(R5_ARG3);
      __ bind(l_3);
      __ ld(R0, -8, R3_ARG1);
      __ std(R0, -8, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, -8);
      __ addi(R4_ARG2, R4_ARG2, -8);
      __ bdnz(l_3);

    }
    __ bind(l_1);
  }

  // Generate stub for conjoint long copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jlong_disjoint_arraycopy() :
      StubRoutines::jlong_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 3);
    generate_conjoint_long_copy_core(aligned);

    __ blr();

    return start;
  }

  // Generate stub for conjoint oop copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //   dest_uninitialized: G1 support
  //
  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);

    address start = __ function_entry();

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_oop_disjoint_arraycopy() :
      StubRoutines::oop_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
#endif

    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

    // Save arguments.
    __ mr(R9_ARG7, R4_ARG2);
    __ mr(R10_ARG8, R5_ARG3);

    if (UseCompressedOops) {
      array_overlap_test(nooverlap_target, 2);
      generate_conjoint_int_copy_core(aligned);
    } else {
      array_overlap_test(nooverlap_target, 3);
      generate_conjoint_long_copy_core(aligned);
    }

    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
    return start;
  }

  // Generate stub for disjoint oop copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //   dest_uninitialized: G1 support
  //
  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

    // save some arguments, disjoint_long_copy_core destroys them,
    // needed for post barrier
    __ mr(R9_ARG7, R4_ARG2);
    __ mr(R10_ARG8, R5_ARG3);

    if (UseCompressedOops) {
      generate_disjoint_int_copy_core(aligned);
    } else {
      generate_disjoint_long_copy_core(aligned);
    }

    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);

    return start;
  }

  // Arguments for generated stub (little endian only):
  //   R3_ARG1 - source byte array address
  //   R4_ARG2 - destination byte array address
  //   R5_ARG3 - round key array
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");

    address start = __ function_entry();

    Label L_doLast;

    Register from    = R3_ARG1;  // source array address
    Register to      = R4_ARG2;  // destination array address
    Register key     = R5_ARG3;  // round key array

    Register keylen  = R8;
    Register temp    = R9;
    Register keypos  = R10;
    Register hex     = R11;
    Register fifteen = R12;

    VectorRegister vRet     = VR0;

    VectorRegister vKey1    = VR1;
    VectorRegister vKey2    = VR2;
    VectorRegister vKey3    = VR3;
    VectorRegister vKey4    = VR4;

    VectorRegister fromPerm = VR5;
    VectorRegister keyPerm  = VR6;
    VectorRegister toPerm   = VR7;
    VectorRegister fSplt    = VR8;

    VectorRegister vTmp1    = VR9;
    VectorRegister vTmp2    = VR10;
    VectorRegister vTmp3    = VR11;
    VectorRegister vTmp4    = VR12;

    VectorRegister vLow     = VR13;
    VectorRegister vHigh    = VR14;

    __ li (hex, 16);
    __ li (fifteen, 15);
    __ vspltisb (fSplt, 0x0f);

    // load unaligned from[0-15] to vRet
    __ lvx (vRet, from);
    __ lvx (vTmp1, fifteen, from);
    __ lvsl (fromPerm, from);
    __ vxor (fromPerm, fromPerm, fSplt);
    __ vperm (vRet, vRet, vTmp1, fromPerm);

    // load keylen (44 or 52 or 60)
    __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);

    // to load keys
    __ lvsr (keyPerm, key);
    __ vxor (vTmp2, vTmp2, vTmp2);
    __ vspltisb (vTmp2, -16);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vsldoi (keyPerm, keyPerm, keyPerm, -8);

    // load the 1st round key to vKey1
    __ li (keypos, 0);
    __ lvx (vKey1, keypos, key);
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey1, vTmp1, vKey1, keyPerm);

    // 1st round
    __ vxor (vRet, vRet, vKey1);

    // load the 2nd round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 3rd round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 4th round key to vKey3
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 5th round key to vKey4
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // 2nd - 5th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);
    __ vcipher (vRet, vRet, vKey3);
    __ vcipher (vRet, vRet, vKey4);

    // load the 6th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 7th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 8th round key to vKey3
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 9th round key to vKey4
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // 6th - 9th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);
    __ vcipher (vRet, vRet, vKey3);
    __ vcipher (vRet, vRet, vKey4);

    // load the 10th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // if all round keys are loaded, skip next 4 rounds
    __ cmpwi (CCR0, keylen, 44);
    __ beq (CCR0, L_doLast);

    // 10th - 11th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);

    // load the 12th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 13th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // if all round keys are loaded, skip next 2 rounds
    __ cmpwi (CCR0, keylen, 52);
    __ beq (CCR0, L_doLast);

    // 12th - 13th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);

    // load the 14th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 15th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    __ bind(L_doLast);

    // last two rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipherlast (vRet, vRet, vKey2);

    __ neg (temp, to);
    __ lvsr (toPerm, temp);
    __ vspltisb (vTmp2, -1);
    __ vxor (vTmp1, vTmp1, vTmp1);
    __ vperm (vTmp2, vTmp2, vTmp1, toPerm);
    __ vxor (toPerm, toPerm, fSplt);
    __ lvx (vTmp1, to);
    __ vperm (vRet, vRet, vRet, toPerm);
    __ vsel (vTmp1, vTmp1, vRet, vTmp2);
    __ lvx (vTmp4, fifteen, to);
    __ stvx (vTmp1, to);
    __ vsel (vRet, vRet, vTmp4, vTmp2);
    __ stvx (vRet, fifteen, to);

    __ blr();
    return start;
  }

  // Arguments for generated stub (little endian only):
  //   R3_ARG1 - source byte array address
  //   R4_ARG2 - destination byte array address
  //   R5_ARG3 - K (key) in little endian int array
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");

    address start = __ function_entry();

    Label L_doLast;
    Label L_do44;
    Label L_do52;
    Label L_do60;

    Register from    = R3_ARG1;  // source array address
    Register to      = R4_ARG2;  // destination array address
    Register key     = R5_ARG3;  // round key array

    Register keylen  = R8;
    Register temp    = R9;
    Register keypos  = R10;
    Register hex     = R11;
    Register fifteen = R12;

    VectorRegister vRet     = VR0;

    VectorRegister vKey1    = VR1;
    VectorRegister vKey2    = VR2;
    VectorRegister vKey3    = VR3;
    VectorRegister vKey4    = VR4;
    VectorRegister vKey5    = VR5;

    VectorRegister fromPerm = VR6;
    VectorRegister keyPerm  = VR7;
    VectorRegister toPerm   = VR8;
    VectorRegister fSplt    = VR9;

    VectorRegister vTmp1    = VR10;
    VectorRegister vTmp2    = VR11;
    VectorRegister vTmp3    = VR12;
    VectorRegister vTmp4    = VR13;

    VectorRegister vLow     = VR14;
    VectorRegister vHigh    = VR15;

    __ li (hex, 16);
    __ li (fifteen, 15);
    __ vspltisb (fSplt, 0x0f);

    // load unaligned from[0-15] to vRet
    __ lvx (vRet, from);
    __ lvx (vTmp1, fifteen, from);
    __ lvsl (fromPerm, from);
    __ vxor (fromPerm, fromPerm, fSplt);
    __ vperm (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]

    // load keylen (44 or 52 or 60)
    __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);

    // to load keys
    __ lvsr (keyPerm, key);
    __ vxor (vTmp2, vTmp2, vTmp2);
    __ vspltisb (vTmp2, -16);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vsldoi (keyPerm, keyPerm, keyPerm, -8);

    __ cmpwi (CCR0, keylen, 44);
    __ beq (CCR0, L_do44);

    __ cmpwi (CCR0, keylen, 52);
    __ beq (CCR0, L_do52);

    // load the 15th round key to vKey1
    __ li (keypos, 240);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 14th round key to vKey2
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 13th round key to vKey3
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // load the 12th round key to vKey4
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey5
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey5, vTmp1, vTmp2, keyPerm);

    // 1st - 5th rounds
    __ vxor (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipher (vRet, vRet, vKey5);

    __ b (L_doLast);

    __ bind (L_do52);

    // load the 13th round key to vKey1
    __ li (keypos, 208);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 12th round key to vKey2
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey3
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // 1st - 3rd rounds
    __ vxor (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);

    __ b (L_doLast);

    __ bind (L_do44);

    // load the 11th round key to vKey1
    __ li (keypos, 176);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // 1st round
    __ vxor (vRet, vRet, vKey1);

    __ bind (L_doLast);

    // load the 10th round key to vKey1
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 9th round key to vKey2
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 8th round key to vKey3
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 7th round key to vKey4
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // load the 6th round key to vKey5
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey5, vTmp2, vTmp1, keyPerm);

    // last 10th - 6th rounds
    __ vncipher (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipher (vRet, vRet, vKey5);

    // load the 5th round key to vKey1
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 4th round key to vKey2
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 3rd round key to vKey3
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // load the 2nd round key to vKey4
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp2, vTmp1, keyPerm);

    // load the 1st round key to vKey5
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey5, vTmp1, vTmp2, keyPerm);

    // last 5th - 1st rounds
    __ vncipher (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipherlast (vRet, vRet, vKey5);

    __ neg (temp, to);
    __ lvsr (toPerm, temp);
    __ vspltisb (vTmp2, -1);
    __ vxor (vTmp1, vTmp1, vTmp1);
    __ vperm (vTmp2, vTmp2, vTmp1, toPerm);
    __ vxor (toPerm, toPerm, fSplt);
    __ lvx (vTmp1, to);
    __ vperm (vRet, vRet, vRet, toPerm);
    __ vsel (vTmp1, vTmp1, vRet, vTmp2);
    __ lvx (vTmp4, fifteen, to);
    __ stvx (vTmp1, to);
    __ vsel (vRet, vRet, vTmp4, vTmp2);
    __ stvx (vRet, fifteen, to);

    __ blr();
    return start;
  }

  void generate_arraycopy_stubs() {
    // Note: the disjoint stubs must be generated first, some of
    // the conjoint stubs use them.
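    // For example, generate_conjoint_int_copy() reads
    // StubRoutines::jint_disjoint_arraycopy() (or the arrayof_ variant) to
    // obtain its no-overlap target, so those entries must already be filled
    // in by the time the conjoint stubs are generated.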

    // non-aligned disjoint versions
    StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
    StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
    StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
    StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);

    // aligned disjoint versions
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);

    // non-aligned conjoint versions
    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);

    // aligned conjoint versions
    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
    StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
    StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
    StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
    StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);

    // fill routines
    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT, true, "arrayof_jint_fill");
  }

  // Safefetch stubs.
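  //
  // Illustrative use from VM code (sketch only, based on the signatures
  // documented below; 'addr' is a hypothetical, possibly unmapped pointer):
  //
  //   int v = SafeFetch32((int*)addr, -1);
  //   // v == *addr if the load succeeded, or -1 if the access faulted.
  //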
  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   R3_ARG1 = adr
    //   R4_ARG2 = errValue
    //
    // result:
    //   R3_RET = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ function_entry();

    // Load *adr into R4_ARG2, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t, sign extended
        __ lwa(R4_ARG2, 0, R3_ARG1);
        break;
      case 8:
        // int64_t
        __ ld(R4_ARG2, 0, R3_ARG1);
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ mr(R3_RET, R4_ARG2);
    __ blr();
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // Entry points that exist in all platforms.
    // Note: This is code that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();
    StubRoutines::_call_stub_entry         = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry   = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds
    StubRoutines::_throw_AbstractMethodError_entry           = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    // Handle IncompatibleClassChangeError in itable stubs.
    StubRoutines::_throw_IncompatibleClassChangeError_entry  = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry  = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Safefetch stubs.
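    // The fault_pc/continuation_pc addresses recorded below are what lets a
    // fault inside the stub's load be recognized and resumed at the
    // continuation, so the fetch returns errValue instead of crashing
    // (sketch of the intended protocol; the fault handling itself lives
    // outside this file).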
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);

    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
    }

    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // replace the standard masm with a special one:
    _masm = new MacroAssembler(code);
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
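// Note (sketch of the surrounding flow, not defined in this file):
// StubGenerator_generate() is driven by the shared stub-routine
// initialization, which calls it once with all == false for the initial
// stubs (call stub, exception handling) early in VM startup, and again with
// all == true for the remaining stubs (arraycopy, safefetch, AES) once the
// rest of the runtime is set up.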