/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#include "runtime/thread.inline.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C.
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call frame manager or
    // native_entry, and process the result.

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");

    Register r_arg_call_wrapper_addr = R3;
    Register r_arg_result_addr       = R4;
    Register r_arg_result_type       = R5;
    Register r_arg_method            = R6;
    Register r_arg_entry             = R7;
    Register r_arg_thread            = R10;

    Register r_temp                  = R24;
    Register r_top_of_arguments_addr = R25;
    Register r_entryframe_fp         = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...
      Register r_arg_argument_addr = R8;
      Register r_arg_argument_count = R9;
      Register r_frame_alignment_in_bytes = R27;
      Register r_argument_addr = R28;
      Register r_argumentcopy_addr = R29;
      Register r_argument_size_in_bytes = R30;
      Register r_frame_size = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
              r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later


      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
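      // (r_arg_argument_count was zero-extended above, so comparing the full
      // 64-bit register against zero is safe here.)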
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming Java argument
        __ add(r_argument_addr,
               r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         - intptr_t*   sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  - Method
      //   R16_thread  - JavaThread*

      // Tos must point to last argument - element_size.
#ifdef CC_INTERP
      const Register tos = R17_tos;
#else
      const Register tos = R15_esp;
#endif
      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
#ifdef CC_INTERP
      __ li(R15_prev_state, 0);
#else
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);

      // Load narrow oop base.
      __ reinit_heapbase(R30, R11_scratch1);

      // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
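      // The address returned by call_stub() marks where execution resumes once
      // the Java call comes back; it is handed out through the return_address
      // out-parameter and leads into the result-processing code below.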
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      //  Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr = R7_ARG5;
      Register r_cr = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);

      // restore non-volatile registers
      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));


      // Stack on exit from call_stub:
      //
      //      0       [C_FRAME]
      //              ...
      //
      //  no call_stub frames left.

      // All non-volatiles have been restored at this point!!
      assert(R3_RET == R3, "R3_RET should be R3");

      __ beq(CCR0, ret_is_object);
      __ beq(CCR1, ret_is_long);
      __ beq(CCR5, ret_is_float);
      __ beq(CCR6, ret_is_double);

      // default:
      __ stw(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_OBJECT:
      __ bind(ret_is_object);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_LONG:
      __ bind(ret_is_long);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_FLOAT:
      __ bind(ret_is_float);
      __ stfs(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_DOUBLE:
      __ bind(ret_is_double);
      __ stfd(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller
    }

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code. The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // Registers alive
    //
    //   R16_thread
    //   R3_ARG1 - address of pending exception
    //   R4_ARG2 - return address in call stub

    const Register exception_file = R21_tmp1;
    const Register exception_line = R22_tmp2;

    __ load_const(exception_file, (void*)__FILE__);
    __ load_const(exception_line, (void*)__LINE__);

    __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    // store into `char *'
    __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
    // store into `int'
    __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mtlr(R4_ARG2);
    // continue in call stub
    __ blr();

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception. The pending exception check happened in the runtime
  // or native call stub. The pending exception in Thread is
  // converted into a Java-level exception.
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

#if !defined(PRODUCT)
    if (VerifyOops) {
      // Get pending exception oop.
      __ ld(R3_ARG1,
            in_bytes(Thread::pending_exception_offset()),
            R16_thread);
      // Make sure that this code is only executed if there is a pending exception.
      {
        Label L;
        __ cmpdi(CCR0, R3_ARG1, 0);
        __ bne(CCR0, L);
        __ stop("StubRoutines::forward exception: no pending exception (1)");
        __ bind(L);
      }
      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
    }
#endif

    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
    __ save_LR_CR(R4_ARG2);
    __ push_frame_reg_args(0, R0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                    SharedRuntime::exception_handler_for_return_address),
                    R16_thread,
                    R4_ARG2);
    // Copy handler's address.
    __ mtctr(R3_RET);
    __ pop_frame();
    __ restore_LR_CR(R0);

    // Set up the arguments for the exception handler:
    //  - R3_ARG1: exception oop
    //  - R4_ARG2: exception pc.

    // Load pending exception oop.
    __ ld(R3_ARG1,
          in_bytes(Thread::pending_exception_offset()),
          R16_thread);

    // The exception pc is the return address in the caller.
    // Must load it into R4_ARG2.
    __ mflr(R4_ARG2);

#ifdef ASSERT
    // Make sure exception is set.
    {
      Label L;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Clear the pending exception.
    __ li(R0, 0);
    __ std(R0,
           in_bytes(Thread::pending_exception_offset()),
           R16_thread);
    // Jump to exception handler.
    __ bctr();

    return start;
  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling). If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
            in_bytes(Thread::pending_exception_offset()),
            R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->

  // Generate G1 pre-write barrier for array.
  //
  // Input:
  //   from  - register containing src address (only needed for spilling)
  //   to    - register containing starting address
  //   count - register containing element count
  //   tmp   - scratch register
  //
  // Kills:
  //   nothing
  //
  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          const int spill_slots = 4 * wordSize;
          const int frame_size = frame::abi_reg_args_size + spill_slots;
          Label filtered;

          // Is marking active?
          if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          } else {
            guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          }
          __ cmpdi(CCR0, Rtmp1, 0);
          __ beq(CCR0, filtered);

          __ save_LR_CR(R0);
          __ push_frame_reg_args(spill_slots, R0);
          __ std(from,  frame_size - 1 * wordSize, R1_SP);
          __ std(to,    frame_size - 2 * wordSize, R1_SP);
          __ std(count, frame_size - 3 * wordSize, R1_SP);

          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

          __ ld(from,  frame_size - 1 * wordSize, R1_SP);
          __ ld(to,    frame_size - 2 * wordSize, R1_SP);
          __ ld(count, frame_size - 3 * wordSize, R1_SP);
          __ pop_frame();
          __ restore_LR_CR(R0);

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Generate CMS/G1 post-write barrier for array.
  //
  // Input:
  //   addr  - register containing starting address
  //   count - register containing element count
  //   tmp   - scratch register
  //
  // The input registers and R0 are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
    BarrierSet* const bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          if (branchToEnd) {
            __ save_LR_CR(R0);
            // We need this frame only to spill LR.
            __ push_frame_reg_args(0, R0);
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
            __ pop_frame();
            __ restore_LR_CR(R0);
          } else {
            // Tail call: fake call from stub caller by branching without linking.
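            // LR still holds the stub caller's return address, so the C function
            // called below returns straight to that caller.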
            address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
            __ mr_if_needed(R3_ARG1, addr);
            __ mr_if_needed(R4_ARG2, count);
            __ load_const(R11, entry_point, R0);
            __ call_c_and_return_to_caller(R11);
          }
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          Label Lskip_loop, Lstore_loop;
          if (UseConcMarkSweepGC) {
            // TODO PPC port: contribute optimization / requires shared changes
            __ release();
          }

          CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          __ sldi(count, count, LogBytesPerHeapOop);
          __ addi(count, count, -BytesPerHeapOop);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srdi(addr, addr, CardTableModRefBS::card_shift);
          __ srdi(count, count, CardTableModRefBS::card_shift);
          __ subf(count, addr, count);
          assert_different_registers(R0, addr, count, tmp);
          __ load_const(tmp, (address)ct->byte_map_base);
          __ addic_(count, count, 1);
          __ beq(CCR0, Lskip_loop);
          __ li(R0, 0);
          __ mtctr(count);
          // Byte store loop
          __ bind(Lstore_loop);
          __ stbx(R0, tmp, addr);
          __ addi(addr, addr, 1);
          __ bdnz(Lstore_loop);
          __ bind(Lskip_loop);

          if (!branchToEnd) __ blr();
        }
        break;
      case BarrierSet::ModRef:
        if (!branchToEnd) __ blr();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:
  //   count:
  //
  // Destroys:
  //
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size = VM_Version::get_cache_line_size(), cl_dwords = cl_size >> 3, cl_dwordaddr_bits = exact_log2(cl_dwords);
    int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);                  // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);     // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);    // number of double dwords
    __ load_const_optimized(zero_reg, 0L);    // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);              // cnt_dwords even?
    __ beq(CCR0, lastdword);                  // size <= 1
    __ mtctr(tmp1_reg);                       // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);           // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                   // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                                  // already 128byte aligned
    __ mtctr(tmp1_reg);                                  // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg);   // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
    __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);                 // to check if rest even

    __ mtctr(tmp1_reg);                                   // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);                          // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
    __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);                // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);                  // rest<=1
    __ mtctr(tmp1_reg);                       // load counter

    // Clear rest.
    __ bind(restloop);
    __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
    __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr();                                 // return

    return start;
  }

  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!o->is_oop_or_null()) {
      fatal(message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }

  // Fairer handling of safepoints for native methods.
  //
  // Generate code which reads from the polling page. This special handling is needed as the
  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
  // to read from the safepoint polling page.
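  // (On this port the stub body is still a placeholder; it just calls
  // unimplemented(), see the TODO below.)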
  address generate_load_from_poll() {
    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
    return start;
  }

  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
  // The code is implemented (ported from sparc) because we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint short fill. If "aligned" is true, the
  // "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // source array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        break;
      case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align source address at 4 bytes address boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
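      // At most one 4-byte store is needed here to reach 8-byte alignment.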
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);              // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0);     // 32 bit -> 64 bit

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count);       // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
      __ stb(value, 0, to);
      __ stb(value, 1, to); // Store both bytes of the 2-byte chunk before advancing.
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }


  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1 - from
  //   R4_ARG2 - to
  //   R5_ARG3 - element count
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;

    Label l_overlap;
#ifdef ASSERT
    __ srdi_(tmp2, R5_ARG3, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif

    __ subf(tmp1, R3_ARG1, R4_ARG2);        // distance in bytes
    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
    __ cmpld(CCR0, R3_ARG1, R4_ARG2);       // Use unsigned comparison!
    __ cmpld(CCR1, tmp1, tmp2);
    __ crand(CCR0, Assembler::less, CCR1, Assembler::less);
    __ blt(CCR0, l_overlap);                // Src before dst and distance smaller than size.

    // need to copy forwards
    if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
      __ b(no_overlap_target);
    } else {
      __ load_const(tmp1, no_overlap_target, tmp2);
      __ mtctr(tmp1);
      __ bctr();
    }

    __ bind(l_overlap);
    // need to copy backwards
  }

  // The guideline in the implementations of generate_disjoint_xxx_copy
  // (xxx = byte, short, int, long, oop) is to copy as many elements as possible with
  // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned accesses, even
  // though they cause no alignment interrupt.
  //
  // In Big-Endian mode, the PowerPC architecture requires implementations to
  // handle automatically misaligned integer halfword and word accesses,
  // word-aligned integer doubleword accesses, and word-aligned floating-point
  // accesses. Other accesses may or may not generate an Alignment interrupt
  // depending on the implementation.
  // Alignment interrupt handling may require on the order of hundreds of cycles,
  // so every effort should be made to avoid misaligned memory values.
  //
  //
  // Generate stub for disjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;


    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
    // Don't try anything fancy if arrays don't have many elements.
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 17);
    __ ble(CCR0, l_6); // copy 4 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.

      // Copy elements if necessary to align to 4 bytes.
      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
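      // Only the low two bits matter: 0-3 bytes remain until 'from' is
      // 4-byte aligned.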
      __ andi_(tmp1, tmp1, 3);
      __ beq(CCR0, l_2);

      __ subf(R5_ARG3, tmp1, R5_ARG3);
      __ bind(l_9);
      __ lbz(tmp2, 0, R3_ARG1);
      __ addic_(tmp1, tmp1, -1);
      __ stb(tmp2, 0, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 1);
      __ addi(R4_ARG2, R4_ARG2, 1);
      __ bne(CCR0, l_9);

      __ bind(l_2);
    }

    // copy 8 elements at a time
    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
    __ andi_(tmp1, tmp2, 7);
    __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8

    // copy a 2-element word if necessary to align to 8 bytes
    __ andi_(R0, R3_ARG1, 7);
    __ beq(CCR0, l_7);

    __ lwzx(tmp2, R3_ARG1, tmp3);
    __ addi(R5_ARG3, R5_ARG3, -4);
    __ stwx(tmp2, R4_ARG2, tmp3);
    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }
    __ bind(l_7);

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 31);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain

      __ srdi(tmp1, R5_ARG3, 5);
      __ andi_(R5_ARG3, R5_ARG3, 31);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 32 elements a time)
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }

    __ bind(l_6);

    // copy 4 elements at a time
    __ cmpwi(CCR0, R5_ARG3, 4);
    __ blt(CCR0, l_1);
    __ srdi(tmp1, R5_ARG3, 2);
    __ mtctr(tmp1); // is > 0
    __ andi_(R5_ARG3, R5_ARG3, 3);

    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -1);
      __ addi(R4_ARG2, R4_ARG2, -1);

      __ bind(l_5);
      __ lbzu(tmp2, 1, R3_ARG1);
      __ stbu(tmp2, 1, R4_ARG2);
      __ bdnz(l_5);
    }

    __ bind(l_4);
    __ blr();

    return start;
  }

  // Generate stub for conjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
      StubRoutines::jbyte_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 0);
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.
    Label l_1, l_2;

    __ b(l_2);
    __ bind(l_1);
    __ stbx(tmp1, R4_ARG2, R5_ARG3);
    __ bind(l_2);
    __ addic_(R5_ARG3, R5_ARG3, -1);
    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
    __ bge(CCR0, l_1);

    __ blr();

    return start;
  }

  // Generate stub for disjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:      R3_ARG1
  //   to:        R4_ARG2
  //   elm.count: R5_ARG3 treated as signed
  //
  // Strategy for aligned==true:
  //
  //  If length <= 9:
  //     1. copy 2 elements at a time (l_6)
  //     2. copy last element if original element count was odd (l_1)
  //
  //  If length > 9:
  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
  //     3. copy last element if one was left in step 2. (l_1)
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
  //  can be unaligned (see comment below)
  //
  //  If length > 9:
  //     1. continue with step 6. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        5. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned or
  //        either all loads or all stores are unaligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left (l_6); arriving here from step 1., there is a chance
  //        that all accesses are unaligned.
  //     7. copy last element if one was left in step 6. (l_1)
  //
  // There are unaligned data accesses using integer load/store
  // instructions in this stub. POWER allows such accesses.
  //
  // According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
  // Chapter 2: Effect of Operand Placement on Performance) unaligned
  // integer load/stores have good performance. Only unaligned
  // floating point load/stores can have poor performance.
  //
  // TODO:
  //
  //  1. check if aligning the backbranch target of loops is beneficial
  //
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    address start = __ function_entry();

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
    // don't try anything fancy if arrays don't have many elements
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 9);
    __ ble(CCR0, l_6); // copy 2 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy

      // At this point it is guaranteed that both, from and to have the same alignment mod 4.

      // Copy 1 element if necessary to align to 4 bytes.
      __ andi_(tmp1, R3_ARG1, 3);
      __ beq(CCR0, l_2);

      __ lhz(tmp2, 0, R3_ARG1);
      __ addi(R3_ARG1, R3_ARG1, 2);
      __ sth(tmp2, 0, R4_ARG2);
      __ addi(R4_ARG2, R4_ARG2, 2);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ bind(l_2);

      // At this point the positions of both, from and to, are at least 4 byte aligned.

      // Copy 4 elements at a time.
      // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
      __ xorr(tmp2, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp2, 7);
      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned

      // Copy a 2-element word if necessary to align to 8 bytes.
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_7);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -2);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
    }

    __ bind(l_7);

    // Copy 4 elements at a time; either the loads or the stores can
    // be unaligned if aligned == false.

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 15);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain

      __ srdi(tmp1, R5_ARG3, 4);
      __ andi_(R5_ARG3, R5_ARG3, 15);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 16 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
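      // All four 8-byte loads are issued before the four stores, so no store
      // has to wait on the load that feeds it (helps Power5, free on Power6).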
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }
    __ bind(l_6);

    // copy 2 elements at a time
    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 2);
      __ blt(CCR0, l_1);
      __ srdi(tmp1, R5_ARG3, 1);
      __ andi_(R5_ARG3, R5_ARG3, 1);

      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ mtctr(tmp1);

      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);

      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -2);
      __ addi(R4_ARG2, R4_ARG2, -2);

      __ bind(l_5);
      __ lhzu(tmp2, 2, R3_ARG1);
      __ sthu(tmp2, 2, R4_ARG2);
      __ bdnz(l_5);
    }
    __ bind(l_4);
    __ blr();

    return start;
  }

  // Generate stub for conjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jshort_disjoint_arraycopy() :
      StubRoutines::jshort_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 1);

    Label l_1, l_2;
    __ sldi(tmp1, R5_ARG3, 1);
    __ b(l_2);
    __ bind(l_1);
    __ sthx(tmp2, R4_ARG2, tmp1);
    __ bind(l_2);
    __ addic_(tmp1, tmp1, -2);
    __ lhzx(tmp2, R3_ARG1, tmp1);
    __ bge(CCR0, l_1);

    __ blr();

    return start;
  }

  // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned"
  // is true, the "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_disjoint_int_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    Label l_1, l_2, l_3, l_4, l_5, l_6;
    // for short arrays, just do single element copy
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 5);
    __ ble(CCR0, l_2);

    if (!aligned) {
      // check if arrays have same alignment mod 8.
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(R0, tmp1, 7);
      // Not the same alignment, but ld and std just need to be 4 byte aligned.
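      // (Per the ISA note above array_overlap_test: word-aligned doubleword
      // accesses are handled automatically, so 4-byte alignment is enough for
      // the 8-byte copy loop.)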
      __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time

      // copy 1 element to align to and from on an 8 byte boundary
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_4);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
      __ bind(l_4);
    }

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 7);
      __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain

      __ srdi(tmp1, R5_ARG3, 3);
      __ andi_(R5_ARG3, R5_ARG3, 7);
      __ mtctr(tmp1);

      __ bind(l_6);
      // Use unrolled version for mass copying (copy 8 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_6);
    }

    // copy 1 element at a time
    __ bind(l_2);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_1);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);

      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
    }

    __ bind(l_1);
    return;
  }

  // Generate stub for disjoint int copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_disjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    generate_disjoint_int_copy_core(aligned);
    __ blr();
    return start;
  }

  // Generate core code for conjoint int copy (and oop copy on
  // 32-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_conjoint_int_copy_core(bool aligned) {
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.

    Label l_1, l_2, l_3, l_4, l_5, l_6;

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_6);

      __ sldi(R5_ARG3, R5_ARG3, 2);
      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
      __ srdi(R5_ARG3, R5_ARG3, 2);

      __ cmpwi(CCR0, R5_ARG3, 7);
      __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain

      __ srdi(tmp1, R5_ARG3, 3);
      __ andi(R5_ARG3, R5_ARG3, 7);
      __ mtctr(tmp1);

      __ bind(l_4);
      // Use unrolled version for mass copying (copy 8 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
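      // The loop below first steps both pointers down by 32 bytes, then loads
      // high-to-low and stores high-to-low, so the copy runs backwards through
      // the arrays without touching bytes it has already written.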
      __ addi(R3_ARG1, R3_ARG1, -32);
      __ addi(R4_ARG2, R4_ARG2, -32);
      __ ld(tmp4, 24, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp1, 0, R3_ARG1);
      __ std(tmp4, 24, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp1, 0, R4_ARG2);
      __ bdnz(l_4);

      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_6);

      __ bind(l_5);
      __ mtctr(R5_ARG3);
      __ bind(l_3);
      __ lwz(R0, -4, R3_ARG1);
      __ stw(R0, -4, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bdnz(l_3);

      __ bind(l_6);
    }
  }

  // Generate stub for conjoint int copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jint_disjoint_arraycopy() :
      StubRoutines::jint_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 2);

    generate_conjoint_int_copy_core(aligned);

    __ blr();

    return start;
  }

  // Generate core code for disjoint long copy (and oop copy on
  // 64-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_disjoint_long_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    Label l_1, l_2, l_3, l_4;

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 3);
      __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain

      __ srdi(tmp1, R5_ARG3, 2);
      __ andi_(R5_ARG3, R5_ARG3, 3);
      __ mtctr(tmp1);

      __ bind(l_4);
      // Use unrolled version for mass copying (copy 4 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_4);
    }

    // copy 1 element at a time
    __ bind(l_3);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_1);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -8);
      __ addi(R4_ARG2, R4_ARG2, -8);

      __ bind(l_2);
      __ ldu(R0, 8, R3_ARG1);
      __ stdu(R0, 8, R4_ARG2);
      __ bdnz(l_2);

    }
    __ bind(l_1);
  }

  // Generate stub for disjoint long copy. If "aligned" is true, the
  // Generate stub for disjoint long copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_disjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    generate_disjoint_long_copy_core(aligned);
    __ blr();

    return start;
  }

  // Generate core code for conjoint long copy (and oop copy on
  // 64-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  void generate_conjoint_long_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    Label l_1, l_2, l_3, l_4, l_5;

    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_1);

    { // FasterArrayCopy
      __ sldi(R5_ARG3, R5_ARG3, 3);
      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
      __ srdi(R5_ARG3, R5_ARG3, 3);

      __ cmpwi(CCR0, R5_ARG3, 3);
      __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain

      __ srdi(tmp1, R5_ARG3, 2);
      __ andi(R5_ARG3, R5_ARG3, 3);
      __ mtctr(tmp1);

      __ bind(l_4);
      // Use unrolled version for mass copying (copy 4 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ addi(R3_ARG1, R3_ARG1, -32);
      __ addi(R4_ARG2, R4_ARG2, -32);
      __ ld(tmp4, 24, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp1, 0, R3_ARG1);
      __ std(tmp4, 24, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp1, 0, R4_ARG2);
      __ bdnz(l_4);

      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_1);

      __ bind(l_5);
      __ mtctr(R5_ARG3);
      __ bind(l_3);
      __ ld(R0, -8, R3_ARG1);
      __ std(R0, -8, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, -8);
      __ addi(R4_ARG2, R4_ARG2, -8);
      __ bdnz(l_3);

    }
    __ bind(l_1);
  }

  // Generate stub for conjoint long copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_conjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jlong_disjoint_arraycopy() :
      StubRoutines::jlong_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 3);
    generate_conjoint_long_copy_core(aligned);

    __ blr();

    return start;
  }

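  // Note on the nooverlap_target lookups in the conjoint stubs: under the
  // 64-bit ELFv1 ABI an address taken from StubRoutines is a function
  // descriptor (entry point, TOC pointer, environment), so the raw code
  // address that array_overlap_test can branch to must be read from the
  // descriptor via ->entry(). With ABI_ELFv2 the stub address already is the
  // code entry point and can be used directly.
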
  // Generate stub for conjoint oop copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //      dest_uninitialized: G1 support
  //
  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);

    address start = __ function_entry();

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_oop_disjoint_arraycopy() :
      StubRoutines::oop_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
#endif

    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

    // Save arguments.
    __ mr(R9_ARG7, R4_ARG2);
    __ mr(R10_ARG8, R5_ARG3);

    if (UseCompressedOops) {
      array_overlap_test(nooverlap_target, 2);
      generate_conjoint_int_copy_core(aligned);
    } else {
      array_overlap_test(nooverlap_target, 3);
      generate_conjoint_long_copy_core(aligned);
    }

    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
    return start;
  }

  // Generate stub for disjoint oop copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //      dest_uninitialized: G1 support
  //
  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

    // Save some arguments for the post barrier; the copy core destroys them.
    __ mr(R9_ARG7, R4_ARG2);
    __ mr(R10_ARG8, R5_ARG3);

    if (UseCompressedOops) {
      generate_disjoint_int_copy_core(aligned);
    } else {
      generate_disjoint_long_copy_core(aligned);
    }

    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);

    return start;
  }

  void generate_arraycopy_stubs() {
    // Note: the disjoint stubs must be generated first, as some of
    // the conjoint stubs use them.
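    // (Each conjoint stub looks up its disjoint counterpart as the
    // nooverlap_target for array_overlap_test, so the disjoint entries in
    // StubRoutines must already be filled in at that point.)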

    // non-aligned disjoint versions
    StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
    StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
    StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
    StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);

    // aligned disjoint versions
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jint_disjoint_arraycopy        = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
    StubRoutines::_arrayof_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
    StubRoutines::_arrayof_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);

    // non-aligned conjoint versions
    StubRoutines::_jbyte_arraycopy       = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
    StubRoutines::_jshort_arraycopy      = generate_conjoint_short_copy(false, "jshort_arraycopy");
    StubRoutines::_jint_arraycopy        = generate_conjoint_int_copy(false, "jint_arraycopy");
    StubRoutines::_jlong_arraycopy       = generate_conjoint_long_copy(false, "jlong_arraycopy");
    StubRoutines::_oop_arraycopy         = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
    StubRoutines::_oop_arraycopy_uninit  = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);

    // aligned conjoint versions
    StubRoutines::_arrayof_jbyte_arraycopy       = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
    StubRoutines::_arrayof_jshort_arraycopy      = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
    StubRoutines::_arrayof_jint_arraycopy        = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
    StubRoutines::_arrayof_jlong_arraycopy       = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
    StubRoutines::_arrayof_oop_arraycopy         = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
    StubRoutines::_arrayof_oop_arraycopy_uninit  = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);

    // fill routines
    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");
  }

  // Safefetch stubs.
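  //
  // A typical use from VM code looks roughly like this (illustrative only;
  // the C prototypes are listed in the comment inside generate_safefetch):
  //
  //   int v = SafeFetch32((int*)addr, 0xBAD);
  //   // v is *addr if the load succeeded, or 0xBAD if addr was not readable:
  //   // a fault at the recorded fault_pc is resolved by resuming execution at
  //   // continuation_pc, with errValue still in the result register.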
  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   R3_ARG1 = adr
    //   R4_ARG2 = errValue
    //
    // result:
    //   R3_RET  = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ function_entry();

    // Load *adr into R4_ARG2, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t, sign extended
        __ lwa(R4_ARG2, 0, R3_ARG1);
        break;
      case 8:
        // int64_t
        __ ld(R4_ARG2, 0, R3_ARG1);
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ mr(R3_RET, R4_ARG2);
    __ blr();
  }

  // Initialization
  void generate_initial() {
    // Generates the initial stubs and initializes the entry points.

    // Entry points that exist on all platforms.
    // Note: This is code that could be shared among different platforms; however,
    // the benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();
    StubRoutines::_call_stub_entry         = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry   = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points.

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds.
    StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    // Handle IncompatibleClassChangeError in itable stubs.
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    if (UseAESIntrinsics) {
      guarantee(!UseAESIntrinsics, "not yet implemented.");
    }

    // Safefetch stubs.
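    // On PPC64 sizeof(int) == 4 and sizeof(intptr_t) == 8, so these pick the
    // lwa (SafeFetch32) and ld (SafeFetchN) cases in generate_safefetch.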
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // replace the standard masm with a special one:
    _masm = new MacroAssembler(code);
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}