1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2012, 2018 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "frame_ppc.hpp" 32 #include "gc/shared/gcLocker.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "interpreter/interp_masm.hpp" 35 #include "memory/resourceArea.hpp" 36 #include "oops/compiledICHolder.hpp" 37 #include "runtime/safepointMechanism.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/vframeArray.hpp" 40 #include "utilities/align.hpp" 41 #include "vmreg_ppc.inline.hpp" 42 #ifdef COMPILER1 43 #include "c1/c1_Runtime1.hpp" 44 #endif 45 #ifdef COMPILER2 46 #include "opto/ad.hpp" 47 #include "opto/runtime.hpp" 48 #endif 49 50 #include <alloca.h> 51 52 #define __ masm-> 53 54 #ifdef PRODUCT 55 #define BLOCK_COMMENT(str) // nothing 56 #else 57 #define BLOCK_COMMENT(str) __ block_comment(str) 58 #endif 59 60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 61 62 63 class RegisterSaver { 64 // Used for saving volatile registers. 65 public: 66 67 // Support different return pc locations. 68 enum ReturnPCLocation { 69 return_pc_is_lr, 70 return_pc_is_pre_saved, 71 return_pc_is_thread_saved_exception_pc 72 }; 73 74 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 75 int* out_frame_size_in_bytes, 76 bool generate_oop_map, 77 int return_pc_adjustment, 78 ReturnPCLocation return_pc_location, 79 bool save_vectors = false); 80 static void restore_live_registers_and_pop_frame(MacroAssembler* masm, 81 int frame_size_in_bytes, 82 bool restore_ctr, 83 bool save_vectors = false); 84 85 static void push_frame_and_save_argument_registers(MacroAssembler* masm, 86 Register r_temp, 87 int frame_size, 88 int total_args, 89 const VMRegPair *regs, const VMRegPair *regs2 = NULL); 90 static void restore_argument_registers_and_pop_frame(MacroAssembler*masm, 91 int frame_size, 92 int total_args, 93 const VMRegPair *regs, const VMRegPair *regs2 = NULL); 94 95 // During deoptimization only the result registers need to be restored 96 // all the other values have already been extracted. 
97 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes); 98 99 // Constants and data structures: 100 101 typedef enum { 102 int_reg, 103 float_reg, 104 special_reg, 105 vs_reg 106 } RegisterType; 107 108 typedef enum { 109 reg_size = 8, 110 half_reg_size = reg_size / 2, 111 vs_reg_size = 16 112 } RegisterConstants; 113 114 typedef struct { 115 RegisterType reg_type; 116 int reg_num; 117 VMReg vmreg; 118 } LiveRegType; 119 }; 120 121 122 #define RegisterSaver_LiveIntReg(regname) \ 123 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() } 124 125 #define RegisterSaver_LiveFloatReg(regname) \ 126 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() } 127 128 #define RegisterSaver_LiveSpecialReg(regname) \ 129 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() } 130 131 #define RegisterSaver_LiveVSReg(regname) \ 132 { RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() } 133 134 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { 135 // Live registers which get spilled to the stack. Register 136 // positions in this array correspond directly to the stack layout. 137 138 // 139 // live special registers: 140 // 141 RegisterSaver_LiveSpecialReg(SR_CTR), 142 // 143 // live float registers: 144 // 145 RegisterSaver_LiveFloatReg( F0 ), 146 RegisterSaver_LiveFloatReg( F1 ), 147 RegisterSaver_LiveFloatReg( F2 ), 148 RegisterSaver_LiveFloatReg( F3 ), 149 RegisterSaver_LiveFloatReg( F4 ), 150 RegisterSaver_LiveFloatReg( F5 ), 151 RegisterSaver_LiveFloatReg( F6 ), 152 RegisterSaver_LiveFloatReg( F7 ), 153 RegisterSaver_LiveFloatReg( F8 ), 154 RegisterSaver_LiveFloatReg( F9 ), 155 RegisterSaver_LiveFloatReg( F10 ), 156 RegisterSaver_LiveFloatReg( F11 ), 157 RegisterSaver_LiveFloatReg( F12 ), 158 RegisterSaver_LiveFloatReg( F13 ), 159 RegisterSaver_LiveFloatReg( F14 ), 160 RegisterSaver_LiveFloatReg( F15 ), 161 RegisterSaver_LiveFloatReg( F16 ), 162 RegisterSaver_LiveFloatReg( F17 ), 163 RegisterSaver_LiveFloatReg( F18 ), 164 RegisterSaver_LiveFloatReg( F19 ), 165 RegisterSaver_LiveFloatReg( F20 ), 166 RegisterSaver_LiveFloatReg( F21 ), 167 RegisterSaver_LiveFloatReg( F22 ), 168 RegisterSaver_LiveFloatReg( F23 ), 169 RegisterSaver_LiveFloatReg( F24 ), 170 RegisterSaver_LiveFloatReg( F25 ), 171 RegisterSaver_LiveFloatReg( F26 ), 172 RegisterSaver_LiveFloatReg( F27 ), 173 RegisterSaver_LiveFloatReg( F28 ), 174 RegisterSaver_LiveFloatReg( F29 ), 175 RegisterSaver_LiveFloatReg( F30 ), 176 RegisterSaver_LiveFloatReg( F31 ), 177 // 178 // live integer registers: 179 // 180 RegisterSaver_LiveIntReg( R0 ), 181 //RegisterSaver_LiveIntReg( R1 ), // stack pointer 182 RegisterSaver_LiveIntReg( R2 ), 183 RegisterSaver_LiveIntReg( R3 ), 184 RegisterSaver_LiveIntReg( R4 ), 185 RegisterSaver_LiveIntReg( R5 ), 186 RegisterSaver_LiveIntReg( R6 ), 187 RegisterSaver_LiveIntReg( R7 ), 188 RegisterSaver_LiveIntReg( R8 ), 189 RegisterSaver_LiveIntReg( R9 ), 190 RegisterSaver_LiveIntReg( R10 ), 191 RegisterSaver_LiveIntReg( R11 ), 192 RegisterSaver_LiveIntReg( R12 ), 193 //RegisterSaver_LiveIntReg( R13 ), // system thread id 194 RegisterSaver_LiveIntReg( R14 ), 195 RegisterSaver_LiveIntReg( R15 ), 196 RegisterSaver_LiveIntReg( R16 ), 197 RegisterSaver_LiveIntReg( R17 ), 198 RegisterSaver_LiveIntReg( R18 ), 199 RegisterSaver_LiveIntReg( R19 ), 200 RegisterSaver_LiveIntReg( R20 ), 201 RegisterSaver_LiveIntReg( R21 ), 202 RegisterSaver_LiveIntReg( R22 ), 203 RegisterSaver_LiveIntReg( R23 ), 204 RegisterSaver_LiveIntReg( 
R24 ), 205 RegisterSaver_LiveIntReg( R25 ), 206 RegisterSaver_LiveIntReg( R26 ), 207 RegisterSaver_LiveIntReg( R27 ), 208 RegisterSaver_LiveIntReg( R28 ), 209 RegisterSaver_LiveIntReg( R29 ), 210 RegisterSaver_LiveIntReg( R30 ), 211 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below) 212 }; 213 214 static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = { 215 // 216 // live vector scalar registers (optional, only these ones are used by C2): 217 // 218 RegisterSaver_LiveVSReg( VSR32 ), 219 RegisterSaver_LiveVSReg( VSR33 ), 220 RegisterSaver_LiveVSReg( VSR34 ), 221 RegisterSaver_LiveVSReg( VSR35 ), 222 RegisterSaver_LiveVSReg( VSR36 ), 223 RegisterSaver_LiveVSReg( VSR37 ), 224 RegisterSaver_LiveVSReg( VSR38 ), 225 RegisterSaver_LiveVSReg( VSR39 ), 226 RegisterSaver_LiveVSReg( VSR40 ), 227 RegisterSaver_LiveVSReg( VSR41 ), 228 RegisterSaver_LiveVSReg( VSR42 ), 229 RegisterSaver_LiveVSReg( VSR43 ), 230 RegisterSaver_LiveVSReg( VSR44 ), 231 RegisterSaver_LiveVSReg( VSR45 ), 232 RegisterSaver_LiveVSReg( VSR46 ), 233 RegisterSaver_LiveVSReg( VSR47 ), 234 RegisterSaver_LiveVSReg( VSR48 ), 235 RegisterSaver_LiveVSReg( VSR49 ), 236 RegisterSaver_LiveVSReg( VSR50 ), 237 RegisterSaver_LiveVSReg( VSR51 ) 238 }; 239 240 241 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 242 int* out_frame_size_in_bytes, 243 bool generate_oop_map, 244 int return_pc_adjustment, 245 ReturnPCLocation return_pc_location, 246 bool save_vectors) { 247 // Push an abi_reg_args-frame and store all registers which may be live. 248 // If requested, create an OopMap: Record volatile registers as 249 // callee-save values in an OopMap so their save locations will be 250 // propagated to the RegisterMap of the caller frame during 251 // StackFrameStream construction (needed for deoptimization; see 252 // compiledVFrame::create_stack_value). 253 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment. 254 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved). 255 256 // calcualte frame size 257 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 258 sizeof(RegisterSaver::LiveRegType); 259 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 260 sizeof(RegisterSaver::LiveRegType)) 261 : 0; 262 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 263 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes) 264 + frame::abi_reg_args_size; 265 266 *out_frame_size_in_bytes = frame_size_in_bytes; 267 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 268 const int register_save_offset = frame_size_in_bytes - register_save_size; 269 270 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words. 271 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL; 272 273 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {"); 274 275 // push a new frame 276 __ push_frame(frame_size_in_bytes, noreg); 277 278 // Save some registers in the last (non-vector) slots of the new frame so we 279 // can use them as scratch regs or to determine the return pc. 280 __ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 281 __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP); 282 283 // save the flags 284 // Do the save_LR_CR by hand and adjust the return pc if requested. 
285 __ mfcr(R30); 286 __ std(R30, frame_size_in_bytes + _abi(cr), R1_SP); 287 switch (return_pc_location) { 288 case return_pc_is_lr: __ mflr(R31); break; 289 case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break; 290 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break; 291 default: ShouldNotReachHere(); 292 } 293 if (return_pc_location != return_pc_is_pre_saved) { 294 if (return_pc_adjustment != 0) { 295 __ addi(R31, R31, return_pc_adjustment); 296 } 297 __ std(R31, frame_size_in_bytes + _abi(lr), R1_SP); 298 } 299 300 // save all registers (ints and floats) 301 int offset = register_save_offset; 302 303 for (int i = 0; i < regstosave_num; i++) { 304 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 305 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 306 307 switch (reg_type) { 308 case RegisterSaver::int_reg: { 309 if (reg_num < 30) { // We spilled R30-31 right at the beginning. 310 __ std(as_Register(reg_num), offset, R1_SP); 311 } 312 break; 313 } 314 case RegisterSaver::float_reg: { 315 __ stfd(as_FloatRegister(reg_num), offset, R1_SP); 316 break; 317 } 318 case RegisterSaver::special_reg: { 319 if (reg_num == SR_CTR_SpecialRegisterEnumValue) { 320 __ mfctr(R30); 321 __ std(R30, offset, R1_SP); 322 } else { 323 Unimplemented(); 324 } 325 break; 326 } 327 default: 328 ShouldNotReachHere(); 329 } 330 331 if (generate_oop_map) { 332 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 333 RegisterSaver_LiveRegs[i].vmreg); 334 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2), 335 RegisterSaver_LiveRegs[i].vmreg->next()); 336 } 337 offset += reg_size; 338 } 339 340 for (int i = 0; i < vsregstosave_num; i++) { 341 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 342 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 343 344 __ li(R30, offset); 345 __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP); 346 347 if (generate_oop_map) { 348 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 349 RegisterSaver_LiveVSRegs[i].vmreg); 350 } 351 offset += vs_reg_size; 352 } 353 354 assert(offset == frame_size_in_bytes, "consistency check"); 355 356 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers"); 357 358 // And we're done. 359 return map; 360 } 361 362 363 // Pop the current frame and restore all the registers that we 364 // saved. 365 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, 366 int frame_size_in_bytes, 367 bool restore_ctr, 368 bool save_vectors) { 369 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 370 sizeof(RegisterSaver::LiveRegType); 371 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 372 sizeof(RegisterSaver::LiveRegType)) 373 : 0; 374 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 375 376 const int register_save_offset = frame_size_in_bytes - register_save_size; 377 378 BLOCK_COMMENT("restore_live_registers_and_pop_frame {"); 379 380 // restore all registers (ints and floats) 381 int offset = register_save_offset; 382 383 for (int i = 0; i < regstosave_num; i++) { 384 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 385 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 386 387 switch (reg_type) { 388 case RegisterSaver::int_reg: { 389 if (reg_num != 31) // R31 restored at the end, it's the tmp reg! 
390 __ ld(as_Register(reg_num), offset, R1_SP); 391 break; 392 } 393 case RegisterSaver::float_reg: { 394 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 395 break; 396 } 397 case RegisterSaver::special_reg: { 398 if (reg_num == SR_CTR_SpecialRegisterEnumValue) { 399 if (restore_ctr) { // Nothing to do here if ctr already contains the next address. 400 __ ld(R31, offset, R1_SP); 401 __ mtctr(R31); 402 } 403 } else { 404 Unimplemented(); 405 } 406 break; 407 } 408 default: 409 ShouldNotReachHere(); 410 } 411 offset += reg_size; 412 } 413 414 for (int i = 0; i < vsregstosave_num; i++) { 415 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 416 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 417 418 __ li(R31, offset); 419 __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP); 420 421 offset += vs_reg_size; 422 } 423 424 assert(offset == frame_size_in_bytes, "consistency check"); 425 426 // restore link and the flags 427 __ ld(R31, frame_size_in_bytes + _abi(lr), R1_SP); 428 __ mtlr(R31); 429 430 __ ld(R31, frame_size_in_bytes + _abi(cr), R1_SP); 431 __ mtcr(R31); 432 433 // restore scratch register's value 434 __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 435 436 // pop the frame 437 __ addi(R1_SP, R1_SP, frame_size_in_bytes); 438 439 BLOCK_COMMENT("} restore_live_registers_and_pop_frame"); 440 } 441 442 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp, 443 int frame_size,int total_args, const VMRegPair *regs, 444 const VMRegPair *regs2) { 445 __ push_frame(frame_size, r_temp); 446 int st_off = frame_size - wordSize; 447 for (int i = 0; i < total_args; i++) { 448 VMReg r_1 = regs[i].first(); 449 VMReg r_2 = regs[i].second(); 450 if (!r_1->is_valid()) { 451 assert(!r_2->is_valid(), ""); 452 continue; 453 } 454 if (r_1->is_Register()) { 455 Register r = r_1->as_Register(); 456 __ std(r, st_off, R1_SP); 457 st_off -= wordSize; 458 } else if (r_1->is_FloatRegister()) { 459 FloatRegister f = r_1->as_FloatRegister(); 460 __ stfd(f, st_off, R1_SP); 461 st_off -= wordSize; 462 } 463 } 464 if (regs2 != NULL) { 465 for (int i = 0; i < total_args; i++) { 466 VMReg r_1 = regs2[i].first(); 467 VMReg r_2 = regs2[i].second(); 468 if (!r_1->is_valid()) { 469 assert(!r_2->is_valid(), ""); 470 continue; 471 } 472 if (r_1->is_Register()) { 473 Register r = r_1->as_Register(); 474 __ std(r, st_off, R1_SP); 475 st_off -= wordSize; 476 } else if (r_1->is_FloatRegister()) { 477 FloatRegister f = r_1->as_FloatRegister(); 478 __ stfd(f, st_off, R1_SP); 479 st_off -= wordSize; 480 } 481 } 482 } 483 } 484 485 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size, 486 int total_args, const VMRegPair *regs, 487 const VMRegPair *regs2) { 488 int st_off = frame_size - wordSize; 489 for (int i = 0; i < total_args; i++) { 490 VMReg r_1 = regs[i].first(); 491 VMReg r_2 = regs[i].second(); 492 if (r_1->is_Register()) { 493 Register r = r_1->as_Register(); 494 __ ld(r, st_off, R1_SP); 495 st_off -= wordSize; 496 } else if (r_1->is_FloatRegister()) { 497 FloatRegister f = r_1->as_FloatRegister(); 498 __ lfd(f, st_off, R1_SP); 499 st_off -= wordSize; 500 } 501 } 502 if (regs2 != NULL) 503 for (int i = 0; i < total_args; i++) { 504 VMReg r_1 = regs2[i].first(); 505 VMReg r_2 = regs2[i].second(); 506 if (r_1->is_Register()) { 507 Register r = r_1->as_Register(); 508 __ ld(r, st_off, R1_SP); 509 st_off -= wordSize; 510 } else if (r_1->is_FloatRegister()) { 511 FloatRegister f = 
r_1->as_FloatRegister(); 512 __ lfd(f, st_off, R1_SP); 513 st_off -= wordSize; 514 } 515 } 516 __ pop_frame(); 517 } 518 519 // Restore the registers that might be holding a result. 520 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) { 521 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 522 sizeof(RegisterSaver::LiveRegType); 523 const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here. 524 const int register_save_offset = frame_size_in_bytes - register_save_size; 525 526 // restore all result registers (ints and floats) 527 int offset = register_save_offset; 528 for (int i = 0; i < regstosave_num; i++) { 529 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 530 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 531 switch (reg_type) { 532 case RegisterSaver::int_reg: { 533 if (as_Register(reg_num)==R3_RET) // int result_reg 534 __ ld(as_Register(reg_num), offset, R1_SP); 535 break; 536 } 537 case RegisterSaver::float_reg: { 538 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg 539 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 540 break; 541 } 542 case RegisterSaver::special_reg: { 543 // Special registers don't hold a result. 544 break; 545 } 546 default: 547 ShouldNotReachHere(); 548 } 549 offset += reg_size; 550 } 551 552 assert(offset == frame_size_in_bytes, "consistency check"); 553 } 554 555 // Is vector's size (in bytes) bigger than a size saved by default? 556 bool SharedRuntime::is_wide_vector(int size) { 557 // Note, MaxVectorSize == 8/16 on PPC64. 558 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size); 559 return size > 8; 560 } 561 562 size_t SharedRuntime::trampoline_size() { 563 return Assembler::load_const_size + 8; 564 } 565 566 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) { 567 Register Rtemp = R12; 568 __ load_const(Rtemp, destination); 569 __ mtctr(Rtemp); 570 __ bctr(); 571 } 572 573 #ifdef COMPILER2 574 static int reg2slot(VMReg r) { 575 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 576 } 577 578 static int reg2offset(VMReg r) { 579 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 580 } 581 #endif 582 583 // --------------------------------------------------------------------------- 584 // Read the array of BasicTypes from a signature, and compute where the 585 // arguments should go. Values in the VMRegPair regs array refer to 4-byte 586 // quantities. Values less than VMRegImpl::stack0 are registers, those above 587 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer 588 // as framesizes are fixed. 589 // VMRegImpl::stack0 refers to the first slot 0(sp). 590 // and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register 591 // up to RegisterImpl::number_of_registers) are the 64-bit 592 // integer registers. 593 594 // Note: the INPUTS in sig_bt are in units of Java argument words, which are 595 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit 596 // units regardless of build. Of course for i486 there is no 64 bit build 597 598 // The Java calling convention is a "shifted" version of the C ABI. 599 // By skipping the first C ABI register we can call non-static jni methods 600 // with small numbers of arguments without having to shuffle the arguments 601 // at all. Since we control the java ABI we ought to at least get some 602 // advantage out of it. 
603 604 const VMReg java_iarg_reg[8] = { 605 R3->as_VMReg(), 606 R4->as_VMReg(), 607 R5->as_VMReg(), 608 R6->as_VMReg(), 609 R7->as_VMReg(), 610 R8->as_VMReg(), 611 R9->as_VMReg(), 612 R10->as_VMReg() 613 }; 614 615 const VMReg java_farg_reg[13] = { 616 F1->as_VMReg(), 617 F2->as_VMReg(), 618 F3->as_VMReg(), 619 F4->as_VMReg(), 620 F5->as_VMReg(), 621 F6->as_VMReg(), 622 F7->as_VMReg(), 623 F8->as_VMReg(), 624 F9->as_VMReg(), 625 F10->as_VMReg(), 626 F11->as_VMReg(), 627 F12->as_VMReg(), 628 F13->as_VMReg() 629 }; 630 631 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]); 632 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]); 633 634 int SharedRuntime::java_calling_convention(const BasicType *sig_bt, 635 VMRegPair *regs, 636 int total_args_passed, 637 int is_outgoing) { 638 // C2c calling conventions for compiled-compiled calls. 639 // Put 8 ints/longs into registers _AND_ 13 float/doubles into 640 // registers _AND_ put the rest on the stack. 641 642 const int inc_stk_for_intfloat = 1; // 1 slots for ints and floats 643 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles 644 645 int i; 646 VMReg reg; 647 int stk = 0; 648 int ireg = 0; 649 int freg = 0; 650 651 // We put the first 8 arguments into registers and the rest on the 652 // stack, float arguments are already in their argument registers 653 // due to c2c calling conventions (see calling_convention). 654 for (int i = 0; i < total_args_passed; ++i) { 655 switch(sig_bt[i]) { 656 case T_BOOLEAN: 657 case T_CHAR: 658 case T_BYTE: 659 case T_SHORT: 660 case T_INT: 661 if (ireg < num_java_iarg_registers) { 662 // Put int/ptr in register 663 reg = java_iarg_reg[ireg]; 664 ++ireg; 665 } else { 666 // Put int/ptr on stack. 667 reg = VMRegImpl::stack2reg(stk); 668 stk += inc_stk_for_intfloat; 669 } 670 regs[i].set1(reg); 671 break; 672 case T_LONG: 673 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 674 if (ireg < num_java_iarg_registers) { 675 // Put long in register. 676 reg = java_iarg_reg[ireg]; 677 ++ireg; 678 } else { 679 // Put long on stack. They must be aligned to 2 slots. 680 if (stk & 0x1) ++stk; 681 reg = VMRegImpl::stack2reg(stk); 682 stk += inc_stk_for_longdouble; 683 } 684 regs[i].set2(reg); 685 break; 686 case T_OBJECT: 687 case T_ARRAY: 688 case T_ADDRESS: 689 if (ireg < num_java_iarg_registers) { 690 // Put ptr in register. 691 reg = java_iarg_reg[ireg]; 692 ++ireg; 693 } else { 694 // Put ptr on stack. Objects must be aligned to 2 slots too, 695 // because "64-bit pointers record oop-ishness on 2 aligned 696 // adjacent registers." (see OopFlow::build_oop_map). 697 if (stk & 0x1) ++stk; 698 reg = VMRegImpl::stack2reg(stk); 699 stk += inc_stk_for_longdouble; 700 } 701 regs[i].set2(reg); 702 break; 703 case T_FLOAT: 704 if (freg < num_java_farg_registers) { 705 // Put float in register. 706 reg = java_farg_reg[freg]; 707 ++freg; 708 } else { 709 // Put float on stack. 710 reg = VMRegImpl::stack2reg(stk); 711 stk += inc_stk_for_intfloat; 712 } 713 regs[i].set1(reg); 714 break; 715 case T_DOUBLE: 716 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 717 if (freg < num_java_farg_registers) { 718 // Put double in register. 719 reg = java_farg_reg[freg]; 720 ++freg; 721 } else { 722 // Put double on stack. They must be aligned to 2 slots. 
723 if (stk & 0x1) ++stk; 724 reg = VMRegImpl::stack2reg(stk); 725 stk += inc_stk_for_longdouble; 726 } 727 regs[i].set2(reg); 728 break; 729 case T_VOID: 730 // Do not count halves. 731 regs[i].set_bad(); 732 break; 733 default: 734 ShouldNotReachHere(); 735 } 736 } 737 return align_up(stk, 2); 738 } 739 740 #if defined(COMPILER1) || defined(COMPILER2) 741 // Calling convention for calling C code. 742 int SharedRuntime::c_calling_convention(const BasicType *sig_bt, 743 VMRegPair *regs, 744 VMRegPair *regs2, 745 int total_args_passed) { 746 // Calling conventions for C runtime calls and calls to JNI native methods. 747 // 748 // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8 749 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist 750 // the first 13 flt/dbl's in the first 13 fp regs but additionally 751 // copy flt/dbl to the stack if they are beyond the 8th argument. 752 753 const VMReg iarg_reg[8] = { 754 R3->as_VMReg(), 755 R4->as_VMReg(), 756 R5->as_VMReg(), 757 R6->as_VMReg(), 758 R7->as_VMReg(), 759 R8->as_VMReg(), 760 R9->as_VMReg(), 761 R10->as_VMReg() 762 }; 763 764 const VMReg farg_reg[13] = { 765 F1->as_VMReg(), 766 F2->as_VMReg(), 767 F3->as_VMReg(), 768 F4->as_VMReg(), 769 F5->as_VMReg(), 770 F6->as_VMReg(), 771 F7->as_VMReg(), 772 F8->as_VMReg(), 773 F9->as_VMReg(), 774 F10->as_VMReg(), 775 F11->as_VMReg(), 776 F12->as_VMReg(), 777 F13->as_VMReg() 778 }; 779 780 // Check calling conventions consistency. 781 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c && 782 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c, 783 "consistency"); 784 785 // `Stk' counts stack slots. Due to alignment, 32 bit values occupy 786 // 2 such slots, like 64 bit values do. 787 const int inc_stk_for_intfloat = 2; // 2 slots for ints and floats 788 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles 789 790 int i; 791 VMReg reg; 792 // Leave room for C-compatible ABI_REG_ARGS. 793 int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size; 794 int arg = 0; 795 int freg = 0; 796 797 // Avoid passing C arguments in the wrong stack slots. 798 #if defined(ABI_ELFv2) 799 assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96, 800 "passing C arguments in wrong stack slots"); 801 #else 802 assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112, 803 "passing C arguments in wrong stack slots"); 804 #endif 805 // We fill-out regs AND regs2 if an argument must be passed in a 806 // register AND in a stack slot. If regs2 is NULL in such a 807 // situation, we bail-out with a fatal error. 808 for (int i = 0; i < total_args_passed; ++i, ++arg) { 809 // Initialize regs2 to BAD. 810 if (regs2 != NULL) regs2[i].set_bad(); 811 812 switch(sig_bt[i]) { 813 814 // 815 // If arguments 0-7 are integers, they are passed in integer registers. 816 // Argument i is placed in iarg_reg[i]. 817 // 818 case T_BOOLEAN: 819 case T_CHAR: 820 case T_BYTE: 821 case T_SHORT: 822 case T_INT: 823 // We must cast ints to longs and use full 64 bit stack slots 824 // here. Thus fall through, handle as long. 825 case T_LONG: 826 case T_OBJECT: 827 case T_ARRAY: 828 case T_ADDRESS: 829 case T_METADATA: 830 // Oops are already boxed if required (JNI). 
831 if (arg < Argument::n_int_register_parameters_c) { 832 reg = iarg_reg[arg]; 833 } else { 834 reg = VMRegImpl::stack2reg(stk); 835 stk += inc_stk_for_longdouble; 836 } 837 regs[i].set2(reg); 838 break; 839 840 // 841 // Floats are treated differently from int regs: The first 13 float arguments 842 // are passed in registers (not the float args among the first 13 args). 843 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed 844 // in farg_reg[j] if argument i is the j-th float argument of this call. 845 // 846 case T_FLOAT: 847 #if defined(LINUX) 848 // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float 849 // in the least significant word of an argument slot. 850 #if defined(VM_LITTLE_ENDIAN) 851 #define FLOAT_WORD_OFFSET_IN_SLOT 0 852 #else 853 #define FLOAT_WORD_OFFSET_IN_SLOT 1 854 #endif 855 #elif defined(AIX) 856 // Although AIX runs on big endian CPU, float is in the most 857 // significant word of an argument slot. 858 #define FLOAT_WORD_OFFSET_IN_SLOT 0 859 #else 860 #error "unknown OS" 861 #endif 862 if (freg < Argument::n_float_register_parameters_c) { 863 // Put float in register ... 864 reg = farg_reg[freg]; 865 ++freg; 866 867 // Argument i for i > 8 is placed on the stack even if it's 868 // placed in a register (if it's a float arg). Aix disassembly 869 // shows that xlC places these float args on the stack AND in 870 // a register. This is not documented, but we follow this 871 // convention, too. 872 if (arg >= Argument::n_regs_not_on_stack_c) { 873 // ... and on the stack. 874 guarantee(regs2 != NULL, "must pass float in register and stack slot"); 875 VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT); 876 regs2[i].set1(reg2); 877 stk += inc_stk_for_intfloat; 878 } 879 880 } else { 881 // Put float on stack. 882 reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT); 883 stk += inc_stk_for_intfloat; 884 } 885 regs[i].set1(reg); 886 break; 887 case T_DOUBLE: 888 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 889 if (freg < Argument::n_float_register_parameters_c) { 890 // Put double in register ... 891 reg = farg_reg[freg]; 892 ++freg; 893 894 // Argument i for i > 8 is placed on the stack even if it's 895 // placed in a register (if it's a double arg). Aix disassembly 896 // shows that xlC places these float args on the stack AND in 897 // a register. This is not documented, but we follow this 898 // convention, too. 899 if (arg >= Argument::n_regs_not_on_stack_c) { 900 // ... and on the stack. 901 guarantee(regs2 != NULL, "must pass float in register and stack slot"); 902 VMReg reg2 = VMRegImpl::stack2reg(stk); 903 regs2[i].set2(reg2); 904 stk += inc_stk_for_longdouble; 905 } 906 } else { 907 // Put double on stack. 908 reg = VMRegImpl::stack2reg(stk); 909 stk += inc_stk_for_longdouble; 910 } 911 regs[i].set2(reg); 912 break; 913 914 case T_VOID: 915 // Do not count halves. 
916 regs[i].set_bad(); 917 --arg; 918 break; 919 default: 920 ShouldNotReachHere(); 921 } 922 } 923 924 return align_up(stk, 2); 925 } 926 #endif // COMPILER2 927 928 static address gen_c2i_adapter(MacroAssembler *masm, 929 int total_args_passed, 930 int comp_args_on_stack, 931 const BasicType *sig_bt, 932 const VMRegPair *regs, 933 Label& call_interpreter, 934 const Register& ientry) { 935 936 address c2i_entrypoint; 937 938 const Register sender_SP = R21_sender_SP; // == R21_tmp1 939 const Register code = R22_tmp2; 940 //const Register ientry = R23_tmp3; 941 const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 }; 942 const int num_value_regs = sizeof(value_regs) / sizeof(Register); 943 int value_regs_index = 0; 944 945 const Register return_pc = R27_tmp7; 946 const Register tmp = R28_tmp8; 947 948 assert_different_registers(sender_SP, code, ientry, return_pc, tmp); 949 950 // Adapter needs TOP_IJAVA_FRAME_ABI. 951 const int adapter_size = frame::top_ijava_frame_abi_size + 952 align_up(total_args_passed * wordSize, frame::alignment_in_bytes); 953 954 // regular (verified) c2i entry point 955 c2i_entrypoint = __ pc(); 956 957 // Does compiled code exists? If yes, patch the caller's callsite. 958 __ ld(code, method_(code)); 959 __ cmpdi(CCR0, code, 0); 960 __ ld(ientry, method_(interpreter_entry)); // preloaded 961 __ beq(CCR0, call_interpreter); 962 963 964 // Patch caller's callsite, method_(code) was not NULL which means that 965 // compiled code exists. 966 __ mflr(return_pc); 967 __ std(return_pc, _abi(lr), R1_SP); 968 RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs); 969 970 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc); 971 972 RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs); 973 __ ld(return_pc, _abi(lr), R1_SP); 974 __ ld(ientry, method_(interpreter_entry)); // preloaded 975 __ mtlr(return_pc); 976 977 978 // Call the interpreter. 979 __ BIND(call_interpreter); 980 __ mtctr(ientry); 981 982 // Get a copy of the current SP for loading caller's arguments. 983 __ mr(sender_SP, R1_SP); 984 985 // Add space for the adapter. 986 __ resize_frame(-adapter_size, R12_scratch2); 987 988 int st_off = adapter_size - wordSize; 989 990 // Write the args into the outgoing interpreter space. 991 for (int i = 0; i < total_args_passed; i++) { 992 VMReg r_1 = regs[i].first(); 993 VMReg r_2 = regs[i].second(); 994 if (!r_1->is_valid()) { 995 assert(!r_2->is_valid(), ""); 996 continue; 997 } 998 if (r_1->is_stack()) { 999 Register tmp_reg = value_regs[value_regs_index]; 1000 value_regs_index = (value_regs_index + 1) % num_value_regs; 1001 // The calling convention produces OptoRegs that ignore the out 1002 // preserve area (JIT's ABI). We must account for it here. 1003 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 1004 if (!r_2->is_valid()) { 1005 __ lwz(tmp_reg, ld_off, sender_SP); 1006 } else { 1007 __ ld(tmp_reg, ld_off, sender_SP); 1008 } 1009 // Pretend stack targets were loaded into tmp_reg. 1010 r_1 = tmp_reg->as_VMReg(); 1011 } 1012 1013 if (r_1->is_Register()) { 1014 Register r = r_1->as_Register(); 1015 if (!r_2->is_valid()) { 1016 __ stw(r, st_off, R1_SP); 1017 st_off-=wordSize; 1018 } else { 1019 // Longs are given 2 64-bit slots in the interpreter, but the 1020 // data is passed in only 1 slot. 
1021 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 1022 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 1023 st_off-=wordSize; 1024 } 1025 __ std(r, st_off, R1_SP); 1026 st_off-=wordSize; 1027 } 1028 } else { 1029 assert(r_1->is_FloatRegister(), ""); 1030 FloatRegister f = r_1->as_FloatRegister(); 1031 if (!r_2->is_valid()) { 1032 __ stfs(f, st_off, R1_SP); 1033 st_off-=wordSize; 1034 } else { 1035 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the 1036 // data is passed in only 1 slot. 1037 // One of these should get known junk... 1038 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 1039 st_off-=wordSize; 1040 __ stfd(f, st_off, R1_SP); 1041 st_off-=wordSize; 1042 } 1043 } 1044 } 1045 1046 // Jump to the interpreter just as if interpreter was doing it. 1047 1048 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 1049 1050 // load TOS 1051 __ addi(R15_esp, R1_SP, st_off); 1052 1053 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1. 1054 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register"); 1055 __ bctr(); 1056 1057 return c2i_entrypoint; 1058 } 1059 1060 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, 1061 int total_args_passed, 1062 int comp_args_on_stack, 1063 const BasicType *sig_bt, 1064 const VMRegPair *regs) { 1065 1066 // Load method's entry-point from method. 1067 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); 1068 __ mtctr(R12_scratch2); 1069 1070 // We will only enter here from an interpreted frame and never from after 1071 // passing thru a c2i. Azul allowed this but we do not. If we lose the 1072 // race and use a c2i we will remain interpreted for the race loser(s). 1073 // This removes all sorts of headaches on the x86 side and also eliminates 1074 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. 1075 1076 // Note: r13 contains the senderSP on entry. We must preserve it since 1077 // we may do a i2c -> c2i transition if we lose a race where compiled 1078 // code goes non-entrant while we get args ready. 1079 // In addition we use r13 to locate all the interpreter args as 1080 // we must align the stack to 16 bytes on an i2c entry else we 1081 // lose alignment we expect in all compiled code and register 1082 // save code can segv when fxsave instructions find improperly 1083 // aligned stack pointer. 1084 1085 const Register ld_ptr = R15_esp; 1086 const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 }; 1087 const int num_value_regs = sizeof(value_regs) / sizeof(Register); 1088 int value_regs_index = 0; 1089 1090 int ld_offset = total_args_passed*wordSize; 1091 1092 // Cut-out for having no stack args. Since up to 2 int/oop args are passed 1093 // in registers, we will occasionally have no stack args. 1094 int comp_words_on_stack = 0; 1095 if (comp_args_on_stack) { 1096 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in 1097 // registers are below. By subtracting stack0, we either get a negative 1098 // number (all values in registers) or the maximum stack slot accessed. 1099 1100 // Convert 4-byte c2 stack slots to words. 1101 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; 1102 // Round up to miminum stack alignment, in wordSize. 
1103 comp_words_on_stack = align_up(comp_words_on_stack, 2); 1104 __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1); 1105 } 1106 1107 // Now generate the shuffle code. Pick up all register args and move the 1108 // rest through register value=Z_R12. 1109 BLOCK_COMMENT("Shuffle arguments"); 1110 for (int i = 0; i < total_args_passed; i++) { 1111 if (sig_bt[i] == T_VOID) { 1112 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 1113 continue; 1114 } 1115 1116 // Pick up 0, 1 or 2 words from ld_ptr. 1117 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), 1118 "scrambled load targets?"); 1119 VMReg r_1 = regs[i].first(); 1120 VMReg r_2 = regs[i].second(); 1121 if (!r_1->is_valid()) { 1122 assert(!r_2->is_valid(), ""); 1123 continue; 1124 } 1125 if (r_1->is_FloatRegister()) { 1126 if (!r_2->is_valid()) { 1127 __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr); 1128 ld_offset-=wordSize; 1129 } else { 1130 // Skip the unused interpreter slot. 1131 __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr); 1132 ld_offset-=2*wordSize; 1133 } 1134 } else { 1135 Register r; 1136 if (r_1->is_stack()) { 1137 // Must do a memory to memory move thru "value". 1138 r = value_regs[value_regs_index]; 1139 value_regs_index = (value_regs_index + 1) % num_value_regs; 1140 } else { 1141 r = r_1->as_Register(); 1142 } 1143 if (!r_2->is_valid()) { 1144 // Not sure we need to do this but it shouldn't hurt. 1145 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) { 1146 __ ld(r, ld_offset, ld_ptr); 1147 ld_offset-=wordSize; 1148 } else { 1149 __ lwz(r, ld_offset, ld_ptr); 1150 ld_offset-=wordSize; 1151 } 1152 } else { 1153 // In 64bit, longs are given 2 64-bit slots in the interpreter, but the 1154 // data is passed in only 1 slot. 1155 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 1156 ld_offset-=wordSize; 1157 } 1158 __ ld(r, ld_offset, ld_ptr); 1159 ld_offset-=wordSize; 1160 } 1161 1162 if (r_1->is_stack()) { 1163 // Now store value where the compiler expects it 1164 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size; 1165 1166 if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN || 1167 sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) { 1168 __ stw(r, st_off, R1_SP); 1169 } else { 1170 __ std(r, st_off, R1_SP); 1171 } 1172 } 1173 } 1174 } 1175 1176 BLOCK_COMMENT("Store method"); 1177 // Store method into thread->callee_target. 1178 // We might end up in handle_wrong_method if the callee is 1179 // deoptimized as we race thru here. If that happens we don't want 1180 // to take a safepoint because the caller frame will look 1181 // interpreted and arguments are now "compiled" so it is much better 1182 // to make this transition invisible to the stack walking 1183 // code. Unfortunately if we try and find the callee by normal means 1184 // a safepoint is possible. So we stash the desired callee in the 1185 // thread and the vm will find there should this case occur. 1186 __ std(R19_method, thread_(callee_target)); 1187 1188 // Jump to the compiled code just as if compiled code was doing it. 
1189 __ bctr(); 1190 } 1191 1192 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, 1193 int total_args_passed, 1194 int comp_args_on_stack, 1195 const BasicType *sig_bt, 1196 const VMRegPair *regs, 1197 AdapterFingerPrint* fingerprint) { 1198 address i2c_entry; 1199 address c2i_unverified_entry; 1200 address c2i_entry; 1201 1202 1203 // entry: i2c 1204 1205 __ align(CodeEntryAlignment); 1206 i2c_entry = __ pc(); 1207 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); 1208 1209 1210 // entry: c2i unverified 1211 1212 __ align(CodeEntryAlignment); 1213 BLOCK_COMMENT("c2i unverified entry"); 1214 c2i_unverified_entry = __ pc(); 1215 1216 // inline_cache contains a compiledICHolder 1217 const Register ic = R19_method; 1218 const Register ic_klass = R11_scratch1; 1219 const Register receiver_klass = R12_scratch2; 1220 const Register code = R21_tmp1; 1221 const Register ientry = R23_tmp3; 1222 1223 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry); 1224 assert(R11_scratch1 == R11, "need prologue scratch register"); 1225 1226 Label call_interpreter; 1227 1228 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), 1229 "klass offset should reach into any page"); 1230 // Check for NULL argument if we don't have implicit null checks. 1231 if (!ImplicitNullChecks || !os::zero_page_read_protected()) { 1232 if (TrapBasedNullChecks) { 1233 __ trap_null_check(R3_ARG1); 1234 } else { 1235 Label valid; 1236 __ cmpdi(CCR0, R3_ARG1, 0); 1237 __ bne_predict_taken(CCR0, valid); 1238 // We have a null argument, branch to ic_miss_stub. 1239 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), 1240 relocInfo::runtime_call_type); 1241 __ BIND(valid); 1242 } 1243 } 1244 // Assume argument is not NULL, load klass from receiver. 1245 __ load_klass(receiver_klass, R3_ARG1); 1246 1247 __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic); 1248 1249 if (TrapBasedICMissChecks) { 1250 __ trap_ic_miss_check(receiver_klass, ic_klass); 1251 } else { 1252 Label valid; 1253 __ cmpd(CCR0, receiver_klass, ic_klass); 1254 __ beq_predict_taken(CCR0, valid); 1255 // We have an unexpected klass, branch to ic_miss_stub. 1256 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), 1257 relocInfo::runtime_call_type); 1258 __ BIND(valid); 1259 } 1260 1261 // Argument is valid and klass is as expected, continue. 1262 1263 // Extract method from inline cache, verified entry point needs it. 1264 __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic); 1265 assert(R19_method == ic, "the inline cache register is dead here"); 1266 1267 __ ld(code, method_(code)); 1268 __ cmpdi(CCR0, code, 0); 1269 __ ld(ientry, method_(interpreter_entry)); // preloaded 1270 __ beq_predict_taken(CCR0, call_interpreter); 1271 1272 // Branch to ic_miss_stub. 
1273 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type); 1274 1275 // entry: c2i 1276 1277 c2i_entry = __ pc(); 1278 1279 // Class initialization barrier for static methods 1280 address c2i_no_clinit_check_entry = NULL; 1281 if (VM_Version::supports_fast_class_init_checks()) { 1282 Label L_skip_barrier; 1283 1284 { // Bypass the barrier for non-static methods 1285 __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method); 1286 __ andi_(R0, R0, JVM_ACC_STATIC); 1287 __ beq(CCR0, L_skip_barrier); // non-static 1288 } 1289 1290 Register klass = R11_scratch1; 1291 __ load_method_holder(klass, R19_method); 1292 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 1293 1294 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 1295 __ mtctr(klass); 1296 __ bctr(); 1297 1298 __ bind(L_skip_barrier); 1299 c2i_no_clinit_check_entry = __ pc(); 1300 } 1301 1302 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry); 1303 1304 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry); 1305 } 1306 1307 #ifdef COMPILER2 1308 // An oop arg. Must pass a handle not the oop itself. 1309 static void object_move(MacroAssembler* masm, 1310 int frame_size_in_slots, 1311 OopMap* oop_map, int oop_handle_offset, 1312 bool is_receiver, int* receiver_offset, 1313 VMRegPair src, VMRegPair dst, 1314 Register r_caller_sp, Register r_temp_1, Register r_temp_2) { 1315 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)), 1316 "receiver has already been moved"); 1317 1318 // We must pass a handle. First figure out the location we use as a handle. 1319 1320 if (src.first()->is_stack()) { 1321 // stack to stack or reg 1322 1323 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register(); 1324 Label skip; 1325 const int oop_slot_in_callers_frame = reg2slot(src.first()); 1326 1327 guarantee(!is_receiver, "expecting receiver in register"); 1328 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots)); 1329 1330 __ addi(r_handle, r_caller_sp, reg2offset(src.first())); 1331 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp); 1332 __ cmpdi(CCR0, r_temp_2, 0); 1333 __ bne(CCR0, skip); 1334 // Use a NULL handle if oop is NULL. 1335 __ li(r_handle, 0); 1336 __ bind(skip); 1337 1338 if (dst.first()->is_stack()) { 1339 // stack to stack 1340 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1341 } else { 1342 // stack to reg 1343 // Nothing to do, r_handle is already the dst register. 1344 } 1345 } else { 1346 // reg to stack or reg 1347 const Register r_oop = src.first()->as_Register(); 1348 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register(); 1349 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word 1350 + oop_handle_offset; // in slots 1351 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size; 1352 Label skip; 1353 1354 if (is_receiver) { 1355 *receiver_offset = oop_offset; 1356 } 1357 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1358 1359 __ std( r_oop, oop_offset, R1_SP); 1360 __ addi(r_handle, R1_SP, oop_offset); 1361 1362 __ cmpdi(CCR0, r_oop, 0); 1363 __ bne(CCR0, skip); 1364 // Use a NULL handle if oop is NULL. 
1365 __ li(r_handle, 0); 1366 __ bind(skip); 1367 1368 if (dst.first()->is_stack()) { 1369 // reg to stack 1370 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1371 } else { 1372 // reg to reg 1373 // Nothing to do, r_handle is already the dst register. 1374 } 1375 } 1376 } 1377 1378 static void int_move(MacroAssembler*masm, 1379 VMRegPair src, VMRegPair dst, 1380 Register r_caller_sp, Register r_temp) { 1381 assert(src.first()->is_valid(), "incoming must be int"); 1382 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1383 1384 if (src.first()->is_stack()) { 1385 if (dst.first()->is_stack()) { 1386 // stack to stack 1387 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp); 1388 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1389 } else { 1390 // stack to reg 1391 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1392 } 1393 } else if (dst.first()->is_stack()) { 1394 // reg to stack 1395 __ extsw(r_temp, src.first()->as_Register()); 1396 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1397 } else { 1398 // reg to reg 1399 __ extsw(dst.first()->as_Register(), src.first()->as_Register()); 1400 } 1401 } 1402 1403 static void long_move(MacroAssembler*masm, 1404 VMRegPair src, VMRegPair dst, 1405 Register r_caller_sp, Register r_temp) { 1406 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long"); 1407 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1408 1409 if (src.first()->is_stack()) { 1410 if (dst.first()->is_stack()) { 1411 // stack to stack 1412 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1413 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1414 } else { 1415 // stack to reg 1416 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1417 } 1418 } else if (dst.first()->is_stack()) { 1419 // reg to stack 1420 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP); 1421 } else { 1422 // reg to reg 1423 if (dst.first()->as_Register() != src.first()->as_Register()) 1424 __ mr(dst.first()->as_Register(), src.first()->as_Register()); 1425 } 1426 } 1427 1428 static void float_move(MacroAssembler*masm, 1429 VMRegPair src, VMRegPair dst, 1430 Register r_caller_sp, Register r_temp) { 1431 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float"); 1432 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float"); 1433 1434 if (src.first()->is_stack()) { 1435 if (dst.first()->is_stack()) { 1436 // stack to stack 1437 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp); 1438 __ stw(r_temp, reg2offset(dst.first()), R1_SP); 1439 } else { 1440 // stack to reg 1441 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1442 } 1443 } else if (dst.first()->is_stack()) { 1444 // reg to stack 1445 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 1446 } else { 1447 // reg to reg 1448 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1449 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1450 } 1451 } 1452 1453 static void double_move(MacroAssembler*masm, 1454 VMRegPair src, VMRegPair dst, 1455 Register r_caller_sp, Register r_temp) { 1456 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double"); 1457 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double"); 1458 1459 if 
(src.first()->is_stack()) { 1460 if (dst.first()->is_stack()) { 1461 // stack to stack 1462 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1463 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1464 } else { 1465 // stack to reg 1466 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1467 } 1468 } else if (dst.first()->is_stack()) { 1469 // reg to stack 1470 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 1471 } else { 1472 // reg to reg 1473 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1474 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1475 } 1476 } 1477 1478 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1479 switch (ret_type) { 1480 case T_BOOLEAN: 1481 case T_CHAR: 1482 case T_BYTE: 1483 case T_SHORT: 1484 case T_INT: 1485 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1486 break; 1487 case T_ARRAY: 1488 case T_OBJECT: 1489 case T_LONG: 1490 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1491 break; 1492 case T_FLOAT: 1493 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1494 break; 1495 case T_DOUBLE: 1496 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1497 break; 1498 case T_VOID: 1499 break; 1500 default: 1501 ShouldNotReachHere(); 1502 break; 1503 } 1504 } 1505 1506 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1507 switch (ret_type) { 1508 case T_BOOLEAN: 1509 case T_CHAR: 1510 case T_BYTE: 1511 case T_SHORT: 1512 case T_INT: 1513 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1514 break; 1515 case T_ARRAY: 1516 case T_OBJECT: 1517 case T_LONG: 1518 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1519 break; 1520 case T_FLOAT: 1521 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1522 break; 1523 case T_DOUBLE: 1524 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1525 break; 1526 case T_VOID: 1527 break; 1528 default: 1529 ShouldNotReachHere(); 1530 break; 1531 } 1532 } 1533 1534 static void save_or_restore_arguments(MacroAssembler* masm, 1535 const int stack_slots, 1536 const int total_in_args, 1537 const int arg_save_area, 1538 OopMap* map, 1539 VMRegPair* in_regs, 1540 BasicType* in_sig_bt) { 1541 // If map is non-NULL then the code should store the values, 1542 // otherwise it should load them. 1543 int slot = arg_save_area; 1544 // Save down double word first. 
1545 for (int i = 0; i < total_in_args; i++) { 1546 if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) { 1547 int offset = slot * VMRegImpl::stack_slot_size; 1548 slot += VMRegImpl::slots_per_word; 1549 assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)"); 1550 if (map != NULL) { 1551 __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); 1552 } else { 1553 __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); 1554 } 1555 } else if (in_regs[i].first()->is_Register() && 1556 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) { 1557 int offset = slot * VMRegImpl::stack_slot_size; 1558 if (map != NULL) { 1559 __ std(in_regs[i].first()->as_Register(), offset, R1_SP); 1560 if (in_sig_bt[i] == T_ARRAY) { 1561 map->set_oop(VMRegImpl::stack2reg(slot)); 1562 } 1563 } else { 1564 __ ld(in_regs[i].first()->as_Register(), offset, R1_SP); 1565 } 1566 slot += VMRegImpl::slots_per_word; 1567 assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)"); 1568 } 1569 } 1570 // Save or restore single word registers. 1571 for (int i = 0; i < total_in_args; i++) { 1572 if (in_regs[i].first()->is_Register()) { 1573 int offset = slot * VMRegImpl::stack_slot_size; 1574 // Value lives in an input register. Save it on stack. 1575 switch (in_sig_bt[i]) { 1576 case T_BOOLEAN: 1577 case T_CHAR: 1578 case T_BYTE: 1579 case T_SHORT: 1580 case T_INT: 1581 if (map != NULL) { 1582 __ stw(in_regs[i].first()->as_Register(), offset, R1_SP); 1583 } else { 1584 __ lwa(in_regs[i].first()->as_Register(), offset, R1_SP); 1585 } 1586 slot++; 1587 assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)"); 1588 break; 1589 case T_ARRAY: 1590 case T_LONG: 1591 // handled above 1592 break; 1593 case T_OBJECT: 1594 default: ShouldNotReachHere(); 1595 } 1596 } else if (in_regs[i].first()->is_FloatRegister()) { 1597 if (in_sig_bt[i] == T_FLOAT) { 1598 int offset = slot * VMRegImpl::stack_slot_size; 1599 slot++; 1600 assert(slot <= stack_slots, "overflow (after FLOAT stack slot)"); 1601 if (map != NULL) { 1602 __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); 1603 } else { 1604 __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); 1605 } 1606 } 1607 } else if (in_regs[i].first()->is_stack()) { 1608 if (in_sig_bt[i] == T_ARRAY && map != NULL) { 1609 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1610 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots)); 1611 } 1612 } 1613 } 1614 } 1615 1616 // Check GCLocker::needs_gc and enter the runtime if it's true. This 1617 // keeps a new JNI critical region from starting until a GC has been 1618 // forced. Save down any oops in registers and describe them in an 1619 // OopMap. 1620 static void check_needs_gc_for_critical_native(MacroAssembler* masm, 1621 const int stack_slots, 1622 const int total_in_args, 1623 const int arg_save_area, 1624 OopMapSet* oop_maps, 1625 VMRegPair* in_regs, 1626 BasicType* in_sig_bt, 1627 Register tmp_reg ) { 1628 __ block_comment("check GCLocker::needs_gc"); 1629 Label cont; 1630 __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address()); 1631 __ cmplwi(CCR0, tmp_reg, 0); 1632 __ beq(CCR0, cont); 1633 1634 // Save down any values that are live in registers and call into the 1635 // runtime to halt for a GC. 
1636 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1637 save_or_restore_arguments(masm, stack_slots, total_in_args, 1638 arg_save_area, map, in_regs, in_sig_bt); 1639 1640 __ mr(R3_ARG1, R16_thread); 1641 __ set_last_Java_frame(R1_SP, noreg); 1642 1643 __ block_comment("block_for_jni_critical"); 1644 address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical); 1645 #if defined(ABI_ELFv2) 1646 __ call_c(entry_point, relocInfo::runtime_call_type); 1647 #else 1648 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type); 1649 #endif 1650 address start = __ pc() - __ offset(), 1651 calls_return_pc = __ last_calls_return_pc(); 1652 oop_maps->add_gc_map(calls_return_pc - start, map); 1653 1654 __ reset_last_Java_frame(); 1655 1656 // Reload all the register arguments. 1657 save_or_restore_arguments(masm, stack_slots, total_in_args, 1658 arg_save_area, NULL, in_regs, in_sig_bt); 1659 1660 __ BIND(cont); 1661 1662 #ifdef ASSERT 1663 if (StressCriticalJNINatives) { 1664 // Stress register saving. 1665 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1666 save_or_restore_arguments(masm, stack_slots, total_in_args, 1667 arg_save_area, map, in_regs, in_sig_bt); 1668 // Destroy argument registers. 1669 for (int i = 0; i < total_in_args; i++) { 1670 if (in_regs[i].first()->is_Register()) { 1671 const Register reg = in_regs[i].first()->as_Register(); 1672 __ neg(reg, reg); 1673 } else if (in_regs[i].first()->is_FloatRegister()) { 1674 __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister()); 1675 } 1676 } 1677 1678 save_or_restore_arguments(masm, stack_slots, total_in_args, 1679 arg_save_area, NULL, in_regs, in_sig_bt); 1680 } 1681 #endif 1682 } 1683 1684 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) { 1685 if (src.first()->is_stack()) { 1686 if (dst.first()->is_stack()) { 1687 // stack to stack 1688 __ ld(r_temp, reg2offset(src.first()), r_caller_sp); 1689 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1690 } else { 1691 // stack to reg 1692 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1693 } 1694 } else if (dst.first()->is_stack()) { 1695 // reg to stack 1696 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP); 1697 } else { 1698 if (dst.first() != src.first()) { 1699 __ mr(dst.first()->as_Register(), src.first()->as_Register()); 1700 } 1701 } 1702 } 1703 1704 // Unpack an array argument into a pointer to the body and the length 1705 // if the array is non-null, otherwise pass 0 for both. 1706 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, 1707 VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp, 1708 Register tmp_reg, Register tmp2_reg) { 1709 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg, 1710 "possible collision"); 1711 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg, 1712 "possible collision"); 1713 1714 // Pass the length, ptr pair. 1715 Label set_out_args; 1716 VMRegPair tmp, tmp2; 1717 tmp.set_ptr(tmp_reg->as_VMReg()); 1718 tmp2.set_ptr(tmp2_reg->as_VMReg()); 1719 if (reg.first()->is_stack()) { 1720 // Load the arg up from the stack. 1721 move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0); 1722 reg = tmp; 1723 } 1724 __ li(tmp2_reg, 0); // Pass zeros if Array=null. 
1725 if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0); 1726 __ cmpdi(CCR0, reg.first()->as_Register(), 0); 1727 __ beq(CCR0, set_out_args); 1728 __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register()); 1729 __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)); 1730 __ bind(set_out_args); 1731 move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0); 1732 move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64. 1733 } 1734 1735 static void verify_oop_args(MacroAssembler* masm, 1736 const methodHandle& method, 1737 const BasicType* sig_bt, 1738 const VMRegPair* regs) { 1739 Register temp_reg = R19_method; // not part of any compiled calling seq 1740 if (VerifyOops) { 1741 for (int i = 0; i < method->size_of_parameters(); i++) { 1742 if (sig_bt[i] == T_OBJECT || 1743 sig_bt[i] == T_ARRAY) { 1744 VMReg r = regs[i].first(); 1745 assert(r->is_valid(), "bad oop arg"); 1746 if (r->is_stack()) { 1747 __ ld(temp_reg, reg2offset(r), R1_SP); 1748 __ verify_oop(temp_reg); 1749 } else { 1750 __ verify_oop(r->as_Register()); 1751 } 1752 } 1753 } 1754 } 1755 } 1756 1757 static void gen_special_dispatch(MacroAssembler* masm, 1758 const methodHandle& method, 1759 const BasicType* sig_bt, 1760 const VMRegPair* regs) { 1761 verify_oop_args(masm, method, sig_bt, regs); 1762 vmIntrinsics::ID iid = method->intrinsic_id(); 1763 1764 // Now write the args into the outgoing interpreter space 1765 bool has_receiver = false; 1766 Register receiver_reg = noreg; 1767 int member_arg_pos = -1; 1768 Register member_reg = noreg; 1769 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1770 if (ref_kind != 0) { 1771 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1772 member_reg = R19_method; // known to be free at this point 1773 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1774 } else if (iid == vmIntrinsics::_invokeBasic) { 1775 has_receiver = true; 1776 } else { 1777 fatal("unexpected intrinsic id %d", iid); 1778 } 1779 1780 if (member_reg != noreg) { 1781 // Load the member_arg into register, if necessary. 1782 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1783 VMReg r = regs[member_arg_pos].first(); 1784 if (r->is_stack()) { 1785 __ ld(member_reg, reg2offset(r), R1_SP); 1786 } else { 1787 // no data motion is needed 1788 member_reg = r->as_Register(); 1789 } 1790 } 1791 1792 if (has_receiver) { 1793 // Make sure the receiver is loaded into a register. 1794 assert(method->size_of_parameters() > 0, "oob"); 1795 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1796 VMReg r = regs[0].first(); 1797 assert(r->is_valid(), "bad receiver arg"); 1798 if (r->is_stack()) { 1799 // Porting note: This assumes that compiled calling conventions always 1800 // pass the receiver oop in a register. If this is not true on some 1801 // platform, pick a temp and load the receiver from stack. 1802 fatal("receiver always in a register"); 1803 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point? 
1804 __ ld(receiver_reg, reg2offset(r), R1_SP);
1805 } else {
1806 // no data motion is needed
1807 receiver_reg = r->as_Register();
1808 }
1809 }
1810
1811 // Figure out which address we are really jumping to:
1812 MethodHandles::generate_method_handle_dispatch(masm, iid,
1813 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1814 }
1815
1816 #endif // COMPILER2
1817
1818 // ---------------------------------------------------------------------------
1819 // Generate a native wrapper for a given method. The method takes arguments
1820 // in the Java compiled code convention, marshals them to the native
1821 // convention (handlizes oops, etc), transitions to native, makes the call,
1822 // returns to java state (possibly blocking), unhandlizes any result and
1823 // returns.
1824 //
1825 // Critical native functions are a shorthand for the use of
1826 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1827 // functions. The wrapper is expected to unpack the arguments before
1828 // passing them to the callee and perform checks before and after the
1829 // native call to ensure that the GCLocker
1830 // lock_critical/unlock_critical semantics are followed. Some other
1831 // parts of JNI setup are skipped, like the tear down of the JNI handle
1832 // block and the check for pending exceptions, because it's impossible for them
1833 // to be thrown.
1834 //
1835 // They are roughly structured like this:
1836 // if (GCLocker::needs_gc())
1837 // SharedRuntime::block_for_jni_critical();
1838 // transition to thread_in_native
1839 // unpack array arguments and call native entry point
1840 // check for safepoint in progress
1841 // check if any thread suspend flags are set
1842 // call into the JVM and possibly unlock the JNI critical
1843 // if a GC was suppressed while in the critical native.
1844 // transition back to thread_in_Java
1845 // return to caller
1846 //
1847 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1848 const methodHandle& method,
1849 int compile_id,
1850 BasicType *in_sig_bt,
1851 VMRegPair *in_regs,
1852 BasicType ret_type) {
1853 #ifdef COMPILER2
1854 if (method->is_method_handle_intrinsic()) {
1855 vmIntrinsics::ID iid = method->intrinsic_id();
1856 intptr_t start = (intptr_t)__ pc();
1857 int vep_offset = ((intptr_t)__ pc()) - start;
1858 gen_special_dispatch(masm,
1859 method,
1860 in_sig_bt,
1861 in_regs);
1862 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1863 __ flush();
1864 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1865 return nmethod::new_native_nmethod(method,
1866 compile_id,
1867 masm->code(),
1868 vep_offset,
1869 frame_complete,
1870 stack_slots / VMRegImpl::slots_per_word,
1871 in_ByteSize(-1),
1872 in_ByteSize(-1),
1873 (OopMapSet*)NULL);
1874 }
1875
1876 bool is_critical_native = true;
1877 address native_func = method->critical_native_function();
1878 if (native_func == NULL) {
1879 native_func = method->native_function();
1880 is_critical_native = false;
1881 }
1882 assert(native_func != NULL, "must have function");
1883
1884 // First, create signature for outgoing C call
1885 // --------------------------------------------------------------------------
1886
1887 int total_in_args = method->size_of_parameters();
1888 // We have received a description of where all the java args are located
1889 // on entry to the wrapper. We need to convert these args to where
1890 // the jni function will expect them.
To figure out where they go 1891 // we convert the java signature to a C signature by inserting 1892 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1893 1894 // Calculate the total number of C arguments and create arrays for the 1895 // signature and the outgoing registers. 1896 // On ppc64, we have two arrays for the outgoing registers, because 1897 // some floating-point arguments must be passed in registers _and_ 1898 // in stack locations. 1899 bool method_is_static = method->is_static(); 1900 int total_c_args = total_in_args; 1901 1902 if (!is_critical_native) { 1903 int n_hidden_args = method_is_static ? 2 : 1; 1904 total_c_args += n_hidden_args; 1905 } else { 1906 // No JNIEnv*, no this*, but unpacked arrays (base+length). 1907 for (int i = 0; i < total_in_args; i++) { 1908 if (in_sig_bt[i] == T_ARRAY) { 1909 total_c_args++; 1910 } 1911 } 1912 } 1913 1914 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1915 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1916 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1917 BasicType* in_elem_bt = NULL; 1918 1919 // Create the signature for the C call: 1920 // 1) add the JNIEnv* 1921 // 2) add the class if the method is static 1922 // 3) copy the rest of the incoming signature (shifted by the number of 1923 // hidden arguments). 1924 1925 int argc = 0; 1926 if (!is_critical_native) { 1927 out_sig_bt[argc++] = T_ADDRESS; 1928 if (method->is_static()) { 1929 out_sig_bt[argc++] = T_OBJECT; 1930 } 1931 1932 for (int i = 0; i < total_in_args ; i++ ) { 1933 out_sig_bt[argc++] = in_sig_bt[i]; 1934 } 1935 } else { 1936 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1937 SignatureStream ss(method->signature()); 1938 int o = 0; 1939 for (int i = 0; i < total_in_args ; i++, o++) { 1940 if (in_sig_bt[i] == T_ARRAY) { 1941 // Arrays are passed as int, elem* pair 1942 Symbol* atype = ss.as_symbol(); 1943 const char* at = atype->as_C_string(); 1944 if (strlen(at) == 2) { 1945 assert(at[0] == '[', "must be"); 1946 switch (at[1]) { 1947 case 'B': in_elem_bt[o] = T_BYTE; break; 1948 case 'C': in_elem_bt[o] = T_CHAR; break; 1949 case 'D': in_elem_bt[o] = T_DOUBLE; break; 1950 case 'F': in_elem_bt[o] = T_FLOAT; break; 1951 case 'I': in_elem_bt[o] = T_INT; break; 1952 case 'J': in_elem_bt[o] = T_LONG; break; 1953 case 'S': in_elem_bt[o] = T_SHORT; break; 1954 case 'Z': in_elem_bt[o] = T_BOOLEAN; break; 1955 default: ShouldNotReachHere(); 1956 } 1957 } 1958 } else { 1959 in_elem_bt[o] = T_VOID; 1960 } 1961 if (in_sig_bt[i] != T_VOID) { 1962 assert(in_sig_bt[i] == ss.type(), "must match"); 1963 ss.next(); 1964 } 1965 } 1966 1967 for (int i = 0; i < total_in_args ; i++ ) { 1968 if (in_sig_bt[i] == T_ARRAY) { 1969 // Arrays are passed as int, elem* pair. 1970 out_sig_bt[argc++] = T_INT; 1971 out_sig_bt[argc++] = T_ADDRESS; 1972 } else { 1973 out_sig_bt[argc++] = in_sig_bt[i]; 1974 } 1975 } 1976 } 1977 1978 1979 // Compute the wrapper's frame size. 1980 // -------------------------------------------------------------------------- 1981 1982 // Now figure out where the args must be stored and how much stack space 1983 // they require. 1984 // 1985 // Compute framesize for the wrapper. We need to handlize all oops in 1986 // incoming registers. 
1987 // 1988 // Calculate the total number of stack slots we will need: 1989 // 1) abi requirements 1990 // 2) outgoing arguments 1991 // 3) space for inbound oop handle area 1992 // 4) space for handlizing a klass if static method 1993 // 5) space for a lock if synchronized method 1994 // 6) workspace for saving return values, int <-> float reg moves, etc. 1995 // 7) alignment 1996 // 1997 // Layout of the native wrapper frame: 1998 // (stack grows upwards, memory grows downwards) 1999 // 2000 // NW [ABI_REG_ARGS] <-- 1) R1_SP 2001 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 2002 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives) 2003 // klass <-- 4) R1_SP + klass_offset 2004 // lock <-- 5) R1_SP + lock_offset 2005 // [workspace] <-- 6) R1_SP + workspace_offset 2006 // [alignment] (optional) <-- 7) 2007 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 2008 // 2009 // - *_slot_offset Indicates offset from SP in number of stack slots. 2010 // - *_offset Indicates offset from SP in bytes. 2011 2012 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) + // 1+2) 2013 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 2014 2015 // Now the space for the inbound oop handle area. 2016 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 2017 if (is_critical_native) { 2018 // Critical natives may have to call out so they need a save area 2019 // for register arguments. 2020 int double_slots = 0; 2021 int single_slots = 0; 2022 for (int i = 0; i < total_in_args; i++) { 2023 if (in_regs[i].first()->is_Register()) { 2024 const Register reg = in_regs[i].first()->as_Register(); 2025 switch (in_sig_bt[i]) { 2026 case T_BOOLEAN: 2027 case T_BYTE: 2028 case T_SHORT: 2029 case T_CHAR: 2030 case T_INT: 2031 // Fall through. 2032 case T_ARRAY: 2033 case T_LONG: double_slots++; break; 2034 default: ShouldNotReachHere(); 2035 } 2036 } else if (in_regs[i].first()->is_FloatRegister()) { 2037 switch (in_sig_bt[i]) { 2038 case T_FLOAT: single_slots++; break; 2039 case T_DOUBLE: double_slots++; break; 2040 default: ShouldNotReachHere(); 2041 } 2042 } 2043 } 2044 total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even 2045 } 2046 2047 int oop_handle_slot_offset = stack_slots; 2048 stack_slots += total_save_slots; // 3) 2049 2050 int klass_slot_offset = 0; 2051 int klass_offset = -1; 2052 if (method_is_static && !is_critical_native) { // 4) 2053 klass_slot_offset = stack_slots; 2054 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2055 stack_slots += VMRegImpl::slots_per_word; 2056 } 2057 2058 int lock_slot_offset = 0; 2059 int lock_offset = -1; 2060 if (method->is_synchronized()) { // 5) 2061 lock_slot_offset = stack_slots; 2062 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 2063 stack_slots += VMRegImpl::slots_per_word; 2064 } 2065 2066 int workspace_slot_offset = stack_slots; // 6) 2067 stack_slots += 2; 2068 2069 // Now compute actual number of stack words we need. 2070 // Rounding to make stack properly aligned. 2071 stack_slots = align_up(stack_slots, // 7) 2072 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 2073 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 2074 2075 2076 // Now we can start generating code. 
2077 // -------------------------------------------------------------------------- 2078 2079 intptr_t start_pc = (intptr_t)__ pc(); 2080 intptr_t vep_start_pc; 2081 intptr_t frame_done_pc; 2082 intptr_t oopmap_pc; 2083 2084 Label ic_miss; 2085 Label handle_pending_exception; 2086 2087 Register r_callers_sp = R21; 2088 Register r_temp_1 = R22; 2089 Register r_temp_2 = R23; 2090 Register r_temp_3 = R24; 2091 Register r_temp_4 = R25; 2092 Register r_temp_5 = R26; 2093 Register r_temp_6 = R27; 2094 Register r_return_pc = R28; 2095 2096 Register r_carg1_jnienv = noreg; 2097 Register r_carg2_classorobject = noreg; 2098 if (!is_critical_native) { 2099 r_carg1_jnienv = out_regs[0].first()->as_Register(); 2100 r_carg2_classorobject = out_regs[1].first()->as_Register(); 2101 } 2102 2103 2104 // Generate the Unverified Entry Point (UEP). 2105 // -------------------------------------------------------------------------- 2106 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 2107 2108 // Check ic: object class == cached class? 2109 if (!method_is_static) { 2110 Register ic = as_Register(Matcher::inline_cache_reg_encode()); 2111 Register receiver_klass = r_temp_1; 2112 2113 __ cmpdi(CCR0, R3_ARG1, 0); 2114 __ beq(CCR0, ic_miss); 2115 __ verify_oop(R3_ARG1); 2116 __ load_klass(receiver_klass, R3_ARG1); 2117 2118 __ cmpd(CCR0, receiver_klass, ic); 2119 __ bne(CCR0, ic_miss); 2120 } 2121 2122 2123 // Generate the Verified Entry Point (VEP). 2124 // -------------------------------------------------------------------------- 2125 vep_start_pc = (intptr_t)__ pc(); 2126 2127 if (UseRTMLocking) { 2128 // Abort RTM transaction before calling JNI 2129 // because critical section can be large and 2130 // abort anyway. Also nmethod can be deoptimized. 2131 __ tabort_(); 2132 } 2133 2134 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 2135 Label L_skip_barrier; 2136 Register klass = r_temp_1; 2137 // Notify OOP recorder (don't need the relocation) 2138 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 2139 __ load_const_optimized(klass, md.value(), R0); 2140 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 2141 2142 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 2143 __ mtctr(klass); 2144 __ bctr(); 2145 2146 __ bind(L_skip_barrier); 2147 } 2148 2149 __ save_LR_CR(r_temp_1); 2150 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 2151 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 2152 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 2153 frame_done_pc = (intptr_t)__ pc(); 2154 2155 __ verify_thread(); 2156 2157 // Native nmethod wrappers never take possesion of the oop arguments. 2158 // So the caller will gc the arguments. 2159 // The only thing we need an oopMap for is if the call is static. 2160 // 2161 // An OopMap for lock (and class if static), and one for the VM call itself. 2162 OopMapSet *oop_maps = new OopMapSet(); 2163 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2164 2165 if (is_critical_native) { 2166 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, 2167 oop_maps, in_regs, in_sig_bt, r_temp_1); 2168 } 2169 2170 // Move arguments from register/stack to register/stack. 
2171 // -------------------------------------------------------------------------- 2172 // 2173 // We immediately shuffle the arguments so that for any vm call we have 2174 // to make from here on out (sync slow path, jvmti, etc.) we will have 2175 // captured the oops from our caller and have a valid oopMap for them. 2176 // 2177 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2178 // (derived from JavaThread* which is in R16_thread) and, if static, 2179 // the class mirror instead of a receiver. This pretty much guarantees that 2180 // register layout will not match. We ignore these extra arguments during 2181 // the shuffle. The shuffle is described by the two calling convention 2182 // vectors we have in our possession. We simply walk the java vector to 2183 // get the source locations and the c vector to get the destinations. 2184 2185 // Record sp-based slot for receiver on stack for non-static methods. 2186 int receiver_offset = -1; 2187 2188 // We move the arguments backward because the floating point registers 2189 // destination will always be to a register with a greater or equal 2190 // register number or the stack. 2191 // in is the index of the incoming Java arguments 2192 // out is the index of the outgoing C arguments 2193 2194 #ifdef ASSERT 2195 bool reg_destroyed[RegisterImpl::number_of_registers]; 2196 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2197 for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) { 2198 reg_destroyed[r] = false; 2199 } 2200 for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) { 2201 freg_destroyed[f] = false; 2202 } 2203 #endif // ASSERT 2204 2205 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2206 2207 #ifdef ASSERT 2208 if (in_regs[in].first()->is_Register()) { 2209 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2210 } else if (in_regs[in].first()->is_FloatRegister()) { 2211 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2212 } 2213 if (out_regs[out].first()->is_Register()) { 2214 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2215 } else if (out_regs[out].first()->is_FloatRegister()) { 2216 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2217 } 2218 if (out_regs2[out].first()->is_Register()) { 2219 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true; 2220 } else if (out_regs2[out].first()->is_FloatRegister()) { 2221 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true; 2222 } 2223 #endif // ASSERT 2224 2225 switch (in_sig_bt[in]) { 2226 case T_BOOLEAN: 2227 case T_CHAR: 2228 case T_BYTE: 2229 case T_SHORT: 2230 case T_INT: 2231 // Move int and do sign extension. 2232 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2233 break; 2234 case T_LONG: 2235 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2236 break; 2237 case T_ARRAY: 2238 if (is_critical_native) { 2239 int body_arg = out; 2240 out -= 1; // Point to length arg. 
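// Illustrative note (added, not in the original source): for a critical native
// each Java array argument is expanded by unpack_array_argument() above into a
// (length, body) pair -- e.g. a jbyteArray becomes a jint length in out_regs[out]
// and a jbyte* body pointer in out_regs[body_arg]; a NULL array is passed as
// (0, NULL).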
2241 unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out], 2242 r_callers_sp, r_temp_1, r_temp_2); 2243 break; 2244 } 2245 case T_OBJECT: 2246 assert(!is_critical_native, "no oop arguments"); 2247 object_move(masm, stack_slots, 2248 oop_map, oop_handle_slot_offset, 2249 ((in == 0) && (!method_is_static)), &receiver_offset, 2250 in_regs[in], out_regs[out], 2251 r_callers_sp, r_temp_1, r_temp_2); 2252 break; 2253 case T_VOID: 2254 break; 2255 case T_FLOAT: 2256 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2257 if (out_regs2[out].first()->is_valid()) { 2258 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 2259 } 2260 break; 2261 case T_DOUBLE: 2262 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2263 if (out_regs2[out].first()->is_valid()) { 2264 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 2265 } 2266 break; 2267 case T_ADDRESS: 2268 fatal("found type (T_ADDRESS) in java args"); 2269 break; 2270 default: 2271 ShouldNotReachHere(); 2272 break; 2273 } 2274 } 2275 2276 // Pre-load a static method's oop into ARG2. 2277 // Used both by locking code and the normal JNI call code. 2278 if (method_is_static && !is_critical_native) { 2279 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2280 r_carg2_classorobject); 2281 2282 // Now handlize the static class mirror in carg2. It's known not-null. 2283 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2284 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2285 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2286 } 2287 2288 // Get JNIEnv* which is first argument to native. 2289 if (!is_critical_native) { 2290 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2291 } 2292 2293 // NOTE: 2294 // 2295 // We have all of the arguments setup at this point. 2296 // We MUST NOT touch any outgoing regs from this point on. 2297 // So if we must call out we must push a new frame. 2298 2299 // Get current pc for oopmap, and load it patchable relative to global toc. 2300 oopmap_pc = (intptr_t) __ pc(); 2301 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2302 2303 // We use the same pc/oopMap repeatedly when we call out. 2304 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2305 2306 // r_return_pc now has the pc loaded that we will use when we finally call 2307 // to native. 2308 2309 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2310 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2311 2312 # if 0 2313 // DTrace method entry 2314 # endif 2315 2316 // Lock a synchronized method. 2317 // -------------------------------------------------------------------------- 2318 2319 if (method->is_synchronized()) { 2320 assert(!is_critical_native, "unhandled"); 2321 ConditionRegister r_flag = CCR1; 2322 Register r_oop = r_temp_4; 2323 const Register r_box = r_temp_5; 2324 Label done, locked; 2325 2326 // Load the oop for the object or class. r_carg2_classorobject contains 2327 // either the handlized oop from the incoming arguments or the handlized 2328 // class mirror (if the method is static). 2329 __ ld(r_oop, 0, r_carg2_classorobject); 2330 2331 // Get the lock box slot's address. 
2332 __ addi(r_box, R1_SP, lock_offset); 2333 2334 # ifdef ASSERT 2335 if (UseBiasedLocking) { 2336 // Making the box point to itself will make it clear it went unused 2337 // but also be obviously invalid. 2338 __ std(r_box, 0, r_box); 2339 } 2340 # endif // ASSERT 2341 2342 // Try fastpath for locking. 2343 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2344 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2345 __ beq(r_flag, locked); 2346 2347 // None of the above fast optimizations worked so we have to get into the 2348 // slow case of monitor enter. Inline a special case of call_VM that 2349 // disallows any pending_exception. 2350 2351 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2352 int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2353 __ mr(R11_scratch1, R1_SP); 2354 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 2355 2356 // Do the call. 2357 __ set_last_Java_frame(R11_scratch1, r_return_pc); 2358 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); 2359 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2360 __ reset_last_Java_frame(); 2361 2362 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2); 2363 2364 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2365 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0); 2366 2367 __ bind(locked); 2368 } 2369 2370 2371 // Publish thread state 2372 // -------------------------------------------------------------------------- 2373 2374 // Use that pc we placed in r_return_pc a while back as the current frame anchor. 2375 __ set_last_Java_frame(R1_SP, r_return_pc); 2376 2377 // Transition from _thread_in_Java to _thread_in_native. 2378 __ li(R0, _thread_in_native); 2379 __ release(); 2380 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2381 __ stw(R0, thread_(thread_state)); 2382 2383 2384 // The JNI call 2385 // -------------------------------------------------------------------------- 2386 #if defined(ABI_ELFv2) 2387 __ call_c(native_func, relocInfo::runtime_call_type); 2388 #else 2389 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func; 2390 __ call_c(fd_native_method, relocInfo::runtime_call_type); 2391 #endif 2392 2393 2394 // Now, we are back from the native code. 2395 2396 2397 // Unpack the native result. 2398 // -------------------------------------------------------------------------- 2399 2400 // For int-types, we do any needed sign-extension required. 2401 // Care must be taken that the return values (R3_RET and F1_RET) 2402 // will survive any VM calls for blocking or unlocking. 2403 // An OOP result (handle) is done specially in the slow-path code. 2404 2405 switch (ret_type) { 2406 case T_VOID: break; // Nothing to do! 2407 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2408 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2409 case T_LONG: break; // Got it where we want it (unless slow-path). 2410 case T_OBJECT: break; // Really a handle. 2411 // Cannot de-handlize until after reclaiming jvm_lock. 
2412 case T_ARRAY: break; 2413 2414 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2415 Label skip_modify; 2416 __ cmpwi(CCR0, R3_RET, 0); 2417 __ beq(CCR0, skip_modify); 2418 __ li(R3_RET, 1); 2419 __ bind(skip_modify); 2420 break; 2421 } 2422 case T_BYTE: { // sign extension 2423 __ extsb(R3_RET, R3_RET); 2424 break; 2425 } 2426 case T_CHAR: { // unsigned result 2427 __ andi(R3_RET, R3_RET, 0xffff); 2428 break; 2429 } 2430 case T_SHORT: { // sign extension 2431 __ extsh(R3_RET, R3_RET); 2432 break; 2433 } 2434 case T_INT: // nothing to do 2435 break; 2436 default: 2437 ShouldNotReachHere(); 2438 break; 2439 } 2440 2441 2442 // Publish thread state 2443 // -------------------------------------------------------------------------- 2444 2445 // Switch thread to "native transition" state before reading the 2446 // synchronization state. This additional state is necessary because reading 2447 // and testing the synchronization state is not atomic w.r.t. GC, as this 2448 // scenario demonstrates: 2449 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2450 // and is preempted. 2451 // - VM thread changes sync state to synchronizing and suspends threads 2452 // for GC. 2453 // - Thread A is resumed to finish this native method, but doesn't block 2454 // here since it didn't see any synchronization in progress, and escapes. 2455 2456 // Transition from _thread_in_native to _thread_in_native_trans. 2457 __ li(R0, _thread_in_native_trans); 2458 __ release(); 2459 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2460 __ stw(R0, thread_(thread_state)); 2461 2462 2463 // Must we block? 2464 // -------------------------------------------------------------------------- 2465 2466 // Block, if necessary, before resuming in _thread_in_Java state. 2467 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2468 Label after_transition; 2469 { 2470 Label no_block, sync; 2471 2472 // Force this write out before the read below. 2473 __ fence(); 2474 2475 Register sync_state_addr = r_temp_4; 2476 Register sync_state = r_temp_5; 2477 Register suspend_flags = r_temp_6; 2478 2479 // No synchronization in progress nor yet synchronized 2480 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2481 __ safepoint_poll(sync, sync_state); 2482 2483 // Not suspended. 2484 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2485 __ lwz(suspend_flags, thread_(suspend_flags)); 2486 __ cmpwi(CCR1, suspend_flags, 0); 2487 __ beq(CCR1, no_block); 2488 2489 // Block. Save any potential method result value before the operation and 2490 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2491 // lets us share the oopMap we used when we went native rather than create 2492 // a distinct one for this pc. 2493 __ bind(sync); 2494 __ isync(); 2495 2496 address entry_point = is_critical_native 2497 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition) 2498 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2499 save_native_result(masm, ret_type, workspace_slot_offset); 2500 __ call_VM_leaf(entry_point, R16_thread); 2501 restore_native_result(masm, ret_type, workspace_slot_offset); 2502 2503 if (is_critical_native) { 2504 __ b(after_transition); // No thread state transition here. 2505 } 2506 __ bind(no_block); 2507 } 2508 2509 // Publish thread state. 
2510 // -------------------------------------------------------------------------- 2511 2512 // Thread state is thread_in_native_trans. Any safepoint blocking has 2513 // already happened so we can now change state to _thread_in_Java. 2514 2515 // Transition from _thread_in_native_trans to _thread_in_Java. 2516 __ li(R0, _thread_in_Java); 2517 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2518 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2519 __ stw(R0, thread_(thread_state)); 2520 __ bind(after_transition); 2521 2522 // Reguard any pages if necessary. 2523 // -------------------------------------------------------------------------- 2524 2525 Label no_reguard; 2526 __ lwz(r_temp_1, thread_(stack_guard_state)); 2527 __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled); 2528 __ bne(CCR0, no_reguard); 2529 2530 save_native_result(masm, ret_type, workspace_slot_offset); 2531 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2532 restore_native_result(masm, ret_type, workspace_slot_offset); 2533 2534 __ bind(no_reguard); 2535 2536 2537 // Unlock 2538 // -------------------------------------------------------------------------- 2539 2540 if (method->is_synchronized()) { 2541 2542 ConditionRegister r_flag = CCR1; 2543 const Register r_oop = r_temp_4; 2544 const Register r_box = r_temp_5; 2545 const Register r_exception = r_temp_6; 2546 Label done; 2547 2548 // Get oop and address of lock object box. 2549 if (method_is_static) { 2550 assert(klass_offset != -1, ""); 2551 __ ld(r_oop, klass_offset, R1_SP); 2552 } else { 2553 assert(receiver_offset != -1, ""); 2554 __ ld(r_oop, receiver_offset, R1_SP); 2555 } 2556 __ addi(r_box, R1_SP, lock_offset); 2557 2558 // Try fastpath for unlocking. 2559 __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2560 __ beq(r_flag, done); 2561 2562 // Save and restore any potential method result value around the unlocking operation. 2563 save_native_result(masm, ret_type, workspace_slot_offset); 2564 2565 // Must save pending exception around the slow-path VM call. Since it's a 2566 // leaf call, the pending exception (if any) can be kept in a register. 2567 __ ld(r_exception, thread_(pending_exception)); 2568 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile"); 2569 __ li(R0, 0); 2570 __ std(R0, thread_(pending_exception)); 2571 2572 // Slow case of monitor enter. 2573 // Inline a special case of call_VM that disallows any pending_exception. 2574 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread). 2575 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread); 2576 2577 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2578 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0); 2579 2580 restore_native_result(masm, ret_type, workspace_slot_offset); 2581 2582 // Check_forward_pending_exception jump to forward_exception if any pending 2583 // exception is set. The forward_exception routine expects to see the 2584 // exception in pending_exception and not in a register. Kind of clumsy, 2585 // since all folks who branch to forward_exception must have tested 2586 // pending_exception first and hence have it in a register already. 
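// (Note added:) Restore the pending exception stashed in r_exception above so
// that the forward_exception path finds it in the thread again.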
2587 __ std(r_exception, thread_(pending_exception)); 2588 2589 __ bind(done); 2590 } 2591 2592 # if 0 2593 // DTrace method exit 2594 # endif 2595 2596 // Clear "last Java frame" SP and PC. 2597 // -------------------------------------------------------------------------- 2598 2599 __ reset_last_Java_frame(); 2600 2601 // Unbox oop result, e.g. JNIHandles::resolve value. 2602 // -------------------------------------------------------------------------- 2603 2604 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2605 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false); 2606 } 2607 2608 if (CheckJNICalls) { 2609 // clear_pending_jni_exception_check 2610 __ load_const_optimized(R0, 0L); 2611 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 2612 } 2613 2614 // Reset handle block. 2615 // -------------------------------------------------------------------------- 2616 if (!is_critical_native) { 2617 __ ld(r_temp_1, thread_(active_handles)); 2618 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 2619 __ li(r_temp_2, 0); 2620 __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1); 2621 2622 2623 // Check for pending exceptions. 2624 // -------------------------------------------------------------------------- 2625 __ ld(r_temp_2, thread_(pending_exception)); 2626 __ cmpdi(CCR0, r_temp_2, 0); 2627 __ bne(CCR0, handle_pending_exception); 2628 } 2629 2630 // Return 2631 // -------------------------------------------------------------------------- 2632 2633 __ pop_frame(); 2634 __ restore_LR_CR(R11); 2635 __ blr(); 2636 2637 2638 // Handler for pending exceptions (out-of-line). 2639 // -------------------------------------------------------------------------- 2640 2641 // Since this is a native call, we know the proper exception handler 2642 // is the empty function. We just pop this frame and then jump to 2643 // forward_exception_entry. 2644 if (!is_critical_native) { 2645 __ align(InteriorEntryAlignment); 2646 __ bind(handle_pending_exception); 2647 2648 __ pop_frame(); 2649 __ restore_LR_CR(R11); 2650 __ b64_patchable((address)StubRoutines::forward_exception_entry(), 2651 relocInfo::runtime_call_type); 2652 } 2653 2654 // Handler for a cache miss (out-of-line). 2655 // -------------------------------------------------------------------------- 2656 2657 if (!method_is_static) { 2658 __ align(InteriorEntryAlignment); 2659 __ bind(ic_miss); 2660 2661 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), 2662 relocInfo::runtime_call_type); 2663 } 2664 2665 // Done. 2666 // -------------------------------------------------------------------------- 2667 2668 __ flush(); 2669 2670 nmethod *nm = nmethod::new_native_nmethod(method, 2671 compile_id, 2672 masm->code(), 2673 vep_start_pc-start_pc, 2674 frame_done_pc-start_pc, 2675 stack_slots / VMRegImpl::slots_per_word, 2676 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2677 in_ByteSize(lock_offset), 2678 oop_maps); 2679 2680 if (is_critical_native) { 2681 nm->set_lazy_critical_native(true); 2682 } 2683 2684 return nm; 2685 #else 2686 ShouldNotReachHere(); 2687 return NULL; 2688 #endif // COMPILER2 2689 } 2690 2691 // This function returns the adjust size (in number of words) to a c2i adapter 2692 // activation for use during deoptimization. 
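// Illustrative example (added; assumes Interpreter::stackElementWords == 1 and
// frame::alignment_in_bytes == 16 on this platform): a callee with 3 parameters
// and 10 locals needs room for 7 extra stack elements, so the function below
// returns align_up((10 - 3) * 1, 16) == 16.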
2693 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2694 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes); 2695 } 2696 2697 uint SharedRuntime::out_preserve_stack_slots() { 2698 #if defined(COMPILER1) || defined(COMPILER2) 2699 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2700 #else 2701 return 0; 2702 #endif 2703 } 2704 2705 #if defined(COMPILER1) || defined(COMPILER2) 2706 // Frame generation for deopt and uncommon trap blobs. 2707 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2708 /* Read */ 2709 Register unroll_block_reg, 2710 /* Update */ 2711 Register frame_sizes_reg, 2712 Register number_of_frames_reg, 2713 Register pcs_reg, 2714 /* Invalidate */ 2715 Register frame_size_reg, 2716 Register pc_reg) { 2717 2718 __ ld(pc_reg, 0, pcs_reg); 2719 __ ld(frame_size_reg, 0, frame_sizes_reg); 2720 __ std(pc_reg, _abi(lr), R1_SP); 2721 __ push_frame(frame_size_reg, R0/*tmp*/); 2722 #ifdef ASSERT 2723 __ load_const_optimized(pc_reg, 0x5afe); 2724 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2725 #endif 2726 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2727 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2728 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2729 __ addi(pcs_reg, pcs_reg, wordSize); 2730 } 2731 2732 // Loop through the UnrollBlock info and create new frames. 2733 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2734 /* read */ 2735 Register unroll_block_reg, 2736 /* invalidate */ 2737 Register frame_sizes_reg, 2738 Register number_of_frames_reg, 2739 Register pcs_reg, 2740 Register frame_size_reg, 2741 Register pc_reg) { 2742 Label loop; 2743 2744 // _number_of_frames is of type int (deoptimization.hpp) 2745 __ lwa(number_of_frames_reg, 2746 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), 2747 unroll_block_reg); 2748 __ ld(pcs_reg, 2749 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), 2750 unroll_block_reg); 2751 __ ld(frame_sizes_reg, 2752 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), 2753 unroll_block_reg); 2754 2755 // stack: (caller_of_deoptee, ...). 2756 2757 // At this point we either have an interpreter frame or a compiled 2758 // frame on top of stack. If it is a compiled frame we push a new c2i 2759 // adapter here 2760 2761 // Memorize top-frame stack-pointer. 2762 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2763 2764 // Resize interpreter top frame OR C2I adapter. 2765 2766 // At this moment, the top frame (which is the caller of the deoptee) is 2767 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2768 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2769 // outgoing arguments. 2770 // 2771 // In order to push the interpreter frame for the deoptee, we need to 2772 // resize the top frame such that we are able to place the deoptee's 2773 // locals in the frame. 2774 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2775 // into a valid PARENT_IJAVA_FRAME_ABI. 2776 2777 __ lwa(R11_scratch1, 2778 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), 2779 unroll_block_reg); 2780 __ neg(R11_scratch1, R11_scratch1); 2781 2782 // R11_scratch1 contains size of locals for frame resizing. 2783 // R12_scratch2 contains top frame's lr. 2784 2785 // Resize frame by complete frame size prevents TOC from being 2786 // overwritten by locals. 
A more stack space saving way would be 2787 // to copy the TOC to its location in the new abi. 2788 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2789 2790 // now, resize the frame 2791 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2792 2793 // In the case where we have resized a c2i frame above, the optional 2794 // alignment below the locals has size 32 (why?). 2795 __ std(R12_scratch2, _abi(lr), R1_SP); 2796 2797 // Initialize initial_caller_sp. 2798 #ifdef ASSERT 2799 __ load_const_optimized(pc_reg, 0x5afe); 2800 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2801 #endif 2802 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2803 2804 #ifdef ASSERT 2805 // Make sure that there is at least one entry in the array. 2806 __ cmpdi(CCR0, number_of_frames_reg, 0); 2807 __ asm_assert_ne("array_size must be > 0", 0x205); 2808 #endif 2809 2810 // Now push the new interpreter frames. 2811 // 2812 __ bind(loop); 2813 // Allocate a new frame, fill in the pc. 2814 push_skeleton_frame(masm, deopt, 2815 unroll_block_reg, 2816 frame_sizes_reg, 2817 number_of_frames_reg, 2818 pcs_reg, 2819 frame_size_reg, 2820 pc_reg); 2821 __ cmpdi(CCR0, number_of_frames_reg, 0); 2822 __ bne(CCR0, loop); 2823 2824 // Get the return address pointing into the frame manager. 2825 __ ld(R0, 0, pcs_reg); 2826 // Store it in the top interpreter frame. 2827 __ std(R0, _abi(lr), R1_SP); 2828 // Initialize frame_manager_lr of interpreter top frame. 2829 } 2830 #endif 2831 2832 void SharedRuntime::generate_deopt_blob() { 2833 // Allocate space for the code 2834 ResourceMark rm; 2835 // Setup code generation tools 2836 CodeBuffer buffer("deopt_blob", 2048, 1024); 2837 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2838 Label exec_mode_initialized; 2839 int frame_size_in_words; 2840 OopMap* map = NULL; 2841 OopMapSet *oop_maps = new OopMapSet(); 2842 2843 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2844 const int frame_size_in_bytes = frame::abi_reg_args_spill_size; 2845 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2846 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2847 2848 const Register exec_mode_reg = R21_tmp1; 2849 2850 const address start = __ pc(); 2851 2852 #if defined(COMPILER1) || defined(COMPILER2) 2853 // -------------------------------------------------------------------------- 2854 // Prolog for non exception case! 2855 2856 // We have been called from the deopt handler of the deoptee. 2857 // 2858 // deoptee: 2859 // ... 2860 // call X 2861 // ... 2862 // deopt_handler: call_deopt_stub 2863 // cur. return pc --> ... 2864 // 2865 // So currently SR_LR points behind the call in the deopt handler. 2866 // We adjust it such that it points to the start of the deopt handler. 2867 // The return_pc has been stored in the frame of the deoptee and 2868 // will replace the address of the deopt_handler in the call 2869 // to Deoptimization::fetch_unroll_info below. 2870 // We can't grab a free register here, because all registers may 2871 // contain live values, so let the RegisterSaver do the adjustment 2872 // of the return pc. 2873 const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler(); 2874 2875 // Push the "unpack frame" 2876 // Save everything in sight. 
2877 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2878 &first_frame_size_in_bytes, 2879 /*generate_oop_map=*/ true, 2880 return_pc_adjustment_no_exception, 2881 RegisterSaver::return_pc_is_lr); 2882 assert(map != NULL, "OopMap must have been created"); 2883 2884 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2885 // Save exec mode for unpack_frames. 2886 __ b(exec_mode_initialized); 2887 2888 // -------------------------------------------------------------------------- 2889 // Prolog for exception case 2890 2891 // An exception is pending. 2892 // We have been called with a return (interpreter) or a jump (exception blob). 2893 // 2894 // - R3_ARG1: exception oop 2895 // - R4_ARG2: exception pc 2896 2897 int exception_offset = __ pc() - start; 2898 2899 BLOCK_COMMENT("Prolog for exception case"); 2900 2901 // Store exception oop and pc in thread (location known to GC). 2902 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2903 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2904 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2905 __ std(R4_ARG2, _abi(lr), R1_SP); 2906 2907 // Vanilla deoptimization with an exception pending in exception_oop. 2908 int exception_in_tls_offset = __ pc() - start; 2909 2910 // Push the "unpack frame". 2911 // Save everything in sight. 2912 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2913 &first_frame_size_in_bytes, 2914 /*generate_oop_map=*/ false, 2915 /*return_pc_adjustment_exception=*/ 0, 2916 RegisterSaver::return_pc_is_pre_saved); 2917 2918 // Deopt during an exception. Save exec mode for unpack_frames. 2919 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2920 2921 // fall through 2922 2923 int reexecute_offset = 0; 2924 #ifdef COMPILER1 2925 __ b(exec_mode_initialized); 2926 2927 // Reexecute entry, similar to c2 uncommon trap 2928 reexecute_offset = __ pc() - start; 2929 2930 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2931 &first_frame_size_in_bytes, 2932 /*generate_oop_map=*/ false, 2933 /*return_pc_adjustment_reexecute=*/ 0, 2934 RegisterSaver::return_pc_is_pre_saved); 2935 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2936 #endif 2937 2938 // -------------------------------------------------------------------------- 2939 __ BIND(exec_mode_initialized); 2940 2941 { 2942 const Register unroll_block_reg = R22_tmp2; 2943 2944 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2945 // call `last_Java_frame()'. The value of the pc in the frame is not 2946 // particularly important. It just needs to identify this blob. 2947 __ set_last_Java_frame(R1_SP, noreg); 2948 2949 // With EscapeAnalysis turned on, this call may safepoint! 2950 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2951 address calls_return_pc = __ last_calls_return_pc(); 2952 // Set an oopmap for the call site that describes all our saved registers. 2953 oop_maps->add_gc_map(calls_return_pc - start, map); 2954 2955 __ reset_last_Java_frame(); 2956 // Save the return value. 2957 __ mr(unroll_block_reg, R3_RET); 2958 2959 // Restore only the result registers that have been saved 2960 // by save_volatile_registers(...). 
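// (Note added for clarity: in this blob the registers were saved by
// RegisterSaver::push_frame_reg_args_and_save_live_registers() above; only the
// result registers are reloaded here.)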
2961 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2962 2963 // reload the exec mode from the UnrollBlock (it might have changed) 2964 __ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2965 // In excp_deopt_mode, restore and clear exception oop which we 2966 // stored in the thread during exception entry above. The exception 2967 // oop will be the return value of this stub. 2968 Label skip_restore_excp; 2969 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2970 __ bne(CCR0, skip_restore_excp); 2971 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2972 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2973 __ li(R0, 0); 2974 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2975 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2976 __ BIND(skip_restore_excp); 2977 2978 __ pop_frame(); 2979 2980 // stack: (deoptee, optional i2c, caller of deoptee, ...). 2981 2982 // pop the deoptee's frame 2983 __ pop_frame(); 2984 2985 // stack: (caller_of_deoptee, ...). 2986 2987 // Loop through the `UnrollBlock' info and create interpreter frames. 2988 push_skeleton_frames(masm, true/*deopt*/, 2989 unroll_block_reg, 2990 R23_tmp3, 2991 R24_tmp4, 2992 R25_tmp5, 2993 R26_tmp6, 2994 R27_tmp7); 2995 2996 // stack: (skeletal interpreter frame, ..., optional skeletal 2997 // interpreter frame, optional c2i, caller of deoptee, ...). 2998 } 2999 3000 // push an `unpack_frame' taking care of float / int return values. 3001 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 3002 3003 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3004 // skeletal interpreter frame, optional c2i, caller of deoptee, 3005 // ...). 3006 3007 // Spill live volatile registers since we'll do a call. 3008 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 3009 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 3010 3011 // Let the unpacker layout information in the skeletal frames just 3012 // allocated. 3013 __ get_PC_trash_LR(R3_RET); 3014 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 3015 // This is a call to a LEAF method, so no oop map is required. 3016 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3017 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 3018 __ reset_last_Java_frame(); 3019 3020 // Restore the volatiles saved above. 3021 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 3022 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 3023 3024 // Pop the unpack frame. 3025 __ pop_frame(); 3026 __ restore_LR_CR(R0); 3027 3028 // stack: (top interpreter frame, ..., optional interpreter frame, 3029 // optional c2i, caller of deoptee, ...). 3030 3031 // Initialize R14_state. 3032 __ restore_interpreter_state(R11_scratch1); 3033 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3034 3035 // Return to the interpreter entry point. 
3036 __ blr(); 3037 __ flush(); 3038 #else // COMPILER2 3039 __ unimplemented("deopt blob needed only with compiler"); 3040 int exception_offset = __ pc() - start; 3041 #endif // COMPILER2 3042 3043 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 3044 reexecute_offset, first_frame_size_in_bytes / wordSize); 3045 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 3046 } 3047 3048 #ifdef COMPILER2 3049 void SharedRuntime::generate_uncommon_trap_blob() { 3050 // Allocate space for the code. 3051 ResourceMark rm; 3052 // Setup code generation tools. 3053 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 3054 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 3055 address start = __ pc(); 3056 3057 if (UseRTMLocking) { 3058 // Abort RTM transaction before possible nmethod deoptimization. 3059 __ tabort_(); 3060 } 3061 3062 Register unroll_block_reg = R21_tmp1; 3063 Register klass_index_reg = R22_tmp2; 3064 Register unc_trap_reg = R23_tmp3; 3065 3066 OopMapSet* oop_maps = new OopMapSet(); 3067 int frame_size_in_bytes = frame::abi_reg_args_size; 3068 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3069 3070 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3071 3072 // Push a dummy `unpack_frame' and call 3073 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 3074 // vframe array and return the `UnrollBlock' information. 3075 3076 // Save LR to compiled frame. 3077 __ save_LR_CR(R11_scratch1); 3078 3079 // Push an "uncommon_trap" frame. 3080 __ push_frame_reg_args(0, R11_scratch1); 3081 3082 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 3083 3084 // Set the `unpack_frame' as last_Java_frame. 3085 // `Deoptimization::uncommon_trap' expects it and considers its 3086 // sender frame as the deoptee frame. 3087 // Remember the offset of the instruction whose address will be 3088 // moved to R11_scratch1. 3089 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 3090 3091 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 3092 3093 __ mr(klass_index_reg, R3); 3094 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 3095 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 3096 R16_thread, klass_index_reg, R5_ARG3); 3097 3098 // Set an oopmap for the call site. 3099 oop_maps->add_gc_map(gc_map_pc - start, map); 3100 3101 __ reset_last_Java_frame(); 3102 3103 // Pop the `unpack frame'. 3104 __ pop_frame(); 3105 3106 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3107 3108 // Save the return value. 3109 __ mr(unroll_block_reg, R3_RET); 3110 3111 // Pop the uncommon_trap frame. 3112 __ pop_frame(); 3113 3114 // stack: (caller_of_deoptee, ...). 3115 3116 #ifdef ASSERT 3117 __ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 3118 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3119 __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0); 3120 #endif 3121 3122 // Allocate new interpreter frame(s) and possibly a c2i adapter 3123 // frame. 3124 push_skeleton_frames(masm, false/*deopt*/, 3125 unroll_block_reg, 3126 R22_tmp2, 3127 R23_tmp3, 3128 R24_tmp4, 3129 R25_tmp5, 3130 R26_tmp6); 3131 3132 // stack: (skeletal interpreter frame, ..., optional skeletal 3133 // interpreter frame, optional c2i, caller of deoptee, ...). 3134 3135 // Push a dummy `unpack_frame' taking care of float return values. 
3136 // Call `Deoptimization::unpack_frames' to layout information in the 3137 // interpreter frames just created. 3138 3139 // Push a simple "unpack frame" here. 3140 __ push_frame_reg_args(0, R11_scratch1); 3141 3142 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3143 // skeletal interpreter frame, optional c2i, caller of deoptee, 3144 // ...). 3145 3146 // Set the "unpack_frame" as last_Java_frame. 3147 __ get_PC_trash_LR(R11_scratch1); 3148 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 3149 3150 // Indicate it is the uncommon trap case. 3151 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap); 3152 // Let the unpacker layout information in the skeletal frames just 3153 // allocated. 3154 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3155 R16_thread, unc_trap_reg); 3156 3157 __ reset_last_Java_frame(); 3158 // Pop the `unpack frame'. 3159 __ pop_frame(); 3160 // Restore LR from top interpreter frame. 3161 __ restore_LR_CR(R11_scratch1); 3162 3163 // stack: (top interpreter frame, ..., optional interpreter frame, 3164 // optional c2i, caller of deoptee, ...). 3165 3166 __ restore_interpreter_state(R11_scratch1); 3167 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3168 3169 // Return to the interpreter entry point. 3170 __ blr(); 3171 3172 masm->flush(); 3173 3174 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize); 3175 } 3176 #endif // COMPILER2 3177 3178 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. 3179 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { 3180 assert(StubRoutines::forward_exception_entry() != NULL, 3181 "must be generated before"); 3182 3183 ResourceMark rm; 3184 OopMapSet *oop_maps = new OopMapSet(); 3185 OopMap* map; 3186 3187 // Allocate space for the code. Setup code generation tools. 3188 CodeBuffer buffer("handler_blob", 2048, 1024); 3189 MacroAssembler* masm = new MacroAssembler(&buffer); 3190 3191 address start = __ pc(); 3192 int frame_size_in_bytes = 0; 3193 3194 RegisterSaver::ReturnPCLocation return_pc_location; 3195 bool cause_return = (poll_type == POLL_AT_RETURN); 3196 if (cause_return) { 3197 // Nothing to do here. The frame has already been popped in MachEpilogNode. 3198 // Register LR already contains the return pc. 3199 return_pc_location = RegisterSaver::return_pc_is_lr; 3200 } else { 3201 // Use thread()->saved_exception_pc() as return pc. 3202 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; 3203 } 3204 3205 if (UseRTMLocking) { 3206 // Abort RTM transaction before calling runtime 3207 // because critical section can be large and so 3208 // will abort anyway. Also nmethod can be deoptimized. 3209 __ tabort_(); 3210 } 3211 3212 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP); 3213 3214 // Save registers, fpu state, and flags. Set R31 = return pc. 3215 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3216 &frame_size_in_bytes, 3217 /*generate_oop_map=*/ true, 3218 /*return_pc_adjustment=*/0, 3219 return_pc_location, save_vectors); 3220 3221 // The following is basically a call_VM. However, we need the precise 3222 // address of the call in order to generate an oopmap. Hence, we do all the 3223 // work outselves. 
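// A rough sketch (added for clarity) of the hand-rolled call_VM-style sequence
// that follows:
//
//   __ set_last_Java_frame(R1_SP, noreg);         // anchor the frame
//   __ call_VM_leaf(call_ptr, R16_thread);        // the call itself
//   address ret_pc = __ last_calls_return_pc();   // precise return address of the call
//   oop_maps->add_gc_map(ret_pc - start, map);    // oopmap keyed to exactly that pc
//   __ reset_last_Java_frame();                   // clear the anchor afterwards
//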
3224 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg); 3225 3226 // The return address must always be correct so that the frame constructor 3227 // never sees an invalid pc. 3228 3229 // Do the call 3230 __ call_VM_leaf(call_ptr, R16_thread); 3231 address calls_return_pc = __ last_calls_return_pc(); 3232 3233 // Set an oopmap for the call site. This oopmap will map all 3234 // oop-registers and debug-info registers as callee-saved. This 3235 // will allow deoptimization at this safepoint to find all possible 3236 // debug-info recordings, as well as let GC find all oops. 3237 oop_maps->add_gc_map(calls_return_pc - start, map); 3238 3239 Label noException; 3240 3241 // Clear the last Java frame. 3242 __ reset_last_Java_frame(); 3243 3244 BLOCK_COMMENT(" Check pending exception."); 3245 const Register pending_exception = R0; 3246 __ ld(pending_exception, thread_(pending_exception)); 3247 __ cmpdi(CCR0, pending_exception, 0); 3248 __ beq(CCR0, noException); 3249 3250 // Exception pending 3251 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3252 frame_size_in_bytes, 3253 /*restore_ctr=*/true, save_vectors); 3254 3255 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3256 // Jump to forward_exception_entry, with the issuing PC in LR 3257 // so it looks like the original nmethod called forward_exception_entry. 3258 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3259 3260 // No exception case. 3261 __ BIND(noException); 3262 3263 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) { 3264 Label no_adjust; 3265 // If our stashed return pc was modified by the runtime we avoid touching it 3266 __ ld(R0, frame_size_in_bytes + _abi(lr), R1_SP); 3267 __ cmpd(CCR0, R0, R31); 3268 __ bne(CCR0, no_adjust); 3269 3270 // Adjust return pc forward to step over the safepoint poll instruction 3271 __ addi(R31, R31, 4); 3272 __ std(R31, frame_size_in_bytes + _abi(lr), R1_SP); 3273 3274 __ bind(no_adjust); 3275 } 3276 3277 // Normal exit, restore registers and exit. 3278 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3279 frame_size_in_bytes, 3280 /*restore_ctr=*/true, save_vectors); 3281 3282 __ blr(); 3283 3284 // Make sure all code is generated 3285 masm->flush(); 3286 3287 // Fill-out other meta info 3288 // CodeBlob frame size is in words. 3289 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3290 } 3291 3292 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3293 // 3294 // Generate a stub that calls into the vm to find out the proper destination 3295 // of a java call. All the argument registers are live at this point 3296 // but since this is generic code we don't know what they are and the caller 3297 // must do any gc of the args. 3298 // 3299 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 3300 3301 // allocate space for the code 3302 ResourceMark rm; 3303 3304 CodeBuffer buffer(name, 1000, 512); 3305 MacroAssembler* masm = new MacroAssembler(&buffer); 3306 3307 int frame_size_in_bytes; 3308 3309 OopMapSet *oop_maps = new OopMapSet(); 3310 OopMap* map = NULL; 3311 3312 address start = __ pc(); 3313 3314 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3315 &frame_size_in_bytes, 3316 /*generate_oop_map*/ true, 3317 /*return_pc_adjustment*/ 0, 3318 RegisterSaver::return_pc_is_lr); 3319 3320 // Use noreg as last_Java_pc, the return pc will be reconstructed 3321 // from the physical frame. 
3322 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3323 3324 int frame_complete = __ offset(); 3325 3326 // Pass R19_method as 2nd (optional) argument, used by 3327 // counter_overflow_stub. 3328 __ call_VM_leaf(destination, R16_thread, R19_method); 3329 address calls_return_pc = __ last_calls_return_pc(); 3330 // Set an oopmap for the call site. 3331 // We need this not only for callee-saved registers, but also for volatile 3332 // registers that the compiler might be keeping live across a safepoint. 3333 // Create the oopmap for the call's return pc. 3334 oop_maps->add_gc_map(calls_return_pc - start, map); 3335 3336 // R3_RET contains the address we are going to jump to assuming no exception got installed. 3337 3338 // clear last_Java_sp 3339 __ reset_last_Java_frame(); 3340 3341 // Check for pending exceptions. 3342 BLOCK_COMMENT("Check for pending exceptions."); 3343 Label pending; 3344 __ ld(R11_scratch1, thread_(pending_exception)); 3345 __ cmpdi(CCR0, R11_scratch1, 0); 3346 __ bne(CCR0, pending); 3347 3348 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 3349 3350 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3351 3352 // Get the returned method. 3353 __ get_vm_result_2(R19_method); 3354 3355 __ bctr(); 3356 3357 3358 // Pending exception after the safepoint. 3359 __ BIND(pending); 3360 3361 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3362 3363 // exception pending => remove activation and forward to exception handler 3364 3365 __ li(R11_scratch1, 0); 3366 __ ld(R3_ARG1, thread_(pending_exception)); 3367 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread); 3368 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3369 3370 // ------------- 3371 // Make sure all code is generated. 3372 masm->flush(); 3373 3374 // return the blob 3375 // frame_size_words or bytes?? 3376 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3377 oop_maps, true); 3378 } 3379 3380 3381 //------------------------------Montgomery multiplication------------------------ 3382 // 3383 3384 // Subtract 0:b from carry:a. Return carry. 3385 static unsigned long 3386 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3387 long i = 0; 3388 unsigned long tmp, tmp2; 3389 __asm__ __volatile__ ( 3390 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3391 "mtctr %[len] \n" 3392 "0: \n" 3393 "ldx %[tmp], %[i], %[a] \n" 3394 "ldx %[tmp2], %[i], %[b] \n" 3395 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3396 "stdx %[tmp], %[i], %[a] \n" 3397 "addi %[i], %[i], 8 \n" 3398 "bdnz 0b \n" 3399 "addme %[tmp], %[carry] \n" // carry + CA - 1 3400 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3401 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3402 : "ctr", "xer", "memory" 3403 ); 3404 return tmp; 3405 } 3406 3407 // Multiply (unsigned) Long A by Long B, accumulating the double- 3408 // length result into the accumulator formed of T0, T1, and T2. 
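// As a rough portable sketch (illustration only, not compiled here; it assumes
// a compiler that provides unsigned __int128 in place of the PPC inline asm
// used below), MACC is equivalent to:
//
//   unsigned __int128 p = (unsigned __int128)A * B;                    // mulld + mulhdu
//   unsigned __int128 s = (unsigned __int128)T0 + (unsigned long)p;    // addc
//   unsigned __int128 c = (unsigned __int128)T1 + (unsigned long)(p >> 64)
//                       + (unsigned long)(s >> 64);                    // adde
//   T0 = (unsigned long)s;
//   T1 = (unsigned long)c;
//   T2 += (unsigned long)(c >> 64);                                    // addze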
3409 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3410 unsigned long hi, lo; 3411 __asm__ __volatile__ ( 3412 "mulld %[lo], %[A], %[B] \n" 3413 "mulhdu %[hi], %[A], %[B] \n" 3414 "addc %[T0], %[T0], %[lo] \n" 3415 "adde %[T1], %[T1], %[hi] \n" 3416 "addze %[T2], %[T2] \n" 3417 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3418 : [A]"r"(A), [B]"r"(B) 3419 : "xer" 3420 ); 3421 } 3422 3423 // As above, but add twice the double-length result into the 3424 // accumulator. 3425 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3426 unsigned long hi, lo; 3427 __asm__ __volatile__ ( 3428 "mulld %[lo], %[A], %[B] \n" 3429 "mulhdu %[hi], %[A], %[B] \n" 3430 "addc %[T0], %[T0], %[lo] \n" 3431 "adde %[T1], %[T1], %[hi] \n" 3432 "addze %[T2], %[T2] \n" 3433 "addc %[T0], %[T0], %[lo] \n" 3434 "adde %[T1], %[T1], %[hi] \n" 3435 "addze %[T2], %[T2] \n" 3436 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3437 : [A]"r"(A), [B]"r"(B) 3438 : "xer" 3439 ); 3440 } 3441 3442 // Fast Montgomery multiplication. The derivation of the algorithm is 3443 // in "A Cryptographic Library for the Motorola DSP56000, 3444 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 3445 static void 3446 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 3447 unsigned long m[], unsigned long inv, int len) { 3448 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3449 int i; 3450 3451 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3452 3453 for (i = 0; i < len; i++) { 3454 int j; 3455 for (j = 0; j < i; j++) { 3456 MACC(a[j], b[i-j], t0, t1, t2); 3457 MACC(m[j], n[i-j], t0, t1, t2); 3458 } 3459 MACC(a[i], b[0], t0, t1, t2); 3460 m[i] = t0 * inv; 3461 MACC(m[i], n[0], t0, t1, t2); 3462 3463 assert(t0 == 0, "broken Montgomery multiply"); 3464 3465 t0 = t1; t1 = t2; t2 = 0; 3466 } 3467 3468 for (i = len; i < 2*len; i++) { 3469 int j; 3470 for (j = i-len+1; j < len; j++) { 3471 MACC(a[j], b[i-j], t0, t1, t2); 3472 MACC(m[j], n[i-j], t0, t1, t2); 3473 } 3474 m[i-len] = t0; 3475 t0 = t1; t1 = t2; t2 = 0; 3476 } 3477 3478 while (t0) { 3479 t0 = sub(m, n, t0, len); 3480 } 3481 } 3482 3483 // Fast Montgomery squaring. This uses asymptotically 25% fewer 3484 // multiplies so it should be up to 25% faster than Montgomery 3485 // multiplication. However, its loop control is more complex and it 3486 // may actually run slower on some machines. 
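// The saving comes from symmetry: in a*a every cross term a[j]*a[i-j] with
// j != i-j occurs twice, so the loops below compute it once and add it twice
// via MACC2, while the diagonal term a[j]*a[j] (reached only when i is even,
// i.e. (i & 1) == 0) is added once via MACC. The reduction terms m[j]*n[i-j]
// still have to be accumulated one by one, which is why the overall saving is
// bounded by roughly 25%.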
3487 static void 3488 montgomery_square(unsigned long a[], unsigned long n[], 3489 unsigned long m[], unsigned long inv, int len) { 3490 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3491 int i; 3492 3493 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3494 3495 for (i = 0; i < len; i++) { 3496 int j; 3497 int end = (i+1)/2; 3498 for (j = 0; j < end; j++) { 3499 MACC2(a[j], a[i-j], t0, t1, t2); 3500 MACC(m[j], n[i-j], t0, t1, t2); 3501 } 3502 if ((i & 1) == 0) { 3503 MACC(a[j], a[j], t0, t1, t2); 3504 } 3505 for (; j < i; j++) { 3506 MACC(m[j], n[i-j], t0, t1, t2); 3507 } 3508 m[i] = t0 * inv; 3509 MACC(m[i], n[0], t0, t1, t2); 3510 3511 assert(t0 == 0, "broken Montgomery square"); 3512 3513 t0 = t1; t1 = t2; t2 = 0; 3514 } 3515 3516 for (i = len; i < 2*len; i++) { 3517 int start = i-len+1; 3518 int end = start + (len - start)/2; 3519 int j; 3520 for (j = start; j < end; j++) { 3521 MACC2(a[j], a[i-j], t0, t1, t2); 3522 MACC(m[j], n[i-j], t0, t1, t2); 3523 } 3524 if ((i & 1) == 0) { 3525 MACC(a[j], a[j], t0, t1, t2); 3526 } 3527 for (; j < len; j++) { 3528 MACC(m[j], n[i-j], t0, t1, t2); 3529 } 3530 m[i-len] = t0; 3531 t0 = t1; t1 = t2; t2 = 0; 3532 } 3533 3534 while (t0) { 3535 t0 = sub(m, n, t0, len); 3536 } 3537 } 3538 3539 // The threshold at which squaring is advantageous was determined 3540 // experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz. 3541 // It doesn't seem to be relevant for Power8, so we use the same value. 3542 #define MONTGOMERY_SQUARING_THRESHOLD 64 3543 3544 // Copy len longwords from s to d, word-swapping as we go. The 3545 // destination array is reversed. 3546 static void reverse_words(unsigned long *s, unsigned long *d, int len) { 3547 d += len; 3548 while(len-- > 0) { 3549 d--; 3550 unsigned long s_val = *s; 3551 // Swap words in a longword on little endian machines. 3552 #ifdef VM_LITTLE_ENDIAN 3553 s_val = (s_val << 32) | (s_val >> 32); 3554 #endif 3555 *d = s_val; 3556 s++; 3557 } 3558 } 3559 3560 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, 3561 jint len, jlong inv, 3562 jint *m_ints) { 3563 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 3564 assert(len % 2 == 0, "array length in montgomery_multiply must be even"); 3565 int longwords = len/2; 3566 3567 // Make very sure we don't use so much space that the stack might 3568 // overflow. 512 jints corresponds to a 16384-bit integer and 3569 // will use a total of 8k bytes of stack space here. 3570 int total_allocation = longwords * sizeof (unsigned long) * 4; 3571 guarantee(total_allocation <= 8192, "must be"); 3572 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3573 3574 // Local scratch arrays 3575 unsigned long 3576 *a = scratch + 0 * longwords, 3577 *b = scratch + 1 * longwords, 3578 *n = scratch + 2 * longwords, 3579 *m = scratch + 3 * longwords; 3580 3581 reverse_words((unsigned long *)a_ints, a, longwords); 3582 reverse_words((unsigned long *)b_ints, b, longwords); 3583 reverse_words((unsigned long *)n_ints, n, longwords); 3584 3585 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords); 3586 3587 reverse_words(m, (unsigned long *)m_ints, longwords); 3588 } 3589 3590 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, 3591 jint len, jlong inv, 3592 jint *m_ints) { 3593 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3594 assert(len % 2 == 0, "array length in montgomery_square must be even"); 3595 int longwords = len/2; 3596 3597 // Make very sure we don't use so much space that the stack might 3598 // overflow. 512 jints corresponds to a 16384-bit integer and 3599 // will use a total of 6k bytes of stack space here. 3600 int total_allocation = longwords * sizeof (unsigned long) * 3; 3601 guarantee(total_allocation <= 8192, "must be"); 3602 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3603 3604 // Local scratch arrays 3605 unsigned long 3606 *a = scratch + 0 * longwords, 3607 *n = scratch + 1 * longwords, 3608 *m = scratch + 2 * longwords; 3609 3610 reverse_words((unsigned long *)a_ints, a, longwords); 3611 reverse_words((unsigned long *)n_ints, n, longwords); 3612 3613 if (len >= MONTGOMERY_SQUARING_THRESHOLD) { 3614 ::montgomery_square(a, n, m, (unsigned long)inv, longwords); 3615 } else { 3616 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords); 3617 } 3618 3619 reverse_words(m, (unsigned long *)m_ints, longwords); 3620 }
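// Informal summary of the two entry points above (descriptive note, not a
// specification): with R = 2^(64*longwords), montgomery_multiply computes the
// Montgomery product a*b*R^-1 (mod n) into m_ints, and montgomery_square the
// corresponding a*a*R^-1 (mod n). This assumes the incoming jint arrays hold
// the magnitude most-significant word first (as java.math.BigInteger magnitudes
// do); reverse_words converts that layout into little-endian arrays of 64-bit
// longwords for the helpers above and converts the result back on the way out.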