/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0 ),
  RegisterSaver_LiveFloatReg( F1 ),
  RegisterSaver_LiveFloatReg( F2 ),
  RegisterSaver_LiveFloatReg( F3 ),
  RegisterSaver_LiveFloatReg( F4 ),
  RegisterSaver_LiveFloatReg( F5 ),
  RegisterSaver_LiveFloatReg( F6 ),
  RegisterSaver_LiveFloatReg( F7 ),
  RegisterSaver_LiveFloatReg( F8 ),
  RegisterSaver_LiveFloatReg( F9 ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg( R0 ),
  //RegisterSaver_LiveIntReg( R1 ), // stack pointer
  RegisterSaver_LiveIntReg( R2 ),
  RegisterSaver_LiveIntReg( R3 ),
  RegisterSaver_LiveIntReg( R4 ),
  RegisterSaver_LiveIntReg( R5 ),
  RegisterSaver_LiveIntReg( R6 ),
  RegisterSaver_LiveIntReg( R7 ),
  RegisterSaver_LiveIntReg( R8 ),
  RegisterSaver_LiveIntReg( R9 ),
  RegisterSaver_LiveIntReg( R10 ),
  RegisterSaver_LiveIntReg( R11 ),
  RegisterSaver_LiveIntReg( R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg( R14 ),
  RegisterSaver_LiveIntReg( R15 ),
  RegisterSaver_LiveIntReg( R16 ),
  RegisterSaver_LiveIntReg( R17 ),
  RegisterSaver_LiveIntReg( R18 ),
  RegisterSaver_LiveIntReg( R19 ),
  RegisterSaver_LiveIntReg( R20 ),
  RegisterSaver_LiveIntReg( R21 ),
  RegisterSaver_LiveIntReg( R22 ),
  RegisterSaver_LiveIntReg( R23 ),
  RegisterSaver_LiveIntReg( R24 ),
  RegisterSaver_LiveIntReg( R25 ),
  RegisterSaver_LiveIntReg( R26 ),
  RegisterSaver_LiveIntReg( R27 ),
  RegisterSaver_LiveIntReg( R28 ),
  RegisterSaver_LiveIntReg( R29 ),
  RegisterSaver_LiveIntReg( R30 ),
  RegisterSaver_LiveIntReg( R31 ), // must be the last register (see save/restore functions below)
};

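// For orientation (derived from the table above, not asserted by the code):
// it lists 1 special register (CTR), 32 float registers (F0-F31) and 30
// integer registers (R0, R2-R12, R14-R31), i.e. 63 entries in total. With
// reg_size == 8 that is a register save area of 63 * 8 = 504 bytes, which
// push_frame_reg_args_and_save_live_registers() below rounds up to the frame
// alignment (512 bytes, assuming the usual 16-byte frame::alignment_in_bytes)
// before adding the abi_reg_args area.
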
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
  // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).

  int i;
  int offset;

  // Calculate frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_reg_args_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // Save some registers in the last slots of the not yet pushed frame so that we
  // can use them as scratch regs.
  __ std(R31, -  reg_size, R1_SP);
  __ std(R30, -2*reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr:                        __ mflr(R31); break;
    case return_pc_is_pre_saved:                 assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, _abi(lr), R1_SP);
  }

  // push a new frame
  __ push_frame(frame_size_in_bytes, R30);

  // save all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num < 30) { // We spilled R30-31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}

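// A note on the slot arithmetic above (a restatement, not new behavior):
// OopMap slots are 4-byte c2 stack slots, while each saved register takes
// reg_size == 8 bytes. Hence every register is described by two slot
// entries, 'offset>>2' for the first word and '(offset+half_reg_size)>>2'
// for the second, using vmreg and vmreg->next() respectively. E.g. a
// register saved at byte offset 120 occupies slots 30 and 31.
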
// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // pop the frame
  __ pop_frame();

  // restore the flags
  __ restore_LR_CR(R31);

  // restore scratch register's value
  __ ld(R31, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}

// Is the vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return Assembler::load_const_size + 8;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  Register Rtemp = R12;
  __ load_const(Rtemp, destination);
  __ mtctr(Rtemp);
  __ bctr();
}

#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Registers 0 up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
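
// A worked example of the convention implemented below (illustrative only):
// for a signature (int, long, Object, double) the expanded sig_bt is
// { T_INT, T_LONG, T_VOID, T_OBJECT, T_DOUBLE, T_VOID } and
// java_calling_convention() assigns
//   T_INT -> R3 (set1), T_LONG -> R4 (set2), T_OBJECT -> R5 (set2),
//   T_DOUBLE -> F1 (set2),
// with the T_VOID halves marked bad. Only once the 8 int or 13 float
// argument registers are exhausted do arguments spill to (2-slot aligned
// for longs/doubles/oops) stack slots.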

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack, float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (ireg < num_java_iarg_registers) {
          // Put int/ptr in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put int/ptr on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_intfloat;
        }
        regs[i].set1(reg);
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
        if (ireg < num_java_iarg_registers) {
          // Put long in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put long on stack. They must be aligned to 2 slots.
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
        if (ireg < num_java_iarg_registers) {
          // Put ptr in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put ptr on stack. Objects must be aligned to 2 slots too,
          // because "64-bit pointers record oop-ishness on 2 aligned
          // adjacent registers." (see OopFlow::build_oop_map).
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_FLOAT:
        if (freg < num_java_farg_registers) {
          // Put float in register.
          reg = java_farg_reg[freg];
          ++freg;
        } else {
          // Put float on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_intfloat;
        }
        regs[i].set1(reg);
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
        if (freg < num_java_farg_registers) {
          // Put double in register.
          reg = java_farg_reg[freg];
          ++freg;
        } else {
          // Put double on stack. They must be aligned to 2 slots.
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_VOID:
        // Do not count halves.
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }
  return align_up(stk, 2);
}

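// The C convention that follows differs from the Java one above in two ways
// that are easy to miss (an observation on the code below, not new policy):
// every argument, including 32-bit ints and floats, consumes a full 2-slot
// 64-bit stack position, and a single 'arg' counter advances across int and
// float arguments alike, so a float in argument position 3 makes iarg_reg[3]
// unavailable even though the value travels in an fp register. Float/double
// arguments beyond the first Argument::n_regs_not_on_stack_c positions are
// additionally shadowed on the stack (see the xlC note below), which is why
// the regs2 output array exists.
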
#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail-out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

      //
      // If arguments 0-7 are integers, they are passed in integer registers.
      // Argument i is placed in iarg_reg[i].
      //
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        // We must cast ints to longs and use full 64 bit stack slots
        // here. Thus fall through, handle as long.
      case T_LONG:
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        // Oops are already boxed if required (JNI).
        if (arg < Argument::n_int_register_parameters_c) {
          reg = iarg_reg[arg];
        } else {
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;

      //
      // Floats are treated differently from int regs: The first 13 float arguments
      // are passed in registers (not the float args among the first 13 args).
      // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
      // in farg_reg[j] if argument i is the j-th float argument of this call.
      //
      case T_FLOAT:
#if defined(LINUX)
        // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
        // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
        // Although AIX runs on a big endian CPU, float is in the most
        // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
        if (freg < Argument::n_float_register_parameters_c) {
          // Put float in register ...
          reg = farg_reg[freg];
          ++freg;

          // Argument i for i > 8 is placed on the stack even if it's
          // placed in a register (if it's a float arg). Aix disassembly
          // shows that xlC places these float args on the stack AND in
          // a register. This is not documented, but we follow this
          // convention, too.
          if (arg >= Argument::n_regs_not_on_stack_c) {
            // ... and on the stack.
            guarantee(regs2 != NULL, "must pass float in register and stack slot");
            VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
            regs2[i].set1(reg2);
            stk += inc_stk_for_intfloat;
          }

        } else {
          // Put float on stack.
          reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          stk += inc_stk_for_intfloat;
        }
        regs[i].set1(reg);
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
        if (freg < Argument::n_float_register_parameters_c) {
          // Put double in register ...
          reg = farg_reg[freg];
          ++freg;

          // Argument i for i > 8 is placed on the stack even if it's
          // placed in a register (if it's a double arg). Aix disassembly
          // shows that xlC places these float args on the stack AND in
          // a register. This is not documented, but we follow this
          // convention, too.
          if (arg >= Argument::n_regs_not_on_stack_c) {
            // ... and on the stack.
            guarantee(regs2 != NULL, "must pass float in register and stack slot");
            VMReg reg2 = VMRegImpl::stack2reg(stk);
            regs2[i].set2(reg2);
            stk += inc_stk_for_longdouble;
          }
        } else {
          // Put double on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;

      case T_VOID:
        // Do not count halves.
        regs[i].set_bad();
        --arg;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return align_up(stk, 2);
}
#endif // COMPILER1 || COMPILER2

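// The c2i adapter below bridges a compiled caller to the interpreter: it
// copies the outgoing compiled-convention arguments into the interpreter's
// expected on-stack layout and then jumps to the method's interpreter entry.
// Illustrative layout (a restatement of the code, not additional behavior):
// for (int, long) the adapter stores the int with stw into one 8-byte
// interpreter slot and the long with std into the second of its two slots,
// leaving the first slot as dead space (zeroed in debug builds), then points
// R15_esp at the TOS before dispatching through CTR.
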
static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

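// generate_i2c2i_adapters() below emits three entry points into one blob:
// the i2c entry, the unverified c2i entry (inline-cache check first) and the
// verified c2i entry. As a small worked example of the frame resize done in
// gen_i2c_adapter() above: with comp_args_on_stack == 3 (4-byte slots), the
// code computes align_up(3 * 4, 8) >> 3 = 2 words, rounds that up to the
// even word count 2, and grows the frame by 16 bytes before shuffling the
// arguments (assuming the PPC64 values wordSize == 8, LogBytesPerWord == 3).
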
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a CompiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

#ifdef COMPILER2
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                         + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop, oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}

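// To make the handleizing above concrete (a restatement, not new behavior):
// an oop arriving in, say, R3_ARG1 is spilled to a known frame slot, the
// slot is recorded in the OopMap so the GC can update it, and the *address*
// of the slot is what reaches the native code as a jobject. A NULL oop is
// passed as a NULL handle rather than as a pointer to a NULL-containing
// slot, matching the JNI convention.
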
static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid(), "incoming must be int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

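// save_or_restore_arguments() below is used around runtime calls on the
// critical-native path. One detail worth spelling out (the code states the
// what, not the why): 8-byte values (doubles, longs, T_ARRAY pointers) are
// laid down first, each taking a full VMRegImpl::slots_per_word pair of
// slots, presumably so the 8-byte stores stay word-aligned; the single-word
// values are packed afterwards. Passing a non-NULL OopMap selects the save
// direction, NULL selects restore.
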
static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }
  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      // Value lives in an input register. Save it on stack.
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ stw(in_regs[i].first()->as_Register(), offset, R1_SP);
          } else {
            __ lwa(in_regs[i].first()->as_Register(), offset, R1_SP);
          }
          slot++;
          assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)");
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
  __ call_c(entry_point, relocInfo::runtime_call_type);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if the array is NULL.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}

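// Worked example for the unpack_array_argument() above (illustrative): a
// critical native taking a jint[] receives (jint* body, jint length) instead
// of the array oop. For a non-NULL array the body pointer is the oop plus
// arrayOopDesc::base_offset_in_bytes(T_INT) and the length is loaded from
// length_offset_in_bytes(); for a NULL array both outgoing arguments are
// zero, matching the "pass 0 for both" contract stated above.
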
1635 } 1636 1637 static void verify_oop_args(MacroAssembler* masm, 1638 const methodHandle& method, 1639 const BasicType* sig_bt, 1640 const VMRegPair* regs) { 1641 Register temp_reg = R19_method; // not part of any compiled calling seq 1642 if (VerifyOops) { 1643 for (int i = 0; i < method->size_of_parameters(); i++) { 1644 if (sig_bt[i] == T_OBJECT || 1645 sig_bt[i] == T_ARRAY) { 1646 VMReg r = regs[i].first(); 1647 assert(r->is_valid(), "bad oop arg"); 1648 if (r->is_stack()) { 1649 __ ld(temp_reg, reg2offset(r), R1_SP); 1650 __ verify_oop(temp_reg); 1651 } else { 1652 __ verify_oop(r->as_Register()); 1653 } 1654 } 1655 } 1656 } 1657 } 1658 1659 static void gen_special_dispatch(MacroAssembler* masm, 1660 const methodHandle& method, 1661 const BasicType* sig_bt, 1662 const VMRegPair* regs) { 1663 verify_oop_args(masm, method, sig_bt, regs); 1664 vmIntrinsics::ID iid = method->intrinsic_id(); 1665 1666 // Now write the args into the outgoing interpreter space 1667 bool has_receiver = false; 1668 Register receiver_reg = noreg; 1669 int member_arg_pos = -1; 1670 Register member_reg = noreg; 1671 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1672 if (ref_kind != 0) { 1673 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1674 member_reg = R19_method; // known to be free at this point 1675 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1676 } else if (iid == vmIntrinsics::_invokeBasic) { 1677 has_receiver = true; 1678 } else { 1679 fatal("unexpected intrinsic id %d", iid); 1680 } 1681 1682 if (member_reg != noreg) { 1683 // Load the member_arg into register, if necessary. 1684 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1685 VMReg r = regs[member_arg_pos].first(); 1686 if (r->is_stack()) { 1687 __ ld(member_reg, reg2offset(r), R1_SP); 1688 } else { 1689 // no data motion is needed 1690 member_reg = r->as_Register(); 1691 } 1692 } 1693 1694 if (has_receiver) { 1695 // Make sure the receiver is loaded into a register. 1696 assert(method->size_of_parameters() > 0, "oob"); 1697 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1698 VMReg r = regs[0].first(); 1699 assert(r->is_valid(), "bad receiver arg"); 1700 if (r->is_stack()) { 1701 // Porting note: This assumes that compiled calling conventions always 1702 // pass the receiver oop in a register. If this is not true on some 1703 // platform, pick a temp and load the receiver from stack. 1704 fatal("receiver always in a register"); 1705 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point? 1706 __ ld(receiver_reg, reg2offset(r), R1_SP); 1707 } else { 1708 // no data motion is needed 1709 receiver_reg = r->as_Register(); 1710 } 1711 } 1712 1713 // Figure out which address we are really jumping to: 1714 MethodHandles::generate_method_handle_dispatch(masm, iid, 1715 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1716 } 1717 1718 #endif // COMPILER2 1719 1720 // --------------------------------------------------------------------------- 1721 // Generate a native wrapper for a given method. The method takes arguments 1722 // in the Java compiled code convention, marshals them to the native 1723 // convention (handlizes oops, etc), transitions to native, makes the call, 1724 // returns to java state (possibly blocking), unhandlizes any result and 1725 // returns. 
1726 //
1727 // Critical native functions are a shorthand for the use of
1728 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1729 // functions. The wrapper is expected to unpack the arguments before
1730 // passing them to the callee and perform checks before and after the
1731 // native call to ensure that the GCLocker
1732 // lock_critical/unlock_critical semantics are followed. Some other
1733 // parts of JNI setup are skipped, like the tear down of the JNI handle
1734 // block and the check for pending exceptions, since it's impossible for them
1735 // to be thrown.
1736 //
1737 // They are roughly structured like this:
1738 //   if (GCLocker::needs_gc())
1739 //     SharedRuntime::block_for_jni_critical();
1740 //   transition to thread_in_native
1741 //   unpack array arguments and call native entry point
1742 //   check for safepoint in progress
1743 //   check if any thread suspend flags are set
1744 //     call into JVM and possibly unlock the JNI critical
1745 //     if a GC was suppressed while in the critical native.
1746 //   transition back to thread_in_Java
1747 //   return to caller
1748 //
1749 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1750                                                 const methodHandle& method,
1751                                                 int compile_id,
1752                                                 BasicType *in_sig_bt,
1753                                                 VMRegPair *in_regs,
1754                                                 BasicType ret_type) {
1755 #ifdef COMPILER2
1756   if (method->is_method_handle_intrinsic()) {
1757     vmIntrinsics::ID iid = method->intrinsic_id();
1758     intptr_t start = (intptr_t)__ pc();
1759     int vep_offset = ((intptr_t)__ pc()) - start;
1760     gen_special_dispatch(masm,
1761                          method,
1762                          in_sig_bt,
1763                          in_regs);
1764     int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1765     __ flush();
1766     int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1767     return nmethod::new_native_nmethod(method,
1768                                        compile_id,
1769                                        masm->code(),
1770                                        vep_offset,
1771                                        frame_complete,
1772                                        stack_slots / VMRegImpl::slots_per_word,
1773                                        in_ByteSize(-1),
1774                                        in_ByteSize(-1),
1775                                        (OopMapSet*)NULL);
1776   }
1777
1778   bool is_critical_native = true;
1779   address native_func = method->critical_native_function();
1780   if (native_func == NULL) {
1781     native_func = method->native_function();
1782     is_critical_native = false;
1783   }
1784   assert(native_func != NULL, "must have function");
1785
1786   // First, create signature for outgoing C call
1787   // --------------------------------------------------------------------------
1788
1789   int total_in_args = method->size_of_parameters();
1790   // We have received a description of where all the Java args are located
1791   // on entry to the wrapper. We need to convert these args to where
1792   // the JNI function will expect them. To figure out where they go
1793   // we convert the Java signature to a C signature by inserting
1794   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1795
1796   // Calculate the total number of C arguments and create arrays for the
1797   // signature and the outgoing registers.
1798   // On ppc64, we have two arrays for the outgoing registers, because
1799   // some floating-point arguments must be passed in registers _and_
1800   // in stack locations.
1801   bool method_is_static = method->is_static();
1802   int total_c_args = total_in_args;
1803
1804   if (!is_critical_native) {
1805     int n_hidden_args = method_is_static ? 2 : 1;
1806     total_c_args += n_hidden_args;
1807   } else {
1808     // No JNIEnv*, no this*, but unpacked arrays (base+length).
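// For example: for a critical native with Java signature (I[B)V,
// total_in_args is 2 but total_c_args becomes 3, since the byte[]
// expands to a (length, body) pair while the int remains a single
// C argument.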
1809 for (int i = 0; i < total_in_args; i++) { 1810 if (in_sig_bt[i] == T_ARRAY) { 1811 total_c_args++; 1812 } 1813 } 1814 } 1815 1816 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1817 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1818 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1819 BasicType* in_elem_bt = NULL; 1820 1821 // Create the signature for the C call: 1822 // 1) add the JNIEnv* 1823 // 2) add the class if the method is static 1824 // 3) copy the rest of the incoming signature (shifted by the number of 1825 // hidden arguments). 1826 1827 int argc = 0; 1828 if (!is_critical_native) { 1829 out_sig_bt[argc++] = T_ADDRESS; 1830 if (method->is_static()) { 1831 out_sig_bt[argc++] = T_OBJECT; 1832 } 1833 1834 for (int i = 0; i < total_in_args ; i++ ) { 1835 out_sig_bt[argc++] = in_sig_bt[i]; 1836 } 1837 } else { 1838 Thread* THREAD = Thread::current(); 1839 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1840 SignatureStream ss(method->signature()); 1841 int o = 0; 1842 for (int i = 0; i < total_in_args ; i++, o++) { 1843 if (in_sig_bt[i] == T_ARRAY) { 1844 // Arrays are passed as int, elem* pair 1845 Symbol* atype = ss.as_symbol(CHECK_NULL); 1846 const char* at = atype->as_C_string(); 1847 if (strlen(at) == 2) { 1848 assert(at[0] == '[', "must be"); 1849 switch (at[1]) { 1850 case 'B': in_elem_bt[o] = T_BYTE; break; 1851 case 'C': in_elem_bt[o] = T_CHAR; break; 1852 case 'D': in_elem_bt[o] = T_DOUBLE; break; 1853 case 'F': in_elem_bt[o] = T_FLOAT; break; 1854 case 'I': in_elem_bt[o] = T_INT; break; 1855 case 'J': in_elem_bt[o] = T_LONG; break; 1856 case 'S': in_elem_bt[o] = T_SHORT; break; 1857 case 'Z': in_elem_bt[o] = T_BOOLEAN; break; 1858 default: ShouldNotReachHere(); 1859 } 1860 } 1861 } else { 1862 in_elem_bt[o] = T_VOID; 1863 } 1864 if (in_sig_bt[i] != T_VOID) { 1865 assert(in_sig_bt[i] == ss.type(), "must match"); 1866 ss.next(); 1867 } 1868 } 1869 1870 for (int i = 0; i < total_in_args ; i++ ) { 1871 if (in_sig_bt[i] == T_ARRAY) { 1872 // Arrays are passed as int, elem* pair. 1873 out_sig_bt[argc++] = T_INT; 1874 out_sig_bt[argc++] = T_ADDRESS; 1875 } else { 1876 out_sig_bt[argc++] = in_sig_bt[i]; 1877 } 1878 } 1879 } 1880 1881 1882 // Compute the wrapper's frame size. 1883 // -------------------------------------------------------------------------- 1884 1885 // Now figure out where the args must be stored and how much stack space 1886 // they require. 1887 // 1888 // Compute framesize for the wrapper. We need to handlize all oops in 1889 // incoming registers. 1890 // 1891 // Calculate the total number of stack slots we will need: 1892 // 1) abi requirements 1893 // 2) outgoing arguments 1894 // 3) space for inbound oop handle area 1895 // 4) space for handlizing a klass if static method 1896 // 5) space for a lock if synchronized method 1897 // 6) workspace for saving return values, int <-> float reg moves, etc. 
1898 // 7) alignment 1899 // 1900 // Layout of the native wrapper frame: 1901 // (stack grows upwards, memory grows downwards) 1902 // 1903 // NW [ABI_REG_ARGS] <-- 1) R1_SP 1904 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 1905 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives) 1906 // klass <-- 4) R1_SP + klass_offset 1907 // lock <-- 5) R1_SP + lock_offset 1908 // [workspace] <-- 6) R1_SP + workspace_offset 1909 // [alignment] (optional) <-- 7) 1910 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 1911 // 1912 // - *_slot_offset Indicates offset from SP in number of stack slots. 1913 // - *_offset Indicates offset from SP in bytes. 1914 1915 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) + // 1+2) 1916 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 1917 1918 // Now the space for the inbound oop handle area. 1919 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 1920 if (is_critical_native) { 1921 // Critical natives may have to call out so they need a save area 1922 // for register arguments. 1923 int double_slots = 0; 1924 int single_slots = 0; 1925 for (int i = 0; i < total_in_args; i++) { 1926 if (in_regs[i].first()->is_Register()) { 1927 const Register reg = in_regs[i].first()->as_Register(); 1928 switch (in_sig_bt[i]) { 1929 case T_BOOLEAN: 1930 case T_BYTE: 1931 case T_SHORT: 1932 case T_CHAR: 1933 case T_INT: 1934 // Fall through. 1935 case T_ARRAY: 1936 case T_LONG: double_slots++; break; 1937 default: ShouldNotReachHere(); 1938 } 1939 } else if (in_regs[i].first()->is_FloatRegister()) { 1940 switch (in_sig_bt[i]) { 1941 case T_FLOAT: single_slots++; break; 1942 case T_DOUBLE: double_slots++; break; 1943 default: ShouldNotReachHere(); 1944 } 1945 } 1946 } 1947 total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even 1948 } 1949 1950 int oop_handle_slot_offset = stack_slots; 1951 stack_slots += total_save_slots; // 3) 1952 1953 int klass_slot_offset = 0; 1954 int klass_offset = -1; 1955 if (method_is_static && !is_critical_native) { // 4) 1956 klass_slot_offset = stack_slots; 1957 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1958 stack_slots += VMRegImpl::slots_per_word; 1959 } 1960 1961 int lock_slot_offset = 0; 1962 int lock_offset = -1; 1963 if (method->is_synchronized()) { // 5) 1964 lock_slot_offset = stack_slots; 1965 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 1966 stack_slots += VMRegImpl::slots_per_word; 1967 } 1968 1969 int workspace_slot_offset = stack_slots; // 6) 1970 stack_slots += 2; 1971 1972 // Now compute actual number of stack words we need. 1973 // Rounding to make stack properly aligned. 1974 stack_slots = align_up(stack_slots, // 7) 1975 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 1976 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 1977 1978 1979 // Now we can start generating code. 
1980 // --------------------------------------------------------------------------
1981
1982   intptr_t start_pc = (intptr_t)__ pc();
1983   intptr_t vep_start_pc;
1984   intptr_t frame_done_pc;
1985   intptr_t oopmap_pc;
1986
1987   Label    ic_miss;
1988   Label    handle_pending_exception;
1989
1990   Register r_callers_sp = R21;
1991   Register r_temp_1     = R22;
1992   Register r_temp_2     = R23;
1993   Register r_temp_3     = R24;
1994   Register r_temp_4     = R25;
1995   Register r_temp_5     = R26;
1996   Register r_temp_6     = R27;
1997   Register r_return_pc  = R28;
1998
1999   Register r_carg1_jnienv        = noreg;
2000   Register r_carg2_classorobject = noreg;
2001   if (!is_critical_native) {
2002     r_carg1_jnienv        = out_regs[0].first()->as_Register();
2003     r_carg2_classorobject = out_regs[1].first()->as_Register();
2004   }
2005
2006
2007   // Generate the Unverified Entry Point (UEP).
2008   // --------------------------------------------------------------------------
2009   assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2010
2011   // Check ic: object class == cached class?
2012   if (!method_is_static) {
2013     Register ic = as_Register(Matcher::inline_cache_reg_encode());
2014     Register receiver_klass = r_temp_1;
2015
2016     __ cmpdi(CCR0, R3_ARG1, 0);
2017     __ beq(CCR0, ic_miss);
2018     __ verify_oop(R3_ARG1);
2019     __ load_klass(receiver_klass, R3_ARG1);
2020
2021     __ cmpd(CCR0, receiver_klass, ic);
2022     __ bne(CCR0, ic_miss);
2023   }
2024
2025
2026   // Generate the Verified Entry Point (VEP).
2027   // --------------------------------------------------------------------------
2028   vep_start_pc = (intptr_t)__ pc();
2029
2030   __ save_LR_CR(r_temp_1);
2031   __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2032   __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
2033   __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
2034   frame_done_pc = (intptr_t)__ pc();
2035
2036   __ verify_thread();
2037
2038   // Native nmethod wrappers never take possession of the oop arguments.
2039   // So the caller will GC the arguments.
2040   // The only thing we need an oopMap for is if the call is static.
2041   //
2042   // An OopMap for lock (and class if static), and one for the VM call itself.
2043   OopMapSet *oop_maps = new OopMapSet();
2044   OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2045
2046   if (is_critical_native) {
2047     check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset,
2048                                        oop_maps, in_regs, in_sig_bt, r_temp_1);
2049   }
2050
2051   // Move arguments from register/stack to register/stack.
2052   // --------------------------------------------------------------------------
2053   //
2054   // We immediately shuffle the arguments so that for any vm call we have
2055   // to make from here on out (sync slow path, jvmti, etc.) we will have
2056   // captured the oops from our caller and have a valid oopMap for them.
2057   //
2058   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2059   // (derived from JavaThread* which is in R16_thread) and, if static,
2060   // the class mirror instead of a receiver. This pretty much guarantees that
2061   // register layout will not match. We ignore these extra arguments during
2062   // the shuffle. The shuffle is described by the two calling convention
2063   // vectors we have in our possession. We simply walk the Java vector to
2064   // get the source locations and the C vector to get the destinations.
2065
2066   // Record sp-based slot for receiver on stack for non-static methods.
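  // This slot is reused by the unlock path below, which reloads the
  // receiver oop from its handle in this frame.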
2067   int receiver_offset = -1;
2068
2069   // We move the arguments backward because the floating point register
2070   // destination will always be a register with a greater or equal
2071   // register number, or a stack slot.
2072   //   in  is the index of the incoming Java arguments
2073   //   out is the index of the outgoing C arguments
2074
2075 #ifdef ASSERT
2076   bool reg_destroyed[RegisterImpl::number_of_registers];
2077   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2078   for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
2079     reg_destroyed[r] = false;
2080   }
2081   for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
2082     freg_destroyed[f] = false;
2083   }
2084 #endif // ASSERT
2085
2086   for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
2087
2088 #ifdef ASSERT
2089     if (in_regs[in].first()->is_Register()) {
2090       assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
2091     } else if (in_regs[in].first()->is_FloatRegister()) {
2092       assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
2093     }
2094     if (out_regs[out].first()->is_Register()) {
2095       reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
2096     } else if (out_regs[out].first()->is_FloatRegister()) {
2097       freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
2098     }
2099     if (out_regs2[out].first()->is_Register()) {
2100       reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
2101     } else if (out_regs2[out].first()->is_FloatRegister()) {
2102       freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
2103     }
2104 #endif // ASSERT
2105
2106     switch (in_sig_bt[in]) {
2107       case T_BOOLEAN:
2108       case T_CHAR:
2109       case T_BYTE:
2110       case T_SHORT:
2111       case T_INT:
2112         // Move int and do sign extension.
2113         int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2114         break;
2115       case T_LONG:
2116         long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2117         break;
2118       case T_ARRAY:
2119         if (is_critical_native) {
2120           int body_arg = out;
2121           out -= 1; // Point to length arg.
2122           unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
2123                                 r_callers_sp, r_temp_1, r_temp_2);
2124           break;
2125         }
2126       case T_OBJECT:
2127         assert(!is_critical_native, "no oop arguments");
2128         object_move(masm, stack_slots,
2129                     oop_map, oop_handle_slot_offset,
2130                     ((in == 0) && (!method_is_static)), &receiver_offset,
2131                     in_regs[in], out_regs[out],
2132                     r_callers_sp, r_temp_1, r_temp_2);
2133         break;
2134       case T_VOID:
2135         break;
2136       case T_FLOAT:
2137         float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2138         if (out_regs2[out].first()->is_valid()) {
2139           float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2140         }
2141         break;
2142       case T_DOUBLE:
2143         double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2144         if (out_regs2[out].first()->is_valid()) {
2145           double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2146         }
2147         break;
2148       case T_ADDRESS:
2149         fatal("found type (T_ADDRESS) in java args");
2150         break;
2151       default:
2152         ShouldNotReachHere();
2153         break;
2154     }
2155   }
2156
2157   // Pre-load a static method's oop into ARG2.
2158   // Used both by locking code and the normal JNI call code.
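  // For a static synchronized method, this handlized class mirror is also
  // the object the locking code below locks on.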
2159 if (method_is_static && !is_critical_native) { 2160 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2161 r_carg2_classorobject); 2162 2163 // Now handlize the static class mirror in carg2. It's known not-null. 2164 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2165 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2166 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2167 } 2168 2169 // Get JNIEnv* which is first argument to native. 2170 if (!is_critical_native) { 2171 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2172 } 2173 2174 // NOTE: 2175 // 2176 // We have all of the arguments setup at this point. 2177 // We MUST NOT touch any outgoing regs from this point on. 2178 // So if we must call out we must push a new frame. 2179 2180 // Get current pc for oopmap, and load it patchable relative to global toc. 2181 oopmap_pc = (intptr_t) __ pc(); 2182 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2183 2184 // We use the same pc/oopMap repeatedly when we call out. 2185 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2186 2187 // r_return_pc now has the pc loaded that we will use when we finally call 2188 // to native. 2189 2190 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2191 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2192 2193 # if 0 2194 // DTrace method entry 2195 # endif 2196 2197 // Lock a synchronized method. 2198 // -------------------------------------------------------------------------- 2199 2200 if (method->is_synchronized()) { 2201 assert(!is_critical_native, "unhandled"); 2202 ConditionRegister r_flag = CCR1; 2203 Register r_oop = r_temp_4; 2204 const Register r_box = r_temp_5; 2205 Label done, locked; 2206 2207 // Load the oop for the object or class. r_carg2_classorobject contains 2208 // either the handlized oop from the incoming arguments or the handlized 2209 // class mirror (if the method is static). 2210 __ ld(r_oop, 0, r_carg2_classorobject); 2211 2212 // Get the lock box slot's address. 2213 __ addi(r_box, R1_SP, lock_offset); 2214 2215 # ifdef ASSERT 2216 if (UseBiasedLocking) { 2217 // Making the box point to itself will make it clear it went unused 2218 // but also be obviously invalid. 2219 __ std(r_box, 0, r_box); 2220 } 2221 # endif // ASSERT 2222 2223 // Try fastpath for locking. 2224 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2225 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2226 __ beq(r_flag, locked); 2227 2228 // None of the above fast optimizations worked so we have to get into the 2229 // slow case of monitor enter. Inline a special case of call_VM that 2230 // disallows any pending_exception. 2231 2232 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2233 int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2234 __ mr(R11_scratch1, R1_SP); 2235 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 2236 2237 // Do the call. 
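    // Arguments are (oop obj, BasicLock* lock, JavaThread* thread),
    // mirroring the slow-path unlock call further down.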
2238     __ set_last_Java_frame(R11_scratch1, r_return_pc);
2239     assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
2240     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
2241     __ reset_last_Java_frame();
2242
2243     RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
2244
2245     __ asm_assert_mem8_is_zero(thread_(pending_exception),
2246       "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);
2247
2248     __ bind(locked);
2249   }
2250
2251
2252   // Publish thread state
2253   // --------------------------------------------------------------------------
2254
2255   // Use that pc we placed in r_return_pc a while back as the current frame anchor.
2256   __ set_last_Java_frame(R1_SP, r_return_pc);
2257
2258   // Transition from _thread_in_Java to _thread_in_native.
2259   __ li(R0, _thread_in_native);
2260   __ release();
2261   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2262   __ stw(R0, thread_(thread_state));
2263
2264
2265   // The JNI call
2266   // --------------------------------------------------------------------------
2267 #if defined(ABI_ELFv2)
2268   __ call_c(native_func, relocInfo::runtime_call_type);
2269 #else
2270   FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
2271   __ call_c(fd_native_method, relocInfo::runtime_call_type);
2272 #endif
2273
2274
2275   // Now, we are back from the native code.
2276
2277
2278   // Unpack the native result.
2279   // --------------------------------------------------------------------------
2280
2281   // For int-types, we do any needed sign-extension.
2282   // Care must be taken that the return values (R3_RET and F1_RET)
2283   // will survive any VM calls for blocking or unlocking.
2284   // An OOP result (handle) is done specially in the slow-path code.
2285
2286   switch (ret_type) {
2287     case T_VOID:    break;        // Nothing to do!
2288     case T_FLOAT:   break;        // Got it where we want it (unless slow-path).
2289     case T_DOUBLE:  break;        // Got it where we want it (unless slow-path).
2290     case T_LONG:    break;        // Got it where we want it (unless slow-path).
2291     case T_OBJECT:  break;        // Really a handle.
2292                                   // Cannot de-handlize until after reclaiming jvm_lock.
2293     case T_ARRAY:   break;
2294
2295     case T_BOOLEAN: {             // 0 -> false(0); !0 -> true(1)
2296       Label skip_modify;
2297       __ cmpwi(CCR0, R3_RET, 0);
2298       __ beq(CCR0, skip_modify);
2299       __ li(R3_RET, 1);
2300       __ bind(skip_modify);
2301       break;
2302     }
2303     case T_BYTE: {                // sign extension
2304       __ extsb(R3_RET, R3_RET);
2305       break;
2306     }
2307     case T_CHAR: {                // unsigned result
2308       __ andi(R3_RET, R3_RET, 0xffff);
2309       break;
2310     }
2311     case T_SHORT: {               // sign extension
2312       __ extsh(R3_RET, R3_RET);
2313       break;
2314     }
2315     case T_INT:                   // nothing to do
2316       break;
2317     default:
2318       ShouldNotReachHere();
2319       break;
2320   }
2321
2322
2323   // Publish thread state
2324   // --------------------------------------------------------------------------
2325
2326   // Switch thread to "native transition" state before reading the
2327   // synchronization state. This additional state is necessary because reading
2328   // and testing the synchronization state is not atomic w.r.t. GC, as this
2329   // scenario demonstrates:
2330   // - Java thread A, in _thread_in_native state, loads _not_synchronized
2331   //   and is preempted.
2332 // - VM thread changes sync state to synchronizing and suspends threads 2333 // for GC. 2334 // - Thread A is resumed to finish this native method, but doesn't block 2335 // here since it didn't see any synchronization in progress, and escapes. 2336 2337 // Transition from _thread_in_native to _thread_in_native_trans. 2338 __ li(R0, _thread_in_native_trans); 2339 __ release(); 2340 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2341 __ stw(R0, thread_(thread_state)); 2342 2343 2344 // Must we block? 2345 // -------------------------------------------------------------------------- 2346 2347 // Block, if necessary, before resuming in _thread_in_Java state. 2348 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2349 Label after_transition; 2350 { 2351 Label no_block, sync; 2352 2353 if (os::is_MP()) { 2354 if (UseMembar) { 2355 // Force this write out before the read below. 2356 __ fence(); 2357 } else { 2358 // Write serialization page so VM thread can do a pseudo remote membar. 2359 // We use the current thread pointer to calculate a thread specific 2360 // offset to write to within the page. This minimizes bus traffic 2361 // due to cache line collision. 2362 __ serialize_memory(R16_thread, r_temp_4, r_temp_5); 2363 } 2364 } 2365 2366 Register sync_state_addr = r_temp_4; 2367 Register sync_state = r_temp_5; 2368 Register suspend_flags = r_temp_6; 2369 2370 // No synchronization in progress nor yet synchronized 2371 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2372 __ safepoint_poll(sync, sync_state); 2373 2374 // Not suspended. 2375 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2376 __ lwz(suspend_flags, thread_(suspend_flags)); 2377 __ cmpwi(CCR1, suspend_flags, 0); 2378 __ beq(CCR1, no_block); 2379 2380 // Block. Save any potential method result value before the operation and 2381 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2382 // lets us share the oopMap we used when we went native rather than create 2383 // a distinct one for this pc. 2384 __ bind(sync); 2385 __ isync(); 2386 2387 address entry_point = is_critical_native 2388 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition) 2389 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2390 save_native_result(masm, ret_type, workspace_slot_offset); 2391 __ call_VM_leaf(entry_point, R16_thread); 2392 restore_native_result(masm, ret_type, workspace_slot_offset); 2393 2394 if (is_critical_native) { 2395 __ b(after_transition); // No thread state transition here. 2396 } 2397 __ bind(no_block); 2398 } 2399 2400 // Publish thread state. 2401 // -------------------------------------------------------------------------- 2402 2403 // Thread state is thread_in_native_trans. Any safepoint blocking has 2404 // already happened so we can now change state to _thread_in_Java. 2405 2406 // Transition from _thread_in_native_trans to _thread_in_Java. 2407 __ li(R0, _thread_in_Java); 2408 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2409 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2410 __ stw(R0, thread_(thread_state)); 2411 __ bind(after_transition); 2412 2413 // Reguard any pages if necessary. 
2414   // --------------------------------------------------------------------------
2415
2416   Label no_reguard;
2417   __ lwz(r_temp_1, thread_(stack_guard_state));
2418   __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
2419   __ bne(CCR0, no_reguard);
2420
2421   save_native_result(masm, ret_type, workspace_slot_offset);
2422   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2423   restore_native_result(masm, ret_type, workspace_slot_offset);
2424
2425   __ bind(no_reguard);
2426
2427
2428   // Unlock
2429   // --------------------------------------------------------------------------
2430
2431   if (method->is_synchronized()) {
2432
2433     ConditionRegister r_flag   = CCR1;
2434     const Register r_oop       = r_temp_4;
2435     const Register r_box       = r_temp_5;
2436     const Register r_exception = r_temp_6;
2437     Label done;
2438
2439     // Get oop and address of lock object box.
2440     if (method_is_static) {
2441       assert(klass_offset != -1, "");
2442       __ ld(r_oop, klass_offset, R1_SP);
2443     } else {
2444       assert(receiver_offset != -1, "");
2445       __ ld(r_oop, receiver_offset, R1_SP);
2446     }
2447     __ addi(r_box, R1_SP, lock_offset);
2448
2449     // Try fastpath for unlocking.
2450     __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2451     __ beq(r_flag, done);
2452
2453     // Save and restore any potential method result value around the unlocking operation.
2454     save_native_result(masm, ret_type, workspace_slot_offset);
2455
2456     // Must save pending exception around the slow-path VM call. Since it's a
2457     // leaf call, the pending exception (if any) can be kept in a register.
2458     __ ld(r_exception, thread_(pending_exception));
2459     assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2460     __ li(R0, 0);
2461     __ std(R0, thread_(pending_exception));
2462
2463     // Slow case of monitor exit.
2464     // Inline a special case of call_VM that disallows any pending_exception.
2465     // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2466     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2467
2468     __ asm_assert_mem8_is_zero(thread_(pending_exception),
2469       "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
2470
2471     restore_native_result(masm, ret_type, workspace_slot_offset);
2472
2473     // Check_forward_pending_exception jumps to forward_exception if any pending
2474     // exception is set. The forward_exception routine expects to see the
2475     // exception in pending_exception and not in a register. Kind of clumsy,
2476     // since all folks who branch to forward_exception must have tested
2477     // pending_exception first and hence have it in a register already.
2478     __ std(r_exception, thread_(pending_exception));
2479
2480     __ bind(done);
2481   }
2482
2483 # if 0
2484   // DTrace method exit
2485 # endif
2486
2487   // Clear "last Java frame" SP and PC.
2488   // --------------------------------------------------------------------------
2489
2490   __ reset_last_Java_frame();
2491
2492   // Unbox oop result, e.g. JNIHandles::resolve value.
2493   // --------------------------------------------------------------------------
2494
2495   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2496     __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false); // kills R31
2497   }
2498
2499   if (CheckJNICalls) {
2500     // clear_pending_jni_exception_check
2501     __ load_const_optimized(R0, 0L);
2502     __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
2503   }
2504
2505   // Reset handle block.
2506   // --------------------------------------------------------------------------
2507   if (!is_critical_native) {
2508     __ ld(r_temp_1, thread_(active_handles));
2509     // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
2510     __ li(r_temp_2, 0);
2511     __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
2512
2513
2514     // Check for pending exceptions.
2515     // --------------------------------------------------------------------------
2516     __ ld(r_temp_2, thread_(pending_exception));
2517     __ cmpdi(CCR0, r_temp_2, 0);
2518     __ bne(CCR0, handle_pending_exception);
2519   }
2520
2521   // Return
2522   // --------------------------------------------------------------------------
2523
2524   __ pop_frame();
2525   __ restore_LR_CR(R11);
2526   __ blr();
2527
2528
2529   // Handler for pending exceptions (out-of-line).
2530   // --------------------------------------------------------------------------
2531
2532   // Since this is a native call, we know the proper exception handler
2533   // is the empty function. We just pop this frame and then jump to
2534   // forward_exception_entry.
2535   if (!is_critical_native) {
2536     __ align(InteriorEntryAlignment);
2537     __ bind(handle_pending_exception);
2538
2539     __ pop_frame();
2540     __ restore_LR_CR(R11);
2541     __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2542                      relocInfo::runtime_call_type);
2543   }
2544
2545   // Handler for a cache miss (out-of-line).
2546   // --------------------------------------------------------------------------
2547
2548   if (!method_is_static) {
2549     __ align(InteriorEntryAlignment);
2550     __ bind(ic_miss);
2551
2552     __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
2553                      relocInfo::runtime_call_type);
2554   }
2555
2556   // Done.
2557   // --------------------------------------------------------------------------
2558
2559   __ flush();
2560
2561   nmethod *nm = nmethod::new_native_nmethod(method,
2562                                             compile_id,
2563                                             masm->code(),
2564                                             vep_start_pc-start_pc,
2565                                             frame_done_pc-start_pc,
2566                                             stack_slots / VMRegImpl::slots_per_word,
2567                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2568                                             in_ByteSize(lock_offset),
2569                                             oop_maps);
2570
2571   if (is_critical_native) {
2572     nm->set_lazy_critical_native(true);
2573   }
2574
2575   return nm;
2576 #else
2577   ShouldNotReachHere();
2578   return NULL;
2579 #endif // COMPILER2
2580 }
2581
2582 // This function returns the adjustment (in number of words) to a c2i adapter
2583 // activation for use during deoptimization.
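// Worked example (illustrative): a callee with 2 parameters and 5 locals
// needs (5 - 2) * Interpreter::stackElementWords extra words, which
// align_up then rounds to the required frame alignment.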
2584 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2585 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes); 2586 } 2587 2588 uint SharedRuntime::out_preserve_stack_slots() { 2589 #if defined(COMPILER1) || defined(COMPILER2) 2590 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2591 #else 2592 return 0; 2593 #endif 2594 } 2595 2596 #if defined(COMPILER1) || defined(COMPILER2) 2597 // Frame generation for deopt and uncommon trap blobs. 2598 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2599 /* Read */ 2600 Register unroll_block_reg, 2601 /* Update */ 2602 Register frame_sizes_reg, 2603 Register number_of_frames_reg, 2604 Register pcs_reg, 2605 /* Invalidate */ 2606 Register frame_size_reg, 2607 Register pc_reg) { 2608 2609 __ ld(pc_reg, 0, pcs_reg); 2610 __ ld(frame_size_reg, 0, frame_sizes_reg); 2611 __ std(pc_reg, _abi(lr), R1_SP); 2612 __ push_frame(frame_size_reg, R0/*tmp*/); 2613 #ifdef ASSERT 2614 __ load_const_optimized(pc_reg, 0x5afe); 2615 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2616 #endif 2617 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2618 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2619 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2620 __ addi(pcs_reg, pcs_reg, wordSize); 2621 } 2622 2623 // Loop through the UnrollBlock info and create new frames. 2624 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2625 /* read */ 2626 Register unroll_block_reg, 2627 /* invalidate */ 2628 Register frame_sizes_reg, 2629 Register number_of_frames_reg, 2630 Register pcs_reg, 2631 Register frame_size_reg, 2632 Register pc_reg) { 2633 Label loop; 2634 2635 // _number_of_frames is of type int (deoptimization.hpp) 2636 __ lwa(number_of_frames_reg, 2637 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), 2638 unroll_block_reg); 2639 __ ld(pcs_reg, 2640 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), 2641 unroll_block_reg); 2642 __ ld(frame_sizes_reg, 2643 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), 2644 unroll_block_reg); 2645 2646 // stack: (caller_of_deoptee, ...). 2647 2648 // At this point we either have an interpreter frame or a compiled 2649 // frame on top of stack. If it is a compiled frame we push a new c2i 2650 // adapter here 2651 2652 // Memorize top-frame stack-pointer. 2653 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2654 2655 // Resize interpreter top frame OR C2I adapter. 2656 2657 // At this moment, the top frame (which is the caller of the deoptee) is 2658 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2659 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2660 // outgoing arguments. 2661 // 2662 // In order to push the interpreter frame for the deoptee, we need to 2663 // resize the top frame such that we are able to place the deoptee's 2664 // locals in the frame. 2665 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2666 // into a valid PARENT_IJAVA_FRAME_ABI. 2667 2668 __ lwa(R11_scratch1, 2669 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), 2670 unroll_block_reg); 2671 __ neg(R11_scratch1, R11_scratch1); 2672 2673 // R11_scratch1 contains size of locals for frame resizing. 2674 // R12_scratch2 contains top frame's lr. 2675 2676 // Resize frame by complete frame size prevents TOC from being 2677 // overwritten by locals. 
A more stack space saving way would be 2678 // to copy the TOC to its location in the new abi. 2679 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2680 2681 // now, resize the frame 2682 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2683 2684 // In the case where we have resized a c2i frame above, the optional 2685 // alignment below the locals has size 32 (why?). 2686 __ std(R12_scratch2, _abi(lr), R1_SP); 2687 2688 // Initialize initial_caller_sp. 2689 #ifdef ASSERT 2690 __ load_const_optimized(pc_reg, 0x5afe); 2691 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2692 #endif 2693 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2694 2695 #ifdef ASSERT 2696 // Make sure that there is at least one entry in the array. 2697 __ cmpdi(CCR0, number_of_frames_reg, 0); 2698 __ asm_assert_ne("array_size must be > 0", 0x205); 2699 #endif 2700 2701 // Now push the new interpreter frames. 2702 // 2703 __ bind(loop); 2704 // Allocate a new frame, fill in the pc. 2705 push_skeleton_frame(masm, deopt, 2706 unroll_block_reg, 2707 frame_sizes_reg, 2708 number_of_frames_reg, 2709 pcs_reg, 2710 frame_size_reg, 2711 pc_reg); 2712 __ cmpdi(CCR0, number_of_frames_reg, 0); 2713 __ bne(CCR0, loop); 2714 2715 // Get the return address pointing into the frame manager. 2716 __ ld(R0, 0, pcs_reg); 2717 // Store it in the top interpreter frame. 2718 __ std(R0, _abi(lr), R1_SP); 2719 // Initialize frame_manager_lr of interpreter top frame. 2720 } 2721 #endif 2722 2723 void SharedRuntime::generate_deopt_blob() { 2724 // Allocate space for the code 2725 ResourceMark rm; 2726 // Setup code generation tools 2727 CodeBuffer buffer("deopt_blob", 2048, 1024); 2728 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2729 Label exec_mode_initialized; 2730 int frame_size_in_words; 2731 OopMap* map = NULL; 2732 OopMapSet *oop_maps = new OopMapSet(); 2733 2734 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2735 const int frame_size_in_bytes = frame::abi_reg_args_spill_size; 2736 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2737 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2738 2739 const Register exec_mode_reg = R21_tmp1; 2740 2741 const address start = __ pc(); 2742 2743 #if defined(COMPILER1) || defined(COMPILER2) 2744 // -------------------------------------------------------------------------- 2745 // Prolog for non exception case! 2746 2747 // We have been called from the deopt handler of the deoptee. 2748 // 2749 // deoptee: 2750 // ... 2751 // call X 2752 // ... 2753 // deopt_handler: call_deopt_stub 2754 // cur. return pc --> ... 2755 // 2756 // So currently SR_LR points behind the call in the deopt handler. 2757 // We adjust it such that it points to the start of the deopt handler. 2758 // The return_pc has been stored in the frame of the deoptee and 2759 // will replace the address of the deopt_handler in the call 2760 // to Deoptimization::fetch_unroll_info below. 2761 // We can't grab a free register here, because all registers may 2762 // contain live values, so let the RegisterSaver do the adjustment 2763 // of the return pc. 2764 const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler(); 2765 2766 // Push the "unpack frame" 2767 // Save everything in sight. 
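  // The returned OopMap describes this save area; it is installed below at
  // the fetch_unroll_info call site.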
2768 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2769 &first_frame_size_in_bytes, 2770 /*generate_oop_map=*/ true, 2771 return_pc_adjustment_no_exception, 2772 RegisterSaver::return_pc_is_lr); 2773 assert(map != NULL, "OopMap must have been created"); 2774 2775 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2776 // Save exec mode for unpack_frames. 2777 __ b(exec_mode_initialized); 2778 2779 // -------------------------------------------------------------------------- 2780 // Prolog for exception case 2781 2782 // An exception is pending. 2783 // We have been called with a return (interpreter) or a jump (exception blob). 2784 // 2785 // - R3_ARG1: exception oop 2786 // - R4_ARG2: exception pc 2787 2788 int exception_offset = __ pc() - start; 2789 2790 BLOCK_COMMENT("Prolog for exception case"); 2791 2792 // Store exception oop and pc in thread (location known to GC). 2793 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2794 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2795 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2796 __ std(R4_ARG2, _abi(lr), R1_SP); 2797 2798 // Vanilla deoptimization with an exception pending in exception_oop. 2799 int exception_in_tls_offset = __ pc() - start; 2800 2801 // Push the "unpack frame". 2802 // Save everything in sight. 2803 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2804 &first_frame_size_in_bytes, 2805 /*generate_oop_map=*/ false, 2806 /*return_pc_adjustment_exception=*/ 0, 2807 RegisterSaver::return_pc_is_pre_saved); 2808 2809 // Deopt during an exception. Save exec mode for unpack_frames. 2810 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2811 2812 // fall through 2813 2814 int reexecute_offset = 0; 2815 #ifdef COMPILER1 2816 __ b(exec_mode_initialized); 2817 2818 // Reexecute entry, similar to c2 uncommon trap 2819 reexecute_offset = __ pc() - start; 2820 2821 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2822 &first_frame_size_in_bytes, 2823 /*generate_oop_map=*/ false, 2824 /*return_pc_adjustment_reexecute=*/ 0, 2825 RegisterSaver::return_pc_is_pre_saved); 2826 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2827 #endif 2828 2829 // -------------------------------------------------------------------------- 2830 __ BIND(exec_mode_initialized); 2831 2832 { 2833 const Register unroll_block_reg = R22_tmp2; 2834 2835 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2836 // call `last_Java_frame()'. The value of the pc in the frame is not 2837 // particularly important. It just needs to identify this blob. 2838 __ set_last_Java_frame(R1_SP, noreg); 2839 2840 // With EscapeAnalysis turned on, this call may safepoint! 2841 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2842 address calls_return_pc = __ last_calls_return_pc(); 2843 // Set an oopmap for the call site that describes all our saved registers. 2844 oop_maps->add_gc_map(calls_return_pc - start, map); 2845 2846 __ reset_last_Java_frame(); 2847 // Save the return value. 2848 __ mr(unroll_block_reg, R3_RET); 2849 2850 // Restore only the result registers that have been saved 2851 // by save_volatile_registers(...). 
2852 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2853 2854 // reload the exec mode from the UnrollBlock (it might have changed) 2855 __ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2856 // In excp_deopt_mode, restore and clear exception oop which we 2857 // stored in the thread during exception entry above. The exception 2858 // oop will be the return value of this stub. 2859 Label skip_restore_excp; 2860 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2861 __ bne(CCR0, skip_restore_excp); 2862 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2863 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2864 __ li(R0, 0); 2865 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2866 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2867 __ BIND(skip_restore_excp); 2868 2869 __ pop_frame(); 2870 2871 // stack: (deoptee, optional i2c, caller of deoptee, ...). 2872 2873 // pop the deoptee's frame 2874 __ pop_frame(); 2875 2876 // stack: (caller_of_deoptee, ...). 2877 2878 // Loop through the `UnrollBlock' info and create interpreter frames. 2879 push_skeleton_frames(masm, true/*deopt*/, 2880 unroll_block_reg, 2881 R23_tmp3, 2882 R24_tmp4, 2883 R25_tmp5, 2884 R26_tmp6, 2885 R27_tmp7); 2886 2887 // stack: (skeletal interpreter frame, ..., optional skeletal 2888 // interpreter frame, optional c2i, caller of deoptee, ...). 2889 } 2890 2891 // push an `unpack_frame' taking care of float / int return values. 2892 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 2893 2894 // stack: (unpack frame, skeletal interpreter frame, ..., optional 2895 // skeletal interpreter frame, optional c2i, caller of deoptee, 2896 // ...). 2897 2898 // Spill live volatile registers since we'll do a call. 2899 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2900 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2901 2902 // Let the unpacker layout information in the skeletal frames just 2903 // allocated. 2904 __ get_PC_trash_LR(R3_RET); 2905 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 2906 // This is a call to a LEAF method, so no oop map is required. 2907 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 2908 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 2909 __ reset_last_Java_frame(); 2910 2911 // Restore the volatiles saved above. 2912 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2913 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2914 2915 // Pop the unpack frame. 2916 __ pop_frame(); 2917 __ restore_LR_CR(R0); 2918 2919 // stack: (top interpreter frame, ..., optional interpreter frame, 2920 // optional c2i, caller of deoptee, ...). 2921 2922 // Initialize R14_state. 2923 __ restore_interpreter_state(R11_scratch1); 2924 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2925 2926 // Return to the interpreter entry point. 
2927 __ blr(); 2928 __ flush(); 2929 #else // COMPILER2 2930 __ unimplemented("deopt blob needed only with compiler"); 2931 int exception_offset = __ pc() - start; 2932 #endif // COMPILER2 2933 2934 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 2935 reexecute_offset, first_frame_size_in_bytes / wordSize); 2936 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2937 } 2938 2939 #ifdef COMPILER2 2940 void SharedRuntime::generate_uncommon_trap_blob() { 2941 // Allocate space for the code. 2942 ResourceMark rm; 2943 // Setup code generation tools. 2944 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 2945 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2946 address start = __ pc(); 2947 2948 Register unroll_block_reg = R21_tmp1; 2949 Register klass_index_reg = R22_tmp2; 2950 Register unc_trap_reg = R23_tmp3; 2951 2952 OopMapSet* oop_maps = new OopMapSet(); 2953 int frame_size_in_bytes = frame::abi_reg_args_size; 2954 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 2955 2956 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2957 2958 // Push a dummy `unpack_frame' and call 2959 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 2960 // vframe array and return the `UnrollBlock' information. 2961 2962 // Save LR to compiled frame. 2963 __ save_LR_CR(R11_scratch1); 2964 2965 // Push an "uncommon_trap" frame. 2966 __ push_frame_reg_args(0, R11_scratch1); 2967 2968 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 2969 2970 // Set the `unpack_frame' as last_Java_frame. 2971 // `Deoptimization::uncommon_trap' expects it and considers its 2972 // sender frame as the deoptee frame. 2973 // Remember the offset of the instruction whose address will be 2974 // moved to R11_scratch1. 2975 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 2976 2977 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 2978 2979 __ mr(klass_index_reg, R3); 2980 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 2981 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 2982 R16_thread, klass_index_reg, R5_ARG3); 2983 2984 // Set an oopmap for the call site. 2985 oop_maps->add_gc_map(gc_map_pc - start, map); 2986 2987 __ reset_last_Java_frame(); 2988 2989 // Pop the `unpack frame'. 2990 __ pop_frame(); 2991 2992 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2993 2994 // Save the return value. 2995 __ mr(unroll_block_reg, R3_RET); 2996 2997 // Pop the uncommon_trap frame. 2998 __ pop_frame(); 2999 3000 // stack: (caller_of_deoptee, ...). 3001 3002 #ifdef ASSERT 3003 __ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 3004 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3005 __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0); 3006 #endif 3007 3008 // Allocate new interpreter frame(s) and possibly a c2i adapter 3009 // frame. 3010 push_skeleton_frames(masm, false/*deopt*/, 3011 unroll_block_reg, 3012 R22_tmp2, 3013 R23_tmp3, 3014 R24_tmp4, 3015 R25_tmp5, 3016 R26_tmp6); 3017 3018 // stack: (skeletal interpreter frame, ..., optional skeletal 3019 // interpreter frame, optional c2i, caller of deoptee, ...). 3020 3021 // Push a dummy `unpack_frame' taking care of float return values. 3022 // Call `Deoptimization::unpack_frames' to layout information in the 3023 // interpreter frames just created. 
3024
3025   // Push a simple "unpack frame" here.
3026   __ push_frame_reg_args(0, R11_scratch1);
3027
3028   // stack: (unpack frame, skeletal interpreter frame, ..., optional
3029   // skeletal interpreter frame, optional c2i, caller of deoptee,
3030   // ...).
3031
3032   // Set the "unpack_frame" as last_Java_frame.
3033   __ get_PC_trash_LR(R11_scratch1);
3034   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3035
3036   // Indicate it is the uncommon trap case.
3037   __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
3038   // Let the unpacker lay out information in the skeletal frames just
3039   // allocated.
3040   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3041                   R16_thread, unc_trap_reg);
3042
3043   __ reset_last_Java_frame();
3044   // Pop the `unpack frame'.
3045   __ pop_frame();
3046   // Restore LR from top interpreter frame.
3047   __ restore_LR_CR(R11_scratch1);
3048
3049   // stack: (top interpreter frame, ..., optional interpreter frame,
3050   // optional c2i, caller of deoptee, ...).
3051
3052   __ restore_interpreter_state(R11_scratch1);
3053   __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3054
3055   // Return to the interpreter entry point.
3056   __ blr();
3057
3058   masm->flush();
3059
3060   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3061 }
3062 #endif // COMPILER2
3063
3064 // Generate a special Compile2Runtime blob that saves all registers, and sets up the oopmap.
3065 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3066   assert(StubRoutines::forward_exception_entry() != NULL,
3067          "must be generated before");
3068
3069   ResourceMark rm;
3070   OopMapSet *oop_maps = new OopMapSet();
3071   OopMap* map;
3072
3073   // Allocate space for the code. Setup code generation tools.
3074   CodeBuffer buffer("handler_blob", 2048, 1024);
3075   MacroAssembler* masm = new MacroAssembler(&buffer);
3076
3077   address start = __ pc();
3078   int frame_size_in_bytes = 0;
3079
3080   RegisterSaver::ReturnPCLocation return_pc_location;
3081   bool cause_return = (poll_type == POLL_AT_RETURN);
3082   if (cause_return) {
3083     // Nothing to do here. The frame has already been popped in MachEpilogNode.
3084     // Register LR already contains the return pc.
3085     return_pc_location = RegisterSaver::return_pc_is_lr;
3086   } else {
3087     // Use thread()->saved_exception_pc() as return pc.
3088     return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3089   }
3090
3091   // Save registers, fpu state, and flags. Set R31 = return pc.
3092   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3093                                                                    &frame_size_in_bytes,
3094                                                                    /*generate_oop_map=*/ true,
3095                                                                    /*return_pc_adjustment=*/0,
3096                                                                    return_pc_location);
3097
3098   // The following is basically a call_VM. However, we need the precise
3099   // address of the call in order to generate an oopmap. Hence, we do all the
3100   // work ourselves.
3101   __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3102
3103   // The return address must always be correct so that the frame constructor
3104   // never sees an invalid pc.
3105
3106   // Do the call
3107   __ call_VM_leaf(call_ptr, R16_thread);
3108   address calls_return_pc = __ last_calls_return_pc();
3109
3110   // Set an oopmap for the call site. This oopmap will map all
3111   // oop-registers and debug-info registers as callee-saved.
This 3112 // will allow deoptimization at this safepoint to find all possible 3113 // debug-info recordings, as well as let GC find all oops. 3114 oop_maps->add_gc_map(calls_return_pc - start, map); 3115 3116 Label noException; 3117 3118 // Clear the last Java frame. 3119 __ reset_last_Java_frame(); 3120 3121 BLOCK_COMMENT(" Check pending exception."); 3122 const Register pending_exception = R0; 3123 __ ld(pending_exception, thread_(pending_exception)); 3124 __ cmpdi(CCR0, pending_exception, 0); 3125 __ beq(CCR0, noException); 3126 3127 // Exception pending 3128 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3129 frame_size_in_bytes, 3130 /*restore_ctr=*/true); 3131 3132 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3133 // Jump to forward_exception_entry, with the issuing PC in LR 3134 // so it looks like the original nmethod called forward_exception_entry. 3135 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3136 3137 // No exception case. 3138 __ BIND(noException); 3139 3140 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) { 3141 Label no_adjust; 3142 // If our stashed return pc was modified by the runtime we avoid touching it 3143 __ ld(R0, frame_size_in_bytes + _abi(lr), R1_SP); 3144 __ cmpd(CCR0, R0, R31); 3145 __ bne(CCR0, no_adjust); 3146 3147 // Adjust return pc forward to step over the safepoint poll instruction 3148 __ addi(R31, R31, 4); 3149 __ std(R31, frame_size_in_bytes + _abi(lr), R1_SP); 3150 3151 __ bind(no_adjust); 3152 } 3153 3154 // Normal exit, restore registers and exit. 3155 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3156 frame_size_in_bytes, 3157 /*restore_ctr=*/true); 3158 3159 __ blr(); 3160 3161 // Make sure all code is generated 3162 masm->flush(); 3163 3164 // Fill-out other meta info 3165 // CodeBlob frame size is in words. 3166 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3167 } 3168 3169 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3170 // 3171 // Generate a stub that calls into the vm to find out the proper destination 3172 // of a java call. All the argument registers are live at this point 3173 // but since this is generic code we don't know what they are and the caller 3174 // must do any gc of the args. 3175 // 3176 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 3177 3178 // allocate space for the code 3179 ResourceMark rm; 3180 3181 CodeBuffer buffer(name, 1000, 512); 3182 MacroAssembler* masm = new MacroAssembler(&buffer); 3183 3184 int frame_size_in_bytes; 3185 3186 OopMapSet *oop_maps = new OopMapSet(); 3187 OopMap* map = NULL; 3188 3189 address start = __ pc(); 3190 3191 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3192 &frame_size_in_bytes, 3193 /*generate_oop_map*/ true, 3194 /*return_pc_adjustment*/ 0, 3195 RegisterSaver::return_pc_is_lr); 3196 3197 // Use noreg as last_Java_pc, the return pc will be reconstructed 3198 // from the physical frame. 3199 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3200 3201 int frame_complete = __ offset(); 3202 3203 // Pass R19_method as 2nd (optional) argument, used by 3204 // counter_overflow_stub. 3205 __ call_VM_leaf(destination, R16_thread, R19_method); 3206 address calls_return_pc = __ last_calls_return_pc(); 3207 // Set an oopmap for the call site. 

// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find the proper destination
// of a Java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are, and the
// caller must do any GC of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {

  // Allocate space for the code.
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_bytes;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  address start = __ pc();

  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map*/ true,
                                                                   /*return_pc_adjustment*/ 0,
                                                                   RegisterSaver::return_pc_is_lr);

  // Use noreg as last_Java_pc, the return pc will be reconstructed
  // from the physical frame.
  __ set_last_Java_frame(/*sp*/R1_SP, noreg);

  int frame_complete = __ offset();

  // Pass R19_method as 2nd (optional) argument, used by
  // counter_overflow_stub.
  __ call_VM_leaf(destination, R16_thread, R19_method);
  address calls_return_pc = __ last_calls_return_pc();
  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
  // Create the oopmap for the call's return pc.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  // R3_RET contains the address we are going to jump to, assuming no exception was installed.

  // Clear last_Java_sp.
  __ reset_last_Java_frame();

  // Check for pending exceptions.
  BLOCK_COMMENT("Check for pending exceptions.");
  Label pending;
  __ ld(R11_scratch1, thread_(pending_exception));
  __ cmpdi(CCR0, R11_scratch1, 0);
  __ bne(CCR0, pending);

  __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

  // Get the returned method.
  __ get_vm_result_2(R19_method);

  __ bctr();

  // Pending exception after the safepoint.
  __ BIND(pending);

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);

  // Exception pending => remove activation and forward to the exception handler.

  __ li(R11_scratch1, 0);
  __ ld(R3_ARG1, thread_(pending_exception));
  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // -------------
  // Make sure all code is generated.
  masm->flush();

  // Return the blob. Note that new_runtime_stub() expects the frame size
  // in words, hence the division by wordSize.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
                                       oop_maps, true);
}


//------------------------------Montgomery multiplication------------------------
//

// Subtract 0:b from carry:a. Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0;
  unsigned long tmp, tmp2;
  __asm__ __volatile__ (
    "subfc  %[tmp], %[tmp], %[tmp]   \n" // pre-set CA
    "mtctr  %[len]                   \n"
    "0:                              \n"
    "ldx    %[tmp], %[i], %[a]       \n"
    "ldx    %[tmp2], %[i], %[b]      \n"
    "subfe  %[tmp], %[tmp2], %[tmp]  \n" // subtract extended
    "stdx   %[tmp], %[i], %[a]       \n"
    "addi   %[i], %[i], 8            \n"
    "bdnz   0b                       \n"
    "addme  %[tmp], %[carry]         \n" // carry + CA - 1
    : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
    : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
    : "ctr", "xer", "memory"
  );
  return tmp;
}

// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}
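
// For illustration only (not used by the VM): a portable sketch of the
// operation MACC performs, assuming the GCC/Clang unsigned __int128
// extension. The name macc_reference is ours, not part of HotSpot.
inline void macc_reference(unsigned long A, unsigned long B,
                           unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned __int128 prod = (unsigned __int128)A * B;
  unsigned __int128 sum  = (unsigned __int128)T0 + (unsigned long)prod;  // T0 += lo(prod), keep the carry
  T0 = (unsigned long)sum;
  sum = (unsigned __int128)T1 + (unsigned long)(prod >> 64)
                              + (unsigned long)(sum >> 64);              // T1 += hi(prod) + carry
  T1 = (unsigned long)sum;
  T2 += (unsigned long)(sum >> 64);                                      // absorb the final carry
}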

// As above, but add twice the double-length result into the
// accumulator.
inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}

// Fast Montgomery multiplication. The derivation of the algorithm is
// in "A Cryptographic Library for the Motorola DSP56000",
// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies, so it should be up to 25% faster than Montgomery
// multiplication. However, its loop control is more complex and it
// may actually run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}
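
// Illustration only (not used by the VM): the invariant the loops above
// maintain, shown for the trivial len == 1 case. Assumes the GCC/Clang
// unsigned __int128 extension and, to keep the 128-bit intermediate from
// overflowing, a modulus n below 2^63. The name is ours, not HotSpot's.
// Returns a * b * 2^-64 mod n, given inv == -n^-1 mod 2^64 and a, b < n.
inline unsigned long montgomery_multiply_one_word(unsigned long a, unsigned long b,
                                                  unsigned long n, unsigned long inv) {
  unsigned __int128 t = (unsigned __int128)a * b;
  unsigned long m = (unsigned long)t * inv;  // choose m so the low word of t + m*n is zero
  t += (unsigned __int128)m * n;             // t is now divisible by 2^64
  unsigned long r = (unsigned long)(t >> 64);
  return (r >= n) ? r - n : r;               // one conditional subtraction, mirroring the while (t0) loops above
}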

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge) CPU @ 3.5GHz.
// Doesn't seem to be relevant for Power8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64

// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while (len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 8k bytes of stack space.
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 6k bytes of stack space.
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
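
// Layout illustration (ours, not part of the original code): BigInteger
// passes magnitudes as big-endian jint arrays, most significant word first,
// while the multiply loops above want little-endian arrays of 64-bit
// longwords. For the 128-bit value 0x00010002000300040005000600070008 the
// incoming jint array is { 0x00010002, 0x00030004, 0x00050006, 0x00070008 };
// reverse_words() turns it into the longword array
// { 0x0005000600070008, 0x0001000200030004 }, and a second call converts
// the result back before it is handed to Java.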