/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0 ),
  RegisterSaver_LiveFloatReg( F1 ),
  RegisterSaver_LiveFloatReg( F2 ),
  RegisterSaver_LiveFloatReg( F3 ),
  RegisterSaver_LiveFloatReg( F4 ),
  RegisterSaver_LiveFloatReg( F5 ),
  RegisterSaver_LiveFloatReg( F6 ),
  RegisterSaver_LiveFloatReg( F7 ),
  RegisterSaver_LiveFloatReg( F8 ),
  RegisterSaver_LiveFloatReg( F9 ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg( R0 ),
  //RegisterSaver_LiveIntReg( R1 ), // stack pointer
  RegisterSaver_LiveIntReg( R2 ),
  RegisterSaver_LiveIntReg( R3 ),
  RegisterSaver_LiveIntReg( R4 ),
  RegisterSaver_LiveIntReg( R5 ),
  RegisterSaver_LiveIntReg( R6 ),
  RegisterSaver_LiveIntReg( R7 ),
  RegisterSaver_LiveIntReg( R8 ),
  RegisterSaver_LiveIntReg( R9 ),
  RegisterSaver_LiveIntReg( R10 ),
  RegisterSaver_LiveIntReg( R11 ),
  RegisterSaver_LiveIntReg( R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg( R14 ),
  RegisterSaver_LiveIntReg( R15 ),
  RegisterSaver_LiveIntReg( R16 ),
  RegisterSaver_LiveIntReg( R17 ),
  RegisterSaver_LiveIntReg( R18 ),
  RegisterSaver_LiveIntReg( R19 ),
  RegisterSaver_LiveIntReg( R20 ),
  RegisterSaver_LiveIntReg( R21 ),
  RegisterSaver_LiveIntReg( R22 ),
  RegisterSaver_LiveIntReg( R23 ),
  RegisterSaver_LiveIntReg( R24 ),
  RegisterSaver_LiveIntReg( R25 ),
  RegisterSaver_LiveIntReg( R26 ),
  RegisterSaver_LiveIntReg( R27 ),
  RegisterSaver_LiveIntReg( R28 ),
  RegisterSaver_LiveIntReg( R29 ),
  RegisterSaver_LiveIntReg( R30 ),
  RegisterSaver_LiveIntReg( R31 ), // must be the last register (see save/restore functions below)
};
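
// A rough sketch of the frame laid out by push_frame_reg_args_and_save_live_registers
// below (offsets grow upward from the new R1_SP, one reg_size slot per array
// entry, in array order):
//
//   [R1_SP + 0 .. abi_reg_args_size)   ABI_REG_ARGS area
//   [register_save_offset]             CTR
//   [.. + reg_size, ...]               F0 .. F31
//   [...]                              R0, R2 .. R12, R14 .. R30
//   [frame_size_in_bytes - reg_size]   R31 (last entry; spilled before the
//                                      frame is pushed so it can serve as a
//                                      scratch register, see below)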

OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
  // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).

  int i;
  int offset;

  // Calculate the frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_reg_args_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // Save some registers in the last slots of the not yet pushed frame so that we
  // can use them as scratch regs.
  __ std(R31, -  reg_size, R1_SP);
  __ std(R30, -2*reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr:                        __ mflr(R31); break;
    case return_pc_is_pre_saved:                 assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, _abi(lr), R1_SP);
  }

  // push a new frame
  __ push_frame(frame_size_in_bytes, R30);

  // save all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num < 30) { // We spilled R30-31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // pop the frame
  __ pop_frame();

  // restore the flags
  __ restore_LR_CR(R31);

  // restore scratch register's value
  __ ld(R31, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}
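
// The two helpers above form a matched pair around leaf calls that must not
// clobber the Java argument registers; see their use in gen_c2i_adapter
// below:
//
//   RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);
//   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);
//   RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);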

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return Assembler::load_const_size + 8;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  Register Rtemp = R12;
  __ load_const(Rtemp, destination);
  __ mtctr(Rtemp);
  __ bctr();
}

#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
// 0 up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
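
// As a worked (purely illustrative) example of this shifted convention: for a
// Java signature (int, long, double, Object), java_calling_convention below
// assigns R3 to the int, R4 to the long, F1 to the double and R5 to the oop;
// int/long/oop arguments consume java_iarg_reg[0..7] in order while
// float/double arguments independently consume java_farg_reg[0..12], and any
// overflow goes to outgoing stack slots (longs/doubles 2-slot aligned).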

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack. Float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return align_up(stk, 2);
}

#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail-out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. Thus fall through, handle as long.
    case T_LONG:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
#if defined(LINUX)
      // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
      // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
      // Although AIX runs on big endian CPU, float is in the most
      // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return align_up(stk, 2);
}
#endif // COMPILER1 || COMPILER2
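
// An illustrative example of the C convention above: for a JNI call
// (JNIEnv*, jobject, jint, jfloat, jdouble), the env pointer lands in R3, the
// receiver handle in R4, the jint in R5, and the jfloat/jdouble in F1/F2; the
// float register sequence runs independently of the int one, but every
// argument still consumes one 2-slot argument position. A float/double beyond
// the first Argument::n_regs_not_on_stack_c argument positions is
// additionally copied to its stack slot via regs2, as described in the
// comments above.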

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: the sender SP is live on entry. We must preserve it since we may
  // do an i2c -> c2i transition if we lose a race where compiled code goes
  // non-entrant while we get args ready.
  // In addition we use the sender SP to locate all the interpreter args, and
  // we must keep the stack 16-byte aligned on an i2c entry, else we lose the
  // alignment that all compiled code and the register save code expect.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}
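
// A rough sketch of the interpreter argument area the shuffle above reads:
// ld_ptr (R15_esp) points below the arguments, argument 0 sits at the highest
// offset (total_args_passed * wordSize), and each Java argument word occupies
// one wordSize slot. Longs and doubles span two slots with the payload in the
// lower-offset one, which is why the loop skips a slot before loading them
// (mirroring the junk slot written by gen_c2i_adapter above).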

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a compiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

#ifdef COMPILER2
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld(  r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                         + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop, oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}
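
// Put differently: the "handle" handed on to native code is the address of
// the stack slot that holds the oop (so the GC can relocate the oop without
// the native code noticing), and a NULL oop degenerates to a NULL handle,
// matching the JNI convention that a null reference arrives as a null
// jobject.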

static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid(), "incoming must be int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }
  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      // Value lives in an input register. Save it on stack.
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ stw(in_regs[i].first()->as_Register(), offset, R1_SP);
          } else {
            __ lwa(in_regs[i].first()->as_Register(), offset, R1_SP);
          }
          slot++;
          assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)");
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
  __ call_c(entry_point, relocInfo::runtime_call_type);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if Array=null.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}
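
// Net effect (sketch): a critical native taking a Java int[] receives the
// pair (body pointer, length) in place of the array argument, and (NULL, 0)
// if the array reference itself is NULL. The native wrapper below relies on
// this when it builds the "unpacked arrays (base+length)" C signature for
// critical natives.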
1637 } 1638 1639 static void verify_oop_args(MacroAssembler* masm, 1640 const methodHandle& method, 1641 const BasicType* sig_bt, 1642 const VMRegPair* regs) { 1643 Register temp_reg = R19_method; // not part of any compiled calling seq 1644 if (VerifyOops) { 1645 for (int i = 0; i < method->size_of_parameters(); i++) { 1646 if (sig_bt[i] == T_OBJECT || 1647 sig_bt[i] == T_ARRAY) { 1648 VMReg r = regs[i].first(); 1649 assert(r->is_valid(), "bad oop arg"); 1650 if (r->is_stack()) { 1651 __ ld(temp_reg, reg2offset(r), R1_SP); 1652 __ verify_oop(temp_reg); 1653 } else { 1654 __ verify_oop(r->as_Register()); 1655 } 1656 } 1657 } 1658 } 1659 } 1660 1661 static void gen_special_dispatch(MacroAssembler* masm, 1662 const methodHandle& method, 1663 const BasicType* sig_bt, 1664 const VMRegPair* regs) { 1665 verify_oop_args(masm, method, sig_bt, regs); 1666 vmIntrinsics::ID iid = method->intrinsic_id(); 1667 1668 // Now write the args into the outgoing interpreter space 1669 bool has_receiver = false; 1670 Register receiver_reg = noreg; 1671 int member_arg_pos = -1; 1672 Register member_reg = noreg; 1673 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1674 if (ref_kind != 0) { 1675 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1676 member_reg = R19_method; // known to be free at this point 1677 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1678 } else if (iid == vmIntrinsics::_invokeBasic) { 1679 has_receiver = true; 1680 } else { 1681 fatal("unexpected intrinsic id %d", iid); 1682 } 1683 1684 if (member_reg != noreg) { 1685 // Load the member_arg into register, if necessary. 1686 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1687 VMReg r = regs[member_arg_pos].first(); 1688 if (r->is_stack()) { 1689 __ ld(member_reg, reg2offset(r), R1_SP); 1690 } else { 1691 // no data motion is needed 1692 member_reg = r->as_Register(); 1693 } 1694 } 1695 1696 if (has_receiver) { 1697 // Make sure the receiver is loaded into a register. 1698 assert(method->size_of_parameters() > 0, "oob"); 1699 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1700 VMReg r = regs[0].first(); 1701 assert(r->is_valid(), "bad receiver arg"); 1702 if (r->is_stack()) { 1703 // Porting note: This assumes that compiled calling conventions always 1704 // pass the receiver oop in a register. If this is not true on some 1705 // platform, pick a temp and load the receiver from stack. 1706 fatal("receiver always in a register"); 1707 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point? 1708 __ ld(receiver_reg, reg2offset(r), R1_SP); 1709 } else { 1710 // no data motion is needed 1711 receiver_reg = r->as_Register(); 1712 } 1713 } 1714 1715 // Figure out which address we are really jumping to: 1716 MethodHandles::generate_method_handle_dispatch(masm, iid, 1717 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1718 } 1719 1720 #endif // COMPILER2 1721 1722 // --------------------------------------------------------------------------- 1723 // Generate a native wrapper for a given method. The method takes arguments 1724 // in the Java compiled code convention, marshals them to the native 1725 // convention (handlizes oops, etc), transitions to native, makes the call, 1726 // returns to java state (possibly blocking), unhandlizes any result and 1727 // returns. 
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//   if (GCLocker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//   call into the JVM and possibly unlock the JNI critical
//   if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {
#ifdef COMPILER2
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }

  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // First, create signature for outgoing C call
  // --------------------------------------------------------------------------

  int total_in_args = method->size_of_parameters();
  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).

  // Calculate the total number of C arguments and create arrays for the
  // signature and the outgoing registers.
  // On ppc64, we have two arrays for the outgoing registers, because
  // some floating-point arguments must be passed in registers _and_
  // in stack locations.
  bool method_is_static = method->is_static();
  int  total_c_args     = total_in_args;

  if (!is_critical_native) {
    int n_hidden_args = method_is_static ? 2 : 1;
    total_c_args += n_hidden_args;
  } else {
    // No JNIEnv*, no this*, but unpacked arrays (base+length).
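    // For example (illustrative): a critical native taking (jint, jbyteArray)
    // is invoked as (jint, jint length, jbyte* body), so each T_ARRAY
    // parameter contributes one extra C argument.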
1811 for (int i = 0; i < total_in_args; i++) { 1812 if (in_sig_bt[i] == T_ARRAY) { 1813 total_c_args++; 1814 } 1815 } 1816 } 1817 1818 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1819 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1820 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1821 BasicType* in_elem_bt = NULL; 1822 1823 // Create the signature for the C call: 1824 // 1) add the JNIEnv* 1825 // 2) add the class if the method is static 1826 // 3) copy the rest of the incoming signature (shifted by the number of 1827 // hidden arguments). 1828 1829 int argc = 0; 1830 if (!is_critical_native) { 1831 out_sig_bt[argc++] = T_ADDRESS; 1832 if (method->is_static()) { 1833 out_sig_bt[argc++] = T_OBJECT; 1834 } 1835 1836 for (int i = 0; i < total_in_args ; i++ ) { 1837 out_sig_bt[argc++] = in_sig_bt[i]; 1838 } 1839 } else { 1840 Thread* THREAD = Thread::current(); 1841 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1842 SignatureStream ss(method->signature()); 1843 int o = 0; 1844 for (int i = 0; i < total_in_args ; i++, o++) { 1845 if (in_sig_bt[i] == T_ARRAY) { 1846 // Arrays are passed as int, elem* pair 1847 Symbol* atype = ss.as_symbol(CHECK_NULL); 1848 const char* at = atype->as_C_string(); 1849 if (strlen(at) == 2) { 1850 assert(at[0] == '[', "must be"); 1851 switch (at[1]) { 1852 case 'B': in_elem_bt[o] = T_BYTE; break; 1853 case 'C': in_elem_bt[o] = T_CHAR; break; 1854 case 'D': in_elem_bt[o] = T_DOUBLE; break; 1855 case 'F': in_elem_bt[o] = T_FLOAT; break; 1856 case 'I': in_elem_bt[o] = T_INT; break; 1857 case 'J': in_elem_bt[o] = T_LONG; break; 1858 case 'S': in_elem_bt[o] = T_SHORT; break; 1859 case 'Z': in_elem_bt[o] = T_BOOLEAN; break; 1860 default: ShouldNotReachHere(); 1861 } 1862 } 1863 } else { 1864 in_elem_bt[o] = T_VOID; 1865 } 1866 if (in_sig_bt[i] != T_VOID) { 1867 assert(in_sig_bt[i] == ss.type(), "must match"); 1868 ss.next(); 1869 } 1870 } 1871 1872 for (int i = 0; i < total_in_args ; i++ ) { 1873 if (in_sig_bt[i] == T_ARRAY) { 1874 // Arrays are passed as int, elem* pair. 1875 out_sig_bt[argc++] = T_INT; 1876 out_sig_bt[argc++] = T_ADDRESS; 1877 } else { 1878 out_sig_bt[argc++] = in_sig_bt[i]; 1879 } 1880 } 1881 } 1882 1883 1884 // Compute the wrapper's frame size. 1885 // -------------------------------------------------------------------------- 1886 1887 // Now figure out where the args must be stored and how much stack space 1888 // they require. 1889 // 1890 // Compute framesize for the wrapper. We need to handlize all oops in 1891 // incoming registers. 1892 // 1893 // Calculate the total number of stack slots we will need: 1894 // 1) abi requirements 1895 // 2) outgoing arguments 1896 // 3) space for inbound oop handle area 1897 // 4) space for handlizing a klass if static method 1898 // 5) space for a lock if synchronized method 1899 // 6) workspace for saving return values, int <-> float reg moves, etc. 
1900 // 7) alignment 1901 // 1902 // Layout of the native wrapper frame: 1903 // (stack grows upwards, memory grows downwards) 1904 // 1905 // NW [ABI_REG_ARGS] <-- 1) R1_SP 1906 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 1907 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives) 1908 // klass <-- 4) R1_SP + klass_offset 1909 // lock <-- 5) R1_SP + lock_offset 1910 // [workspace] <-- 6) R1_SP + workspace_offset 1911 // [alignment] (optional) <-- 7) 1912 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 1913 // 1914 // - *_slot_offset Indicates offset from SP in number of stack slots. 1915 // - *_offset Indicates offset from SP in bytes. 1916 1917 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) + // 1+2) 1918 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 1919 1920 // Now the space for the inbound oop handle area. 1921 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 1922 if (is_critical_native) { 1923 // Critical natives may have to call out so they need a save area 1924 // for register arguments. 1925 int double_slots = 0; 1926 int single_slots = 0; 1927 for (int i = 0; i < total_in_args; i++) { 1928 if (in_regs[i].first()->is_Register()) { 1929 const Register reg = in_regs[i].first()->as_Register(); 1930 switch (in_sig_bt[i]) { 1931 case T_BOOLEAN: 1932 case T_BYTE: 1933 case T_SHORT: 1934 case T_CHAR: 1935 case T_INT: 1936 // Fall through. 1937 case T_ARRAY: 1938 case T_LONG: double_slots++; break; 1939 default: ShouldNotReachHere(); 1940 } 1941 } else if (in_regs[i].first()->is_FloatRegister()) { 1942 switch (in_sig_bt[i]) { 1943 case T_FLOAT: single_slots++; break; 1944 case T_DOUBLE: double_slots++; break; 1945 default: ShouldNotReachHere(); 1946 } 1947 } 1948 } 1949 total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even 1950 } 1951 1952 int oop_handle_slot_offset = stack_slots; 1953 stack_slots += total_save_slots; // 3) 1954 1955 int klass_slot_offset = 0; 1956 int klass_offset = -1; 1957 if (method_is_static && !is_critical_native) { // 4) 1958 klass_slot_offset = stack_slots; 1959 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1960 stack_slots += VMRegImpl::slots_per_word; 1961 } 1962 1963 int lock_slot_offset = 0; 1964 int lock_offset = -1; 1965 if (method->is_synchronized()) { // 5) 1966 lock_slot_offset = stack_slots; 1967 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 1968 stack_slots += VMRegImpl::slots_per_word; 1969 } 1970 1971 int workspace_slot_offset = stack_slots; // 6) 1972 stack_slots += 2; 1973 1974 // Now compute actual number of stack words we need. 1975 // Rounding to make stack properly aligned. 1976 stack_slots = align_up(stack_slots, // 7) 1977 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 1978 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 1979 1980 1981 // Now we can start generating code. 
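  // Before we do, an illustrative sizing example for the frame computed above
  // (values assumed, not normative): with 4-byte stack slots and 16-byte frame
  // alignment, a synchronized static method adds slots_per_word slots each for
  // the klass handle (4) and the lock (5), plus the 2 workspace slots (6),
  // before the final align_up in step (7).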
  // --------------------------------------------------------------------------

  intptr_t start_pc = (intptr_t)__ pc();
  intptr_t vep_start_pc;
  intptr_t frame_done_pc;
  intptr_t oopmap_pc;

  Label    ic_miss;
  Label    handle_pending_exception;

  Register r_callers_sp = R21;
  Register r_temp_1     = R22;
  Register r_temp_2     = R23;
  Register r_temp_3     = R24;
  Register r_temp_4     = R25;
  Register r_temp_5     = R26;
  Register r_temp_6     = R27;
  Register r_return_pc  = R28;

  Register r_carg1_jnienv        = noreg;
  Register r_carg2_classorobject = noreg;
  if (!is_critical_native) {
    r_carg1_jnienv        = out_regs[0].first()->as_Register();
    r_carg2_classorobject = out_regs[1].first()->as_Register();
  }


  // Generate the Unverified Entry Point (UEP).
  // --------------------------------------------------------------------------
  assert(start_pc == (intptr_t)__ pc(), "uep must be at start");

  // Check ic: object class == cached class?
  if (!method_is_static) {
    Register ic = as_Register(Matcher::inline_cache_reg_encode());
    Register receiver_klass = r_temp_1;

    __ cmpdi(CCR0, R3_ARG1, 0);
    __ beq(CCR0, ic_miss);
    __ verify_oop(R3_ARG1);
    __ load_klass(receiver_klass, R3_ARG1);

    __ cmpd(CCR0, receiver_klass, ic);
    __ bne(CCR0, ic_miss);
  }


  // Generate the Verified Entry Point (VEP).
  // --------------------------------------------------------------------------
  vep_start_pc = (intptr_t)__ pc();

  __ save_LR_CR(r_temp_1);
  __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
  __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
  __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
  frame_done_pc = (intptr_t)__ pc();

  __ verify_thread();

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments.
  // The only thing we need an oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself.
  OopMapSet *oop_maps = new OopMapSet();
  OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset,
                                       oop_maps, in_regs, in_sig_bt, r_temp_1);
  }

  // Move arguments from register/stack to register/stack.
  // --------------------------------------------------------------------------
  //
  // We immediately shuffle the arguments so that for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for them.
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in R16_thread) and, if static,
  // the class mirror instead of a receiver. This pretty much guarantees that
  // register layout will not match. We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.

  // Record sp-based slot for receiver on stack for non-static methods.
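  // (A receiver_offset of -1 means "not handlized"; for instance methods the
  // T_OBJECT case of the shuffle below is expected to fill in the real offset.)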
  int receiver_offset = -1;

  // We move the arguments backwards because the floating point destination
  // will always be a register with a greater or equal register number, or
  // a stack slot.
  // in  is the index of the incoming Java arguments
  // out is the index of the outgoing C arguments

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
    freg_destroyed[f] = false;
  }
#endif // ASSERT

  for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {

#ifdef ASSERT
    if (in_regs[in].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[in].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
    }
    if (out_regs[out].first()->is_Register()) {
      reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
    }
    if (out_regs2[out].first()->is_Register()) {
      reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs2[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
    }
#endif // ASSERT

    switch (in_sig_bt[in]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        // Move int and do sign extension.
        int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        break;
      case T_LONG:
        long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        break;
      case T_ARRAY:
        if (is_critical_native) {
          int body_arg = out;
          out -= 1; // Point to length arg.
          unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
                                r_callers_sp, r_temp_1, r_temp_2);
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, stack_slots,
                    oop_map, oop_handle_slot_offset,
                    ((in == 0) && (!method_is_static)), &receiver_offset,
                    in_regs[in], out_regs[out],
                    r_callers_sp, r_temp_1, r_temp_2);
        break;
      case T_VOID:
        break;
      case T_FLOAT:
        float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_DOUBLE:
        double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_ADDRESS:
        fatal("found type (T_ADDRESS) in java args");
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Pre-load a static method's oop into ARG2.
  // Used both by locking code and the normal JNI call code.
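  // Roughly (sketch): *klass_slot = mirror; carg2 = &klass_slot; i.e. the
  // mirror is handlized like any other oop argument and its stack slot is
  // recorded in the oop map.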
2161 if (method_is_static && !is_critical_native) { 2162 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2163 r_carg2_classorobject); 2164 2165 // Now handlize the static class mirror in carg2. It's known not-null. 2166 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2167 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2168 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2169 } 2170 2171 // Get JNIEnv* which is first argument to native. 2172 if (!is_critical_native) { 2173 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2174 } 2175 2176 // NOTE: 2177 // 2178 // We have all of the arguments setup at this point. 2179 // We MUST NOT touch any outgoing regs from this point on. 2180 // So if we must call out we must push a new frame. 2181 2182 // Get current pc for oopmap, and load it patchable relative to global toc. 2183 oopmap_pc = (intptr_t) __ pc(); 2184 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2185 2186 // We use the same pc/oopMap repeatedly when we call out. 2187 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2188 2189 // r_return_pc now has the pc loaded that we will use when we finally call 2190 // to native. 2191 2192 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2193 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2194 2195 # if 0 2196 // DTrace method entry 2197 # endif 2198 2199 // Lock a synchronized method. 2200 // -------------------------------------------------------------------------- 2201 2202 if (method->is_synchronized()) { 2203 assert(!is_critical_native, "unhandled"); 2204 ConditionRegister r_flag = CCR1; 2205 Register r_oop = r_temp_4; 2206 const Register r_box = r_temp_5; 2207 Label done, locked; 2208 2209 // Load the oop for the object or class. r_carg2_classorobject contains 2210 // either the handlized oop from the incoming arguments or the handlized 2211 // class mirror (if the method is static). 2212 __ ld(r_oop, 0, r_carg2_classorobject); 2213 2214 // Get the lock box slot's address. 2215 __ addi(r_box, R1_SP, lock_offset); 2216 2217 # ifdef ASSERT 2218 if (UseBiasedLocking) { 2219 // Making the box point to itself will make it clear it went unused 2220 // but also be obviously invalid. 2221 __ std(r_box, 0, r_box); 2222 } 2223 # endif // ASSERT 2224 2225 // Try fastpath for locking. 2226 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2227 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2228 __ beq(r_flag, locked); 2229 2230 // None of the above fast optimizations worked so we have to get into the 2231 // slow case of monitor enter. Inline a special case of call_VM that 2232 // disallows any pending_exception. 2233 2234 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2235 int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2236 __ mr(R11_scratch1, R1_SP); 2237 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 2238 2239 // Do the call. 
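    // (This is a VM-leaf call, so no new oop map is needed; last_Java_frame is
    // set from the pc we materialized in r_return_pc earlier.)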
    __ set_last_Java_frame(R11_scratch1, r_return_pc);
    assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
    __ reset_last_Java_frame();

    RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
                               "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);

    __ bind(locked);
  }


  // Publish thread state
  // --------------------------------------------------------------------------

  // Use that pc we placed in r_return_pc a while back as the current frame anchor.
  __ set_last_Java_frame(R1_SP, r_return_pc);

  // Transition from _thread_in_Java to _thread_in_native.
  __ li(R0, _thread_in_native);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));


  // The JNI call
  // --------------------------------------------------------------------------
#if defined(ABI_ELFv2)
  __ call_c(native_func, relocInfo::runtime_call_type);
#else
  FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
  __ call_c(fd_native_method, relocInfo::runtime_call_type);
#endif


  // Now, we are back from the native code.


  // Unpack the native result.
  // --------------------------------------------------------------------------

  // For int-types, we do any needed sign-extension.
  // Care must be taken that the return values (R3_RET and F1_RET)
  // will survive any VM calls for blocking or unlocking.
  // An OOP result (handle) is done specially in the slow-path code.

  switch (ret_type) {
    case T_VOID:    break;  // Nothing to do!
    case T_FLOAT:   break;  // Got it where we want it (unless slow-path).
    case T_DOUBLE:  break;  // Got it where we want it (unless slow-path).
    case T_LONG:    break;  // Got it where we want it (unless slow-path).
    case T_OBJECT:  break;  // Really a handle.
                            // Cannot de-handlize until after reclaiming jvm_lock.
    case T_ARRAY:   break;

    case T_BOOLEAN: {       // 0 -> false(0); !0 -> true(1)
      Label skip_modify;
      __ cmpwi(CCR0, R3_RET, 0);
      __ beq(CCR0, skip_modify);
      __ li(R3_RET, 1);
      __ bind(skip_modify);
      break;
    }
    case T_BYTE: {          // sign extension
      __ extsb(R3_RET, R3_RET);
      break;
    }
    case T_CHAR: {          // unsigned result
      __ andi(R3_RET, R3_RET, 0xffff);
      break;
    }
    case T_SHORT: {         // sign extension
      __ extsh(R3_RET, R3_RET);
      break;
    }
    case T_INT:             // nothing to do
      break;
    default:
      ShouldNotReachHere();
      break;
  }


  // Publish thread state
  // --------------------------------------------------------------------------

  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary because reading
  // and testing the synchronization state is not atomic w.r.t. GC, as this
  // scenario demonstrates:
  // - Java thread A, in _thread_in_native state, loads _not_synchronized
  //   and is preempted.
2334 // - VM thread changes sync state to synchronizing and suspends threads 2335 // for GC. 2336 // - Thread A is resumed to finish this native method, but doesn't block 2337 // here since it didn't see any synchronization in progress, and escapes. 2338 2339 // Transition from _thread_in_native to _thread_in_native_trans. 2340 __ li(R0, _thread_in_native_trans); 2341 __ release(); 2342 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2343 __ stw(R0, thread_(thread_state)); 2344 2345 2346 // Must we block? 2347 // -------------------------------------------------------------------------- 2348 2349 // Block, if necessary, before resuming in _thread_in_Java state. 2350 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2351 Label after_transition; 2352 { 2353 Label no_block, sync; 2354 2355 if (os::is_MP()) { 2356 if (UseMembar) { 2357 // Force this write out before the read below. 2358 __ fence(); 2359 } else { 2360 // Write serialization page so VM thread can do a pseudo remote membar. 2361 // We use the current thread pointer to calculate a thread specific 2362 // offset to write to within the page. This minimizes bus traffic 2363 // due to cache line collision. 2364 __ serialize_memory(R16_thread, r_temp_4, r_temp_5); 2365 } 2366 } 2367 2368 Register sync_state_addr = r_temp_4; 2369 Register sync_state = r_temp_5; 2370 Register suspend_flags = r_temp_6; 2371 2372 // No synchronization in progress nor yet synchronized 2373 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2374 __ safepoint_poll(sync, sync_state); 2375 2376 // Not suspended. 2377 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2378 __ lwz(suspend_flags, thread_(suspend_flags)); 2379 __ cmpwi(CCR1, suspend_flags, 0); 2380 __ beq(CCR1, no_block); 2381 2382 // Block. Save any potential method result value before the operation and 2383 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2384 // lets us share the oopMap we used when we went native rather than create 2385 // a distinct one for this pc. 2386 __ bind(sync); 2387 __ isync(); 2388 2389 address entry_point = is_critical_native 2390 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition) 2391 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2392 save_native_result(masm, ret_type, workspace_slot_offset); 2393 __ call_VM_leaf(entry_point, R16_thread); 2394 restore_native_result(masm, ret_type, workspace_slot_offset); 2395 2396 if (is_critical_native) { 2397 __ b(after_transition); // No thread state transition here. 2398 } 2399 __ bind(no_block); 2400 } 2401 2402 // Publish thread state. 2403 // -------------------------------------------------------------------------- 2404 2405 // Thread state is thread_in_native_trans. Any safepoint blocking has 2406 // already happened so we can now change state to _thread_in_Java. 2407 2408 // Transition from _thread_in_native_trans to _thread_in_Java. 2409 __ li(R0, _thread_in_Java); 2410 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2411 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2412 __ stw(R0, thread_(thread_state)); 2413 __ bind(after_transition); 2414 2415 // Reguard any pages if necessary. 
  // --------------------------------------------------------------------------

  Label no_reguard;
  __ lwz(r_temp_1, thread_(stack_guard_state));
  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
  __ bne(CCR0, no_reguard);

  save_native_result(masm, ret_type, workspace_slot_offset);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, workspace_slot_offset);

  __ bind(no_reguard);


  // Unlock
  // --------------------------------------------------------------------------

  if (method->is_synchronized()) {

    ConditionRegister r_flag   = CCR1;
    const Register r_oop       = r_temp_4;
    const Register r_box       = r_temp_5;
    const Register r_exception = r_temp_6;
    Label done;

    // Get oop and address of lock object box.
    if (method_is_static) {
      assert(klass_offset != -1, "");
      __ ld(r_oop, klass_offset, R1_SP);
    } else {
      assert(receiver_offset != -1, "");
      __ ld(r_oop, receiver_offset, R1_SP);
    }
    __ addi(r_box, R1_SP, lock_offset);

    // Try fastpath for unlocking.
    __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    __ beq(r_flag, done);

    // Save and restore any potential method result value around the unlocking operation.
    save_native_result(masm, ret_type, workspace_slot_offset);

    // Must save pending exception around the slow-path VM call. Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld(r_exception, thread_(pending_exception));
    assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
    __ li(R0, 0);
    __ std(R0, thread_(pending_exception));

    // Slow case of monitor exit.
    // Inline a special case of call_VM that disallows any pending_exception.
    // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
                               "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);

    restore_native_result(masm, ret_type, workspace_slot_offset);

    // Check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set. The forward_exception routine expects to see the
    // exception in pending_exception and not in a register. Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ std(r_exception, thread_(pending_exception));

    __ bind(done);
  }

# if 0
  // DTrace method exit
# endif

  // Clear "last Java frame" SP and PC.
  // --------------------------------------------------------------------------

  __ reset_last_Java_frame();

  // Unbox oop result, e.g. JNIHandles::resolve value.
  // --------------------------------------------------------------------------

  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false); // kills R31
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ load_const_optimized(R0, 0L);
    __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
  }

  // Reset handle block.
  // --------------------------------------------------------------------------
  if (!is_critical_native) {
    __ ld(r_temp_1, thread_(active_handles));
    // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
    __ li(r_temp_2, 0);
    __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);


    // Check for pending exceptions.
    // --------------------------------------------------------------------------
    __ ld(r_temp_2, thread_(pending_exception));
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, handle_pending_exception);
  }

  // Return
  // --------------------------------------------------------------------------

  __ pop_frame();
  __ restore_LR_CR(R11);
  __ blr();


  // Handler for pending exceptions (out-of-line).
  // --------------------------------------------------------------------------

  // Since this is a native call, we know the proper exception handler
  // is the empty function. We just pop this frame and then jump to
  // forward_exception_entry.
  if (!is_critical_native) {
    __ align(InteriorEntryAlignment);
    __ bind(handle_pending_exception);

    __ pop_frame();
    __ restore_LR_CR(R11);
    __ b64_patchable((address)StubRoutines::forward_exception_entry(),
                     relocInfo::runtime_call_type);
  }

  // Handler for a cache miss (out-of-line).
  // --------------------------------------------------------------------------

  if (!method_is_static) {
    __ align(InteriorEntryAlignment);
    __ bind(ic_miss);

    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
  }

  // Done.
  // --------------------------------------------------------------------------

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_start_pc-start_pc,
                                            frame_done_pc-start_pc,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;
#else
  ShouldNotReachHere();
  return NULL;
#endif // COMPILER2
}

// This function returns the adjusted size (in number of words) of a c2i adapter
// activation for use during deoptimization.
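// A worked example (illustrative, assuming stackElementWords == 1 and 16-byte
// frame alignment): callee_parameters == 2 and callee_locals == 6 yield
// align_up(4, 16) == 16 words of adjustment.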
2586 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2587 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes); 2588 } 2589 2590 uint SharedRuntime::out_preserve_stack_slots() { 2591 #if defined(COMPILER1) || defined(COMPILER2) 2592 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2593 #else 2594 return 0; 2595 #endif 2596 } 2597 2598 #if defined(COMPILER1) || defined(COMPILER2) 2599 // Frame generation for deopt and uncommon trap blobs. 2600 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2601 /* Read */ 2602 Register unroll_block_reg, 2603 /* Update */ 2604 Register frame_sizes_reg, 2605 Register number_of_frames_reg, 2606 Register pcs_reg, 2607 /* Invalidate */ 2608 Register frame_size_reg, 2609 Register pc_reg) { 2610 2611 __ ld(pc_reg, 0, pcs_reg); 2612 __ ld(frame_size_reg, 0, frame_sizes_reg); 2613 __ std(pc_reg, _abi(lr), R1_SP); 2614 __ push_frame(frame_size_reg, R0/*tmp*/); 2615 #ifdef ASSERT 2616 __ load_const_optimized(pc_reg, 0x5afe); 2617 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2618 #endif 2619 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2620 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2621 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2622 __ addi(pcs_reg, pcs_reg, wordSize); 2623 } 2624 2625 // Loop through the UnrollBlock info and create new frames. 2626 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2627 /* read */ 2628 Register unroll_block_reg, 2629 /* invalidate */ 2630 Register frame_sizes_reg, 2631 Register number_of_frames_reg, 2632 Register pcs_reg, 2633 Register frame_size_reg, 2634 Register pc_reg) { 2635 Label loop; 2636 2637 // _number_of_frames is of type int (deoptimization.hpp) 2638 __ lwa(number_of_frames_reg, 2639 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), 2640 unroll_block_reg); 2641 __ ld(pcs_reg, 2642 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), 2643 unroll_block_reg); 2644 __ ld(frame_sizes_reg, 2645 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), 2646 unroll_block_reg); 2647 2648 // stack: (caller_of_deoptee, ...). 2649 2650 // At this point we either have an interpreter frame or a compiled 2651 // frame on top of stack. If it is a compiled frame we push a new c2i 2652 // adapter here 2653 2654 // Memorize top-frame stack-pointer. 2655 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2656 2657 // Resize interpreter top frame OR C2I adapter. 2658 2659 // At this moment, the top frame (which is the caller of the deoptee) is 2660 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2661 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2662 // outgoing arguments. 2663 // 2664 // In order to push the interpreter frame for the deoptee, we need to 2665 // resize the top frame such that we are able to place the deoptee's 2666 // locals in the frame. 2667 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2668 // into a valid PARENT_IJAVA_FRAME_ABI. 2669 2670 __ lwa(R11_scratch1, 2671 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), 2672 unroll_block_reg); 2673 __ neg(R11_scratch1, R11_scratch1); 2674 2675 // R11_scratch1 contains size of locals for frame resizing. 2676 // R12_scratch2 contains top frame's lr. 2677 2678 // Resize frame by complete frame size prevents TOC from being 2679 // overwritten by locals. 
overwritten by locals. A more stack-space-saving way would be
  // to copy the TOC to its location in the new ABI.
  __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);

  // Now, resize the frame.
  __ resize_frame(R11_scratch1, pc_reg/*tmp*/);

  // In the case where we have resized a c2i frame above, the optional
  // alignment below the locals has size 32 (why?).
  __ std(R12_scratch2, _abi(lr), R1_SP);

  // Initialize initial_caller_sp.
#ifdef ASSERT
  __ load_const_optimized(pc_reg, 0x5afe);
  __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
#endif
  __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);

#ifdef ASSERT
  // Make sure that there is at least one entry in the array.
  __ cmpdi(CCR0, number_of_frames_reg, 0);
  __ asm_assert_ne("array_size must be > 0", 0x205);
#endif

  // Now push the new interpreter frames.
  //
  __ bind(loop);
  // Allocate a new frame, fill in the pc.
  push_skeleton_frame(masm, deopt,
                      unroll_block_reg,
                      frame_sizes_reg,
                      number_of_frames_reg,
                      pcs_reg,
                      frame_size_reg,
                      pc_reg);
  __ cmpdi(CCR0, number_of_frames_reg, 0);
  __ bne(CCR0, loop);

  // Get the return address pointing into the frame manager.
  __ ld(R0, 0, pcs_reg);
  // Store it in the top interpreter frame.
  __ std(R0, _abi(lr), R1_SP);
  // Initialize frame_manager_lr of interpreter top frame.
}
#endif

void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("deopt_blob", 2048, 1024);
  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  Label exec_mode_initialized;
  int frame_size_in_words;
  OopMap* map = NULL;
  OopMapSet *oop_maps = new OopMapSet();

  // size of ABI112 plus spill slots for R3_RET and F1_RET.
  const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
  const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.

  const Register exec_mode_reg = R21_tmp1;

  const address start = __ pc();

#if defined(COMPILER1) || defined(COMPILER2)
  // --------------------------------------------------------------------------
  // Prolog for non exception case!

  // We have been called from the deopt handler of the deoptee.
  //
  // deoptee:
  //                      ...
  //                      call X
  //                      ...
  //  deopt_handler:      call_deopt_stub
  //  cur. return pc  --> ...
  //
  // So currently SR_LR points behind the call in the deopt handler.
  // We adjust it such that it points to the start of the deopt handler.
  // The return_pc has been stored in the frame of the deoptee and
  // will replace the address of the deopt_handler in the call
  // to Deoptimization::fetch_unroll_info below.
  // We can't grab a free register here, because all registers may
  // contain live values, so let the RegisterSaver do the adjustment
  // of the return pc.
  const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();

  // Push the "unpack frame"
  // Save everything in sight.
2770 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2771 &first_frame_size_in_bytes, 2772 /*generate_oop_map=*/ true, 2773 return_pc_adjustment_no_exception, 2774 RegisterSaver::return_pc_is_lr); 2775 assert(map != NULL, "OopMap must have been created"); 2776 2777 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2778 // Save exec mode for unpack_frames. 2779 __ b(exec_mode_initialized); 2780 2781 // -------------------------------------------------------------------------- 2782 // Prolog for exception case 2783 2784 // An exception is pending. 2785 // We have been called with a return (interpreter) or a jump (exception blob). 2786 // 2787 // - R3_ARG1: exception oop 2788 // - R4_ARG2: exception pc 2789 2790 int exception_offset = __ pc() - start; 2791 2792 BLOCK_COMMENT("Prolog for exception case"); 2793 2794 // Store exception oop and pc in thread (location known to GC). 2795 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2796 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2797 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2798 __ std(R4_ARG2, _abi(lr), R1_SP); 2799 2800 // Vanilla deoptimization with an exception pending in exception_oop. 2801 int exception_in_tls_offset = __ pc() - start; 2802 2803 // Push the "unpack frame". 2804 // Save everything in sight. 2805 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2806 &first_frame_size_in_bytes, 2807 /*generate_oop_map=*/ false, 2808 /*return_pc_adjustment_exception=*/ 0, 2809 RegisterSaver::return_pc_is_pre_saved); 2810 2811 // Deopt during an exception. Save exec mode for unpack_frames. 2812 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2813 2814 // fall through 2815 2816 int reexecute_offset = 0; 2817 #ifdef COMPILER1 2818 __ b(exec_mode_initialized); 2819 2820 // Reexecute entry, similar to c2 uncommon trap 2821 reexecute_offset = __ pc() - start; 2822 2823 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2824 &first_frame_size_in_bytes, 2825 /*generate_oop_map=*/ false, 2826 /*return_pc_adjustment_reexecute=*/ 0, 2827 RegisterSaver::return_pc_is_pre_saved); 2828 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2829 #endif 2830 2831 // -------------------------------------------------------------------------- 2832 __ BIND(exec_mode_initialized); 2833 2834 { 2835 const Register unroll_block_reg = R22_tmp2; 2836 2837 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2838 // call `last_Java_frame()'. The value of the pc in the frame is not 2839 // particularly important. It just needs to identify this blob. 2840 __ set_last_Java_frame(R1_SP, noreg); 2841 2842 // With EscapeAnalysis turned on, this call may safepoint! 2843 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2844 address calls_return_pc = __ last_calls_return_pc(); 2845 // Set an oopmap for the call site that describes all our saved registers. 2846 oop_maps->add_gc_map(calls_return_pc - start, map); 2847 2848 __ reset_last_Java_frame(); 2849 // Save the return value. 2850 __ mr(unroll_block_reg, R3_RET); 2851 2852 // Restore only the result registers that have been saved 2853 // by save_volatile_registers(...). 
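  // (That is, only the return-value registers are reloaded here; everything
  // else will be reconstructed from the vframeArray once the skeleton frames
  // are unpacked below.)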
2854 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2855 2856 // reload the exec mode from the UnrollBlock (it might have changed) 2857 __ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2858 // In excp_deopt_mode, restore and clear exception oop which we 2859 // stored in the thread during exception entry above. The exception 2860 // oop will be the return value of this stub. 2861 Label skip_restore_excp; 2862 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2863 __ bne(CCR0, skip_restore_excp); 2864 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2865 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2866 __ li(R0, 0); 2867 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2868 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2869 __ BIND(skip_restore_excp); 2870 2871 __ pop_frame(); 2872 2873 // stack: (deoptee, optional i2c, caller of deoptee, ...). 2874 2875 // pop the deoptee's frame 2876 __ pop_frame(); 2877 2878 // stack: (caller_of_deoptee, ...). 2879 2880 // Loop through the `UnrollBlock' info and create interpreter frames. 2881 push_skeleton_frames(masm, true/*deopt*/, 2882 unroll_block_reg, 2883 R23_tmp3, 2884 R24_tmp4, 2885 R25_tmp5, 2886 R26_tmp6, 2887 R27_tmp7); 2888 2889 // stack: (skeletal interpreter frame, ..., optional skeletal 2890 // interpreter frame, optional c2i, caller of deoptee, ...). 2891 } 2892 2893 // push an `unpack_frame' taking care of float / int return values. 2894 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 2895 2896 // stack: (unpack frame, skeletal interpreter frame, ..., optional 2897 // skeletal interpreter frame, optional c2i, caller of deoptee, 2898 // ...). 2899 2900 // Spill live volatile registers since we'll do a call. 2901 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2902 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2903 2904 // Let the unpacker layout information in the skeletal frames just 2905 // allocated. 2906 __ get_PC_trash_LR(R3_RET); 2907 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 2908 // This is a call to a LEAF method, so no oop map is required. 2909 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 2910 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 2911 __ reset_last_Java_frame(); 2912 2913 // Restore the volatiles saved above. 2914 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2915 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2916 2917 // Pop the unpack frame. 2918 __ pop_frame(); 2919 __ restore_LR_CR(R0); 2920 2921 // stack: (top interpreter frame, ..., optional interpreter frame, 2922 // optional c2i, caller of deoptee, ...). 2923 2924 // Initialize R14_state. 2925 __ restore_interpreter_state(R11_scratch1); 2926 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2927 2928 // Return to the interpreter entry point. 
  __ blr();
  __ flush();
#else // !(COMPILER1 || COMPILER2)
  __ unimplemented("deopt blob needed only with compiler");
  int exception_offset = __ pc() - start;
#endif // COMPILER1 || COMPILER2

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
                                           reexecute_offset, first_frame_size_in_bytes / wordSize);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}

#ifdef COMPILER2
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code.
  ResourceMark rm;
  // Setup code generation tools.
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  address start = __ pc();

  Register unroll_block_reg = R21_tmp1;
  Register klass_index_reg  = R22_tmp2;
  Register unc_trap_reg     = R23_tmp3;

  OopMapSet* oop_maps = new OopMapSet();
  int frame_size_in_bytes = frame::abi_reg_args_size;
  OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).

  // Push a dummy `unpack_frame' and call
  // `Deoptimization::uncommon_trap' to pack the compiled frame into a
  // vframe array and return the `UnrollBlock' information.

  // Save LR to compiled frame.
  __ save_LR_CR(R11_scratch1);

  // Push an "uncommon_trap" frame.
  __ push_frame_reg_args(0, R11_scratch1);

  // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).

  // Set the `unpack_frame' as last_Java_frame.
  // `Deoptimization::uncommon_trap' expects it and considers its
  // sender frame as the deoptee frame.
  // Remember the offset of the instruction whose address will be
  // moved to R11_scratch1.
  address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

  __ mr(klass_index_reg, R3);
  __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
                  R16_thread, klass_index_reg, R5_ARG3);

  // Set an oopmap for the call site.
  oop_maps->add_gc_map(gc_map_pc - start, map);

  __ reset_last_Java_frame();

  // Pop the `unpack frame'.
  __ pop_frame();

  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).

  // Save the return value.
  __ mr(unroll_block_reg, R3_RET);

  // Pop the uncommon_trap frame.
  __ pop_frame();

  // stack: (caller_of_deoptee, ...).

#ifdef ASSERT
  __ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
  __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ asm_assert_eq("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap", 0);
#endif

  // Allocate new interpreter frame(s) and possibly a c2i adapter
  // frame.
  push_skeleton_frames(masm, false/*deopt*/,
                       unroll_block_reg,
                       R22_tmp2,
                       R23_tmp3,
                       R24_tmp4,
                       R25_tmp5,
                       R26_tmp6);

  // stack: (skeletal interpreter frame, ..., optional skeletal
  // interpreter frame, optional c2i, caller of deoptee, ...).

  // Push a dummy `unpack_frame' taking care of float return values.
  // Call `Deoptimization::unpack_frames' to lay out information in the
  // interpreter frames just created.

  // Push a simple "unpack frame" here.
  __ push_frame_reg_args(0, R11_scratch1);

  // stack: (unpack frame, skeletal interpreter frame, ..., optional
  // skeletal interpreter frame, optional c2i, caller of deoptee,
  // ...).

  // Set the "unpack_frame" as last_Java_frame.
  __ get_PC_trash_LR(R11_scratch1);
  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

  // Indicate it is the uncommon trap case.
  __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
                  R16_thread, unc_trap_reg);

  __ reset_last_Java_frame();
  // Pop the `unpack frame'.
  __ pop_frame();
  // Restore LR from top interpreter frame.
  __ restore_LR_CR(R11_scratch1);

  // stack: (top interpreter frame, ..., optional interpreter frame,
  // optional c2i, caller of deoptee, ...).

  __ restore_interpreter_state(R11_scratch1);
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // Return to the interpreter entry point.
  __ blr();

  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
}
#endif // COMPILER2

// Generate a special Compile2Runtime blob that saves all registers, and sets up the oopmap.
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL,
         "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code. Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start = __ pc();
  int frame_size_in_bytes = 0;

  RegisterSaver::ReturnPCLocation return_pc_location;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  if (cause_return) {
    // Nothing to do here. The frame has already been popped in MachEpilogNode.
    // Register LR already contains the return pc.
    return_pc_location = RegisterSaver::return_pc_is_lr;
  } else {
    // Use thread()->saved_exception_pc() as return pc.
    return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
  }

  // Save registers, fpu state, and flags. Set R31 = return pc.
  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map=*/ true,
                                                                   /*return_pc_adjustment=*/0,
                                                                   return_pc_location);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.
  __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  // Do the call
  __ call_VM_leaf(call_ptr, R16_thread);
  address calls_return_pc = __ last_calls_return_pc();

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.
This 3114 // will allow deoptimization at this safepoint to find all possible 3115 // debug-info recordings, as well as let GC find all oops. 3116 oop_maps->add_gc_map(calls_return_pc - start, map); 3117 3118 Label noException; 3119 3120 // Clear the last Java frame. 3121 __ reset_last_Java_frame(); 3122 3123 BLOCK_COMMENT(" Check pending exception."); 3124 const Register pending_exception = R0; 3125 __ ld(pending_exception, thread_(pending_exception)); 3126 __ cmpdi(CCR0, pending_exception, 0); 3127 __ beq(CCR0, noException); 3128 3129 // Exception pending 3130 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3131 frame_size_in_bytes, 3132 /*restore_ctr=*/true); 3133 3134 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3135 // Jump to forward_exception_entry, with the issuing PC in LR 3136 // so it looks like the original nmethod called forward_exception_entry. 3137 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3138 3139 // No exception case. 3140 __ BIND(noException); 3141 3142 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) { 3143 Label no_adjust; 3144 // If our stashed return pc was modified by the runtime we avoid touching it 3145 __ ld(R0, frame_size_in_bytes + _abi(lr), R1_SP); 3146 __ cmpd(CCR0, R0, R31); 3147 __ bne(CCR0, no_adjust); 3148 3149 // Adjust return pc forward to step over the safepoint poll instruction 3150 __ addi(R31, R31, 4); 3151 __ std(R31, frame_size_in_bytes + _abi(lr), R1_SP); 3152 3153 __ bind(no_adjust); 3154 } 3155 3156 // Normal exit, restore registers and exit. 3157 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3158 frame_size_in_bytes, 3159 /*restore_ctr=*/true); 3160 3161 __ blr(); 3162 3163 // Make sure all code is generated 3164 masm->flush(); 3165 3166 // Fill-out other meta info 3167 // CodeBlob frame size is in words. 3168 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3169 } 3170 3171 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3172 // 3173 // Generate a stub that calls into the vm to find out the proper destination 3174 // of a java call. All the argument registers are live at this point 3175 // but since this is generic code we don't know what they are and the caller 3176 // must do any gc of the args. 3177 // 3178 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 3179 3180 // allocate space for the code 3181 ResourceMark rm; 3182 3183 CodeBuffer buffer(name, 1000, 512); 3184 MacroAssembler* masm = new MacroAssembler(&buffer); 3185 3186 int frame_size_in_bytes; 3187 3188 OopMapSet *oop_maps = new OopMapSet(); 3189 OopMap* map = NULL; 3190 3191 address start = __ pc(); 3192 3193 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3194 &frame_size_in_bytes, 3195 /*generate_oop_map*/ true, 3196 /*return_pc_adjustment*/ 0, 3197 RegisterSaver::return_pc_is_lr); 3198 3199 // Use noreg as last_Java_pc, the return pc will be reconstructed 3200 // from the physical frame. 3201 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3202 3203 int frame_complete = __ offset(); 3204 3205 // Pass R19_method as 2nd (optional) argument, used by 3206 // counter_overflow_stub. 3207 __ call_VM_leaf(destination, R16_thread, R19_method); 3208 address calls_return_pc = __ last_calls_return_pc(); 3209 // Set an oopmap for the call site. 

// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are, and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {

  // Allocate space for the code.
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_bytes;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  address start = __ pc();

  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map*/ true,
                                                                   /*return_pc_adjustment*/ 0,
                                                                   RegisterSaver::return_pc_is_lr);

  // Use noreg as last_Java_pc, the return pc will be reconstructed
  // from the physical frame.
  __ set_last_Java_frame(/*sp*/R1_SP, noreg);

  int frame_complete = __ offset();

  // Pass R19_method as 2nd (optional) argument, used by
  // counter_overflow_stub.
  __ call_VM_leaf(destination, R16_thread, R19_method);
  address calls_return_pc = __ last_calls_return_pc();
  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
  // Create the oopmap for the call's return pc.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  // R3_RET contains the address we are going to jump to, assuming no exception got installed.

  // Clear last_Java_sp.
  __ reset_last_Java_frame();

  // Check for pending exceptions.
  BLOCK_COMMENT("Check for pending exceptions.");
  Label pending;
  __ ld(R11_scratch1, thread_(pending_exception));
  __ cmpdi(CCR0, R11_scratch1, 0);
  __ bne(CCR0, pending);

  __ mtctr(R3_RET); // CTR will not be touched by restore_live_registers_and_pop_frame.

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

  // Get the returned method.
  __ get_vm_result_2(R19_method);

  __ bctr();


  // Pending exception after the safepoint.
  __ BIND(pending);

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);

  // Exception pending => remove activation and forward to the exception handler.

  __ li(R11_scratch1, 0);
  __ ld(R3_ARG1, thread_(pending_exception));
  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // -------------
  // Make sure all code is generated.
  masm->flush();

  // Return the blob.
  // CodeBlob frame size is in words.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
                                       oop_maps, true);
}


//------------------------------Montgomery multiplication------------------------
//

// Subtract 0:b from carry:a. Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0;
  unsigned long tmp, tmp2;
  __asm__ __volatile__ (
    "subfc  %[tmp], %[tmp], %[tmp]   \n" // pre-set CA
    "mtctr  %[len]                   \n"
    "0:                              \n"
    "ldx    %[tmp], %[i], %[a]       \n"
    "ldx    %[tmp2], %[i], %[b]      \n"
    "subfe  %[tmp], %[tmp2], %[tmp]  \n" // subtract extended
    "stdx   %[tmp], %[i], %[a]       \n"
    "addi   %[i], %[i], 8            \n"
    "bdnz   0b                       \n"
    "addme  %[tmp], %[carry]         \n" // carry + CA - 1
    : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
    : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
    : "ctr", "xer", "memory"
  );
  return tmp;
}

// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}
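
// For reference, a portable sketch of the same accumulation using the
// GCC/Clang unsigned __int128 extension (illustrative only; the asm version
// above is used to guarantee the mulld/mulhdu carry-chain sequence):
//
//   inline void MACC_portable(unsigned long A, unsigned long B,
//                             unsigned long &T0, unsigned long &T1,
//                             unsigned long &T2) {
//     unsigned __int128 prod = (unsigned __int128)A * B;
//     unsigned __int128 sum  = (unsigned __int128)T0 + (unsigned long)prod; // add lo
//     T0 = (unsigned long)sum;
//     sum = (unsigned __int128)T1 + (unsigned long)(prod >> 64)
//                                 + (unsigned long)(sum >> 64);             // add hi + carry
//     T1 = (unsigned long)sum;
//     T2 += (unsigned long)(sum >> 64);                                     // addze
//   }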

// As above, but add twice the double-length result into the
// accumulator.
inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}

// Fast Montgomery multiplication. The derivation of the algorithm is
// in "A Cryptographic Library for the Motorola DSP56000", Dusse and
// Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies, so it should be up to 25% faster than Montgomery
// multiplication. However, its loop control is more complex and it
// may actually run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}
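
// Notation sketch for the two kernels above (illustrative only, not VM
// code): with R = 2^(64*len) and operands interpreted as little-endian
// longword limbs,
//
//   montgomery_multiply computes  m = a * b * R^-1 (mod n)
//   montgomery_square   computes  m = a * a * R^-1 (mod n)
//
// In outer iteration i, choosing m[i] = t0 * inv with inv = -n^-1 mod 2^64
// makes the low accumulator word t0 + m[i]*n[0] vanish mod 2^64 (this is
// what the "t0 == 0" asserts verify), so the accumulator can be shifted
// down by exactly one word. The trailing sub() loop folds any remaining
// carry word back in until the result fits in len longwords.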

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
// It doesn't seem to be relevant for Power8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64

// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while (len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 8k bytes of stack space.
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 6k bytes of stack space.
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
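
// Layout example (illustrative only, not VM code): the jint arrays arrive
// from Java as big-endian 32-bit limbs, most significant first. For a
// 128-bit magnitude 0x00112233445566778899AABBCCDDEEFF the caller passes
//
//   a_ints[] = { 0x00112233, 0x44556677, 0x8899AABB, 0xCCDDEEFF }
//
// and reverse_words() produces the little-endian 64-bit limbs the
// Montgomery kernels consume:
//
//   a[0] = 0x8899AABBCCDDEEFF   (least significant longword)
//   a[1] = 0x0011223344556677   (most significant longword)
//
// so that n[0] is the low word of the modulus, as required by the
// "inv * n[0] == -1UL" asserts above.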