/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                         int* out_frame_size_in_bytes,
                         bool generate_oop_map,
                         int return_pc_adjustment,
                         ReturnPCLocation return_pc_location);
  static void    restore_live_registers_and_pop_frame(MacroAssembler* masm,
                         int frame_size_in_bytes,
                         bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                         Register r_temp,
                         int frame_size,
                         int total_args,
                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler*masm,
                         int frame_size,
                         int total_args,
                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};
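
// Illustrative usage (a sketch, not code from this file; the surrounding
// stub code is assumed): a runtime stub typically brackets a VM call with
// these helpers, e.g.
//
//   int frame_size_in_bytes;
//   OopMap* map = RegisterSaver::push_frame_reg_args_and_save_live_registers(
//       masm, &frame_size_in_bytes, /*generate_oop_map=*/ true,
//       /*return_pc_adjustment=*/ 0, RegisterSaver::return_pc_is_lr);
//   // ... emit the call into the VM ...
//   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes,
//                                                       /*restore_ctr=*/ true);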

#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg(   R0  ),
  //RegisterSaver_LiveIntReg( R1  ), // stack pointer
  RegisterSaver_LiveIntReg(   R2  ),
  RegisterSaver_LiveIntReg(   R3  ),
  RegisterSaver_LiveIntReg(   R4  ),
  RegisterSaver_LiveIntReg(   R5  ),
  RegisterSaver_LiveIntReg(   R6  ),
  RegisterSaver_LiveIntReg(   R7  ),
  RegisterSaver_LiveIntReg(   R8  ),
  RegisterSaver_LiveIntReg(   R9  ),
  RegisterSaver_LiveIntReg(   R10 ),
  RegisterSaver_LiveIntReg(   R11 ),
  RegisterSaver_LiveIntReg(   R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg(   R14 ),
  RegisterSaver_LiveIntReg(   R15 ),
  RegisterSaver_LiveIntReg(   R16 ),
  RegisterSaver_LiveIntReg(   R17 ),
  RegisterSaver_LiveIntReg(   R18 ),
  RegisterSaver_LiveIntReg(   R19 ),
  RegisterSaver_LiveIntReg(   R20 ),
  RegisterSaver_LiveIntReg(   R21 ),
  RegisterSaver_LiveIntReg(   R22 ),
  RegisterSaver_LiveIntReg(   R23 ),
  RegisterSaver_LiveIntReg(   R24 ),
  RegisterSaver_LiveIntReg(   R25 ),
  RegisterSaver_LiveIntReg(   R26 ),
  RegisterSaver_LiveIntReg(   R27 ),
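
// For orientation (illustrative arithmetic, not asserted here): the table
// above lists 1 special + 32 float + 30 integer registers = 63 entries, so
// register_save_size below is 63 * reg_size = 504 bytes, which is rounded up
// to frame::alignment_in_bytes (16 on PPC64, giving 512 bytes) before
// frame::abi_reg_args_size is added on top.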
  RegisterSaver_LiveIntReg(   R28 ),
  RegisterSaver_LiveIntReg(   R29 ),
  RegisterSaver_LiveIntReg(   R30 ),
  RegisterSaver_LiveIntReg(   R31 ), // must be the last register (see save/restore functions below)
};

OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                         int* out_frame_size_in_bytes,
                         bool generate_oop_map,
                         int return_pc_adjustment,
                         ReturnPCLocation return_pc_location) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.

  int i;
  int offset;

  // Calculate frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_reg_args_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // Save r31 in the last slot of the not yet pushed frame so that we
  // can use it as scratch reg.
  __ std(R31, -reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R31);
  __ std(R31, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr:        __ mflr(R31); break;
    case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, _abi(lr), R1_SP);
  }

  // push a new frame
  __ push_frame(frame_size_in_bytes, R31);

  // save all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) { // We spilled R31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R31);
          __ std(R31, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // pop the frame
  __ pop_frame();

  // restore the flags
  __ restore_LR_CR(R31);

  // restore scratch register's value
  __ ld(R31, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num)==R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num)==F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return Assembler::load_const_size + 8;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  Register Rtemp = R12;
  __ load_const(Rtemp, destination);
  __ mtctr(Rtemp);
  __ bctr();
}
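
// Size check (illustrative reasoning, assuming load_const emits its fixed
// worst-case 64-bit constant materialization of load_const_size bytes): the
// trampoline above is that sequence plus two 4-byte instructions (mtctr and
// bctr), which is where the "+ 8" in trampoline_size() comes from.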

#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack
// pointer as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Register values up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
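
// Worked example (illustrative): for a Java signature (int, long, float,
// double, Object), java_calling_convention below assigns
//   int    -> R3   (1st integer argument)
//   long   -> R4   (2nd integer argument, one full 64-bit register)
//   float  -> F1   (1st float argument)
//   double -> F2   (2nd float argument)
//   Object -> R5   (3rd integer argument)
// Only once the 8 integer or 13 float argument registers are exhausted do
// values spill to stack slots; longs, doubles and oops take 2 aligned slots.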

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack, float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return align_up(stk, 2);
}

#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
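  // The asserted values follow from the ABI frame layout (illustrative
  // arithmetic): ELFv2 uses a 32-byte frame header plus the 64-byte parameter
  // save area (8 doublewords) = 96 bytes, while the original ELF ABI uses a
  // 48-byte header plus the same 64-byte parameter save area = 112 bytes.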
  // We fill out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. Thus fall through, handle as long.
    case T_LONG:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
#if defined(LINUX)
      // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
      // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
      // Although AIX runs on big endian CPU, float is in the most
      // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return align_up(stk, 2);
}
#endif // COMPILER1 || COMPILER2
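
// Example (illustrative): consider a JNI call whose 9th C argument (after
// JNIEnv* and the receiver) is the first float of the signature. It lands in
// F1 via regs[i], and because it sits beyond the 8th argument position
// (Argument::n_regs_not_on_stack_c) it *also* gets a stack slot via regs2[i],
// matching the xlC behavior noted in c_calling_convention above.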

static address gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& call_interpreter,
                            const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off-=wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off-=wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off-=wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off-=wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off-=wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off-=wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset-=wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset-=2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset-=wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset-=wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset-=wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset-=wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a CompiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_method_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

#ifdef COMPILER2
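// Example (illustrative): object_move below implements JNI handle creation.
// A Java 'Object' arriving in R3 is spilled into the wrapper frame's oop
// handle area and the *address* of that stack slot is what the native callee
// receives as its jobject; a NULL oop is passed as a NULL handle instead of
// a pointer to a slot containing NULL.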
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot      = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                              + oop_handle_offset; // in slots
    const int oop_offset    = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
1274 } 1275 } 1276 } 1277 1278 static void int_move(MacroAssembler*masm, 1279 VMRegPair src, VMRegPair dst, 1280 Register r_caller_sp, Register r_temp) { 1281 assert(src.first()->is_valid(), "incoming must be int"); 1282 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1283 1284 if (src.first()->is_stack()) { 1285 if (dst.first()->is_stack()) { 1286 // stack to stack 1287 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp); 1288 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1289 } else { 1290 // stack to reg 1291 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1292 } 1293 } else if (dst.first()->is_stack()) { 1294 // reg to stack 1295 __ extsw(r_temp, src.first()->as_Register()); 1296 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1297 } else { 1298 // reg to reg 1299 __ extsw(dst.first()->as_Register(), src.first()->as_Register()); 1300 } 1301 } 1302 1303 static void long_move(MacroAssembler*masm, 1304 VMRegPair src, VMRegPair dst, 1305 Register r_caller_sp, Register r_temp) { 1306 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long"); 1307 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1308 1309 if (src.first()->is_stack()) { 1310 if (dst.first()->is_stack()) { 1311 // stack to stack 1312 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1313 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1314 } else { 1315 // stack to reg 1316 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1317 } 1318 } else if (dst.first()->is_stack()) { 1319 // reg to stack 1320 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP); 1321 } else { 1322 // reg to reg 1323 if (dst.first()->as_Register() != src.first()->as_Register()) 1324 __ mr(dst.first()->as_Register(), src.first()->as_Register()); 1325 } 1326 } 1327 1328 static void float_move(MacroAssembler*masm, 1329 VMRegPair src, VMRegPair dst, 1330 Register r_caller_sp, Register r_temp) { 1331 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float"); 1332 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float"); 1333 1334 if (src.first()->is_stack()) { 1335 if (dst.first()->is_stack()) { 1336 // stack to stack 1337 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp); 1338 __ stw(r_temp, reg2offset(dst.first()), R1_SP); 1339 } else { 1340 // stack to reg 1341 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1342 } 1343 } else if (dst.first()->is_stack()) { 1344 // reg to stack 1345 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 1346 } else { 1347 // reg to reg 1348 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1349 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1350 } 1351 } 1352 1353 static void double_move(MacroAssembler*masm, 1354 VMRegPair src, VMRegPair dst, 1355 Register r_caller_sp, Register r_temp) { 1356 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double"); 1357 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double"); 1358 1359 if (src.first()->is_stack()) { 1360 if (dst.first()->is_stack()) { 1361 // stack to stack 1362 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1363 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1364 } else { 1365 // stack to reg 1366 __ 
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }
  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      // Value lives in an input register. Save it on stack.
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ stw(in_regs[i].first()->as_Register(), offset, R1_SP);
          } else {
            __ lwa(in_regs[i].first()->as_Register(), offset, R1_SP);
          }
          slot++;
          assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)");
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
  __ call_c(entry_point, relocInfo::runtime_call_type);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if the array is NULL.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}
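
// Example (illustrative): for a critical native declared to take a jint[]
// argument, the wrapper calls unpack_array_argument so the C function
// receives the pair (jint length, elem* body) in place of the array oop;
// a NULL array becomes the pair (0, NULL).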

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

#endif // COMPILER2

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
1724 //
1725 // Critical native functions are a shorthand for the use of
1726 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1727 // functions. The wrapper is expected to unpack the arguments before
1728 // passing them to the callee and perform checks before and after the
1729 // native call to ensure that the GCLocker
1730 // lock_critical/unlock_critical semantics are followed. Some other
1731 // parts of JNI setup are skipped, like the tear-down of the JNI handle
1732 // block and the check for pending exceptions, as it's impossible for them
1733 // to be thrown.
1734 //
1735 // They are roughly structured like this:
1736 //   if (GCLocker::needs_gc())
1737 //     SharedRuntime::block_for_jni_critical();
1738 //   transition to thread_in_native
1739 //   unpack array arguments and call native entry point
1740 //   check for safepoint in progress
1741 //   check if any thread suspend flags are set
1742 //     call into the JVM and possibly unlock the JNI critical
1743 //     if a GC was suppressed while in the critical native.
1744 //   transition back to thread_in_Java
1745 //   return to caller
1746 //
1747 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1748                                                 const methodHandle& method,
1749                                                 int compile_id,
1750                                                 BasicType *in_sig_bt,
1751                                                 VMRegPair *in_regs,
1752                                                 BasicType ret_type) {
1753 #ifdef COMPILER2
1754   if (method->is_method_handle_intrinsic()) {
1755     vmIntrinsics::ID iid = method->intrinsic_id();
1756     intptr_t start = (intptr_t)__ pc();
1757     int vep_offset = ((intptr_t)__ pc()) - start;
1758     gen_special_dispatch(masm,
1759                          method,
1760                          in_sig_bt,
1761                          in_regs);
1762     int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1763     __ flush();
1764     int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1765     return nmethod::new_native_nmethod(method,
1766                                        compile_id,
1767                                        masm->code(),
1768                                        vep_offset,
1769                                        frame_complete,
1770                                        stack_slots / VMRegImpl::slots_per_word,
1771                                        in_ByteSize(-1),
1772                                        in_ByteSize(-1),
1773                                        (OopMapSet*)NULL);
1774   }
1775
1776   bool is_critical_native = true;
1777   address native_func = method->critical_native_function();
1778   if (native_func == NULL) {
1779     native_func = method->native_function();
1780     is_critical_native = false;
1781   }
1782   assert(native_func != NULL, "must have function");
1783
1784   // First, create signature for outgoing C call
1785   // --------------------------------------------------------------------------
1786
1787   int total_in_args = method->size_of_parameters();
1788   // We have received a description of where all the Java args are located
1789   // on entry to the wrapper. We need to convert these args to where
1790   // the JNI function will expect them. To figure out where they go
1791   // we convert the Java signature to a C signature by inserting
1792   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1793
1794   // Calculate the total number of C arguments and create arrays for the
1795   // signature and the outgoing registers.
1796   // On ppc64, we have two arrays for the outgoing registers, because
1797   // some floating-point arguments must be passed in registers _and_
1798   // in stack locations.
1799   bool method_is_static = method->is_static();
1800   int total_c_args = total_in_args;
1801
1802   if (!is_critical_native) {
1803     int n_hidden_args = method_is_static ? 2 : 1;
1804     total_c_args += n_hidden_args;
1805   } else {
1806     // No JNIEnv*, no this*, but unpacked arrays (base+length).
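    // For example (illustrative only), a critical native for
    //   static int sum(int[] a)
    // is called as
    //   jint JavaCritical_pkg_Klass_sum(jint a_length, jint* a_body)
    // so each T_ARRAY parameter contributes one extra C argument,
    // which is what the loop below accounts for.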
1807 for (int i = 0; i < total_in_args; i++) { 1808 if (in_sig_bt[i] == T_ARRAY) { 1809 total_c_args++; 1810 } 1811 } 1812 } 1813 1814 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1815 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1816 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1817 BasicType* in_elem_bt = NULL; 1818 1819 // Create the signature for the C call: 1820 // 1) add the JNIEnv* 1821 // 2) add the class if the method is static 1822 // 3) copy the rest of the incoming signature (shifted by the number of 1823 // hidden arguments). 1824 1825 int argc = 0; 1826 if (!is_critical_native) { 1827 out_sig_bt[argc++] = T_ADDRESS; 1828 if (method->is_static()) { 1829 out_sig_bt[argc++] = T_OBJECT; 1830 } 1831 1832 for (int i = 0; i < total_in_args ; i++ ) { 1833 out_sig_bt[argc++] = in_sig_bt[i]; 1834 } 1835 } else { 1836 Thread* THREAD = Thread::current(); 1837 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1838 SignatureStream ss(method->signature()); 1839 int o = 0; 1840 for (int i = 0; i < total_in_args ; i++, o++) { 1841 if (in_sig_bt[i] == T_ARRAY) { 1842 // Arrays are passed as int, elem* pair 1843 Symbol* atype = ss.as_symbol(CHECK_NULL); 1844 const char* at = atype->as_C_string(); 1845 if (strlen(at) == 2) { 1846 assert(at[0] == '[', "must be"); 1847 switch (at[1]) { 1848 case 'B': in_elem_bt[o] = T_BYTE; break; 1849 case 'C': in_elem_bt[o] = T_CHAR; break; 1850 case 'D': in_elem_bt[o] = T_DOUBLE; break; 1851 case 'F': in_elem_bt[o] = T_FLOAT; break; 1852 case 'I': in_elem_bt[o] = T_INT; break; 1853 case 'J': in_elem_bt[o] = T_LONG; break; 1854 case 'S': in_elem_bt[o] = T_SHORT; break; 1855 case 'Z': in_elem_bt[o] = T_BOOLEAN; break; 1856 default: ShouldNotReachHere(); 1857 } 1858 } 1859 } else { 1860 in_elem_bt[o] = T_VOID; 1861 } 1862 if (in_sig_bt[i] != T_VOID) { 1863 assert(in_sig_bt[i] == ss.type(), "must match"); 1864 ss.next(); 1865 } 1866 } 1867 1868 for (int i = 0; i < total_in_args ; i++ ) { 1869 if (in_sig_bt[i] == T_ARRAY) { 1870 // Arrays are passed as int, elem* pair. 1871 out_sig_bt[argc++] = T_INT; 1872 out_sig_bt[argc++] = T_ADDRESS; 1873 } else { 1874 out_sig_bt[argc++] = in_sig_bt[i]; 1875 } 1876 } 1877 } 1878 1879 1880 // Compute the wrapper's frame size. 1881 // -------------------------------------------------------------------------- 1882 1883 // Now figure out where the args must be stored and how much stack space 1884 // they require. 1885 // 1886 // Compute framesize for the wrapper. We need to handlize all oops in 1887 // incoming registers. 1888 // 1889 // Calculate the total number of stack slots we will need: 1890 // 1) abi requirements 1891 // 2) outgoing arguments 1892 // 3) space for inbound oop handle area 1893 // 4) space for handlizing a klass if static method 1894 // 5) space for a lock if synchronized method 1895 // 6) workspace for saving return values, int <-> float reg moves, etc. 
1896 // 7) alignment 1897 // 1898 // Layout of the native wrapper frame: 1899 // (stack grows upwards, memory grows downwards) 1900 // 1901 // NW [ABI_REG_ARGS] <-- 1) R1_SP 1902 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 1903 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives) 1904 // klass <-- 4) R1_SP + klass_offset 1905 // lock <-- 5) R1_SP + lock_offset 1906 // [workspace] <-- 6) R1_SP + workspace_offset 1907 // [alignment] (optional) <-- 7) 1908 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 1909 // 1910 // - *_slot_offset Indicates offset from SP in number of stack slots. 1911 // - *_offset Indicates offset from SP in bytes. 1912 1913 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2) 1914 + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 1915 1916 // Now the space for the inbound oop handle area. 1917 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 1918 if (is_critical_native) { 1919 // Critical natives may have to call out so they need a save area 1920 // for register arguments. 1921 int double_slots = 0; 1922 int single_slots = 0; 1923 for (int i = 0; i < total_in_args; i++) { 1924 if (in_regs[i].first()->is_Register()) { 1925 const Register reg = in_regs[i].first()->as_Register(); 1926 switch (in_sig_bt[i]) { 1927 case T_BOOLEAN: 1928 case T_BYTE: 1929 case T_SHORT: 1930 case T_CHAR: 1931 case T_INT: 1932 // Fall through. 1933 case T_ARRAY: 1934 case T_LONG: double_slots++; break; 1935 default: ShouldNotReachHere(); 1936 } 1937 } else if (in_regs[i].first()->is_FloatRegister()) { 1938 switch (in_sig_bt[i]) { 1939 case T_FLOAT: single_slots++; break; 1940 case T_DOUBLE: double_slots++; break; 1941 default: ShouldNotReachHere(); 1942 } 1943 } 1944 } 1945 total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even 1946 } 1947 1948 int oop_handle_slot_offset = stack_slots; 1949 stack_slots += total_save_slots; // 3) 1950 1951 int klass_slot_offset = 0; 1952 int klass_offset = -1; 1953 if (method_is_static && !is_critical_native) { // 4) 1954 klass_slot_offset = stack_slots; 1955 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1956 stack_slots += VMRegImpl::slots_per_word; 1957 } 1958 1959 int lock_slot_offset = 0; 1960 int lock_offset = -1; 1961 if (method->is_synchronized()) { // 5) 1962 lock_slot_offset = stack_slots; 1963 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 1964 stack_slots += VMRegImpl::slots_per_word; 1965 } 1966 1967 int workspace_slot_offset = stack_slots; // 6) 1968 stack_slots += 2; 1969 1970 // Now compute actual number of stack words we need. 1971 // Rounding to make stack properly aligned. 1972 stack_slots = align_up(stack_slots, // 7) 1973 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 1974 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 1975 1976 1977 // Now we can start generating code. 
1978   // --------------------------------------------------------------------------
1979
1980   intptr_t start_pc = (intptr_t)__ pc();
1981   intptr_t vep_start_pc;
1982   intptr_t frame_done_pc;
1983   intptr_t oopmap_pc;
1984
1985   Label ic_miss;
1986   Label handle_pending_exception;
1987
1988   Register r_callers_sp = R21;
1989   Register r_temp_1     = R22;
1990   Register r_temp_2     = R23;
1991   Register r_temp_3     = R24;
1992   Register r_temp_4     = R25;
1993   Register r_temp_5     = R26;
1994   Register r_temp_6     = R27;
1995   Register r_return_pc  = R28;
1996
1997   Register r_carg1_jnienv        = noreg;
1998   Register r_carg2_classorobject = noreg;
1999   if (!is_critical_native) {
2000     r_carg1_jnienv        = out_regs[0].first()->as_Register();
2001     r_carg2_classorobject = out_regs[1].first()->as_Register();
2002   }
2003
2004
2005   // Generate the Unverified Entry Point (UEP).
2006   // --------------------------------------------------------------------------
2007   assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2008
2009   // Check ic: object class == cached class?
2010   if (!method_is_static) {
2011     Register ic = as_Register(Matcher::inline_cache_reg_encode());
2012     Register receiver_klass = r_temp_1;
2013
2014     __ cmpdi(CCR0, R3_ARG1, 0);
2015     __ beq(CCR0, ic_miss);
2016     __ verify_oop(R3_ARG1);
2017     __ load_klass(receiver_klass, R3_ARG1);
2018
2019     __ cmpd(CCR0, receiver_klass, ic);
2020     __ bne(CCR0, ic_miss);
2021   }
2022
2023
2024   // Generate the Verified Entry Point (VEP).
2025   // --------------------------------------------------------------------------
2026   vep_start_pc = (intptr_t)__ pc();
2027
2028   __ save_LR_CR(r_temp_1);
2029   __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2030   __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
2031   __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
2032   frame_done_pc = (intptr_t)__ pc();
2033
2034   __ verify_thread();
2035
2036   // Native nmethod wrappers never take possession of the oop arguments.
2037   // So the caller will GC the arguments.
2038   // The only thing we need an oopMap for is if the call is static.
2039   //
2040   // An OopMap for lock (and class if static), and one for the VM call itself.
2041   OopMapSet *oop_maps = new OopMapSet();
2042   OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots */);
2043
2044   if (is_critical_native) {
2045     check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
2046   }
2047
2048   // Move arguments from register/stack to register/stack.
2049   // --------------------------------------------------------------------------
2050   //
2051   // We immediately shuffle the arguments so that for any VM call we have
2052   // to make from here on out (sync slow path, jvmti, etc.) we will have
2053   // captured the oops from our caller and have a valid oopMap for them.
2054   //
2055   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2056   // (derived from JavaThread* which is in R16_thread) and, if static,
2057   // the class mirror instead of a receiver. This pretty much guarantees that
2058   // register layout will not match. We ignore these extra arguments during
2059   // the shuffle. The shuffle is described by the two calling convention
2060   // vectors we have in our possession. We simply walk the Java vector to
2061   // get the source locations and the C vector to get the destinations.
2062
2063   // Record sp-based slot for receiver on stack for non-static methods.
2064 int receiver_offset = -1; 2065 2066 // We move the arguments backward because the floating point registers 2067 // destination will always be to a register with a greater or equal 2068 // register number or the stack. 2069 // in is the index of the incoming Java arguments 2070 // out is the index of the outgoing C arguments 2071 2072 #ifdef ASSERT 2073 bool reg_destroyed[RegisterImpl::number_of_registers]; 2074 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2075 for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) { 2076 reg_destroyed[r] = false; 2077 } 2078 for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) { 2079 freg_destroyed[f] = false; 2080 } 2081 #endif // ASSERT 2082 2083 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2084 2085 #ifdef ASSERT 2086 if (in_regs[in].first()->is_Register()) { 2087 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2088 } else if (in_regs[in].first()->is_FloatRegister()) { 2089 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2090 } 2091 if (out_regs[out].first()->is_Register()) { 2092 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2093 } else if (out_regs[out].first()->is_FloatRegister()) { 2094 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2095 } 2096 if (out_regs2[out].first()->is_Register()) { 2097 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true; 2098 } else if (out_regs2[out].first()->is_FloatRegister()) { 2099 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true; 2100 } 2101 #endif // ASSERT 2102 2103 switch (in_sig_bt[in]) { 2104 case T_BOOLEAN: 2105 case T_CHAR: 2106 case T_BYTE: 2107 case T_SHORT: 2108 case T_INT: 2109 // Move int and do sign extension. 2110 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2111 break; 2112 case T_LONG: 2113 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2114 break; 2115 case T_ARRAY: 2116 if (is_critical_native) { 2117 int body_arg = out; 2118 out -= 1; // Point to length arg. 2119 unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out], 2120 r_callers_sp, r_temp_1, r_temp_2); 2121 break; 2122 } 2123 case T_OBJECT: 2124 assert(!is_critical_native, "no oop arguments"); 2125 object_move(masm, stack_slots, 2126 oop_map, oop_handle_slot_offset, 2127 ((in == 0) && (!method_is_static)), &receiver_offset, 2128 in_regs[in], out_regs[out], 2129 r_callers_sp, r_temp_1, r_temp_2); 2130 break; 2131 case T_VOID: 2132 break; 2133 case T_FLOAT: 2134 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2135 if (out_regs2[out].first()->is_valid()) { 2136 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 2137 } 2138 break; 2139 case T_DOUBLE: 2140 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2141 if (out_regs2[out].first()->is_valid()) { 2142 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 2143 } 2144 break; 2145 case T_ADDRESS: 2146 fatal("found type (T_ADDRESS) in java args"); 2147 break; 2148 default: 2149 ShouldNotReachHere(); 2150 break; 2151 } 2152 } 2153 2154 // Pre-load a static method's oop into ARG2. 2155 // Used both by locking code and the normal JNI call code. 
2156 if (method_is_static && !is_critical_native) { 2157 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2158 r_carg2_classorobject); 2159 2160 // Now handlize the static class mirror in carg2. It's known not-null. 2161 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2162 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2163 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2164 } 2165 2166 // Get JNIEnv* which is first argument to native. 2167 if (!is_critical_native) { 2168 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2169 } 2170 2171 // NOTE: 2172 // 2173 // We have all of the arguments setup at this point. 2174 // We MUST NOT touch any outgoing regs from this point on. 2175 // So if we must call out we must push a new frame. 2176 2177 // Get current pc for oopmap, and load it patchable relative to global toc. 2178 oopmap_pc = (intptr_t) __ pc(); 2179 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2180 2181 // We use the same pc/oopMap repeatedly when we call out. 2182 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2183 2184 // r_return_pc now has the pc loaded that we will use when we finally call 2185 // to native. 2186 2187 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2188 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2189 2190 # if 0 2191 // DTrace method entry 2192 # endif 2193 2194 // Lock a synchronized method. 2195 // -------------------------------------------------------------------------- 2196 2197 if (method->is_synchronized()) { 2198 assert(!is_critical_native, "unhandled"); 2199 ConditionRegister r_flag = CCR1; 2200 Register r_oop = r_temp_4; 2201 const Register r_box = r_temp_5; 2202 Label done, locked; 2203 2204 // Load the oop for the object or class. r_carg2_classorobject contains 2205 // either the handlized oop from the incoming arguments or the handlized 2206 // class mirror (if the method is static). 2207 __ ld(r_oop, 0, r_carg2_classorobject); 2208 2209 // Get the lock box slot's address. 2210 __ addi(r_box, R1_SP, lock_offset); 2211 2212 # ifdef ASSERT 2213 if (UseBiasedLocking) { 2214 // Making the box point to itself will make it clear it went unused 2215 // but also be obviously invalid. 2216 __ std(r_box, 0, r_box); 2217 } 2218 # endif // ASSERT 2219 2220 // Try fastpath for locking. 2221 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2222 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2223 __ beq(r_flag, locked); 2224 2225 // None of the above fast optimizations worked so we have to get into the 2226 // slow case of monitor enter. Inline a special case of call_VM that 2227 // disallows any pending_exception. 2228 2229 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2230 int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2231 __ mr(R11_scratch1, R1_SP); 2232 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 2233 2234 // Do the call. 
2235 __ set_last_Java_frame(R11_scratch1, r_return_pc); 2236 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); 2237 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2238 __ reset_last_Java_frame(); 2239 2240 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2); 2241 2242 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2243 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0); 2244 2245 __ bind(locked); 2246 } 2247 2248 2249 // Publish thread state 2250 // -------------------------------------------------------------------------- 2251 2252 // Use that pc we placed in r_return_pc a while back as the current frame anchor. 2253 __ set_last_Java_frame(R1_SP, r_return_pc); 2254 2255 // Transition from _thread_in_Java to _thread_in_native. 2256 __ li(R0, _thread_in_native); 2257 __ release(); 2258 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2259 __ stw(R0, thread_(thread_state)); 2260 2261 2262 // The JNI call 2263 // -------------------------------------------------------------------------- 2264 #if defined(ABI_ELFv2) 2265 __ call_c(native_func, relocInfo::runtime_call_type); 2266 #else 2267 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func; 2268 __ call_c(fd_native_method, relocInfo::runtime_call_type); 2269 #endif 2270 2271 2272 // Now, we are back from the native code. 2273 2274 2275 // Unpack the native result. 2276 // -------------------------------------------------------------------------- 2277 2278 // For int-types, we do any needed sign-extension required. 2279 // Care must be taken that the return values (R3_RET and F1_RET) 2280 // will survive any VM calls for blocking or unlocking. 2281 // An OOP result (handle) is done specially in the slow-path code. 2282 2283 switch (ret_type) { 2284 case T_VOID: break; // Nothing to do! 2285 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2286 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2287 case T_LONG: break; // Got it where we want it (unless slow-path). 2288 case T_OBJECT: break; // Really a handle. 2289 // Cannot de-handlize until after reclaiming jvm_lock. 2290 case T_ARRAY: break; 2291 2292 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2293 Label skip_modify; 2294 __ cmpwi(CCR0, R3_RET, 0); 2295 __ beq(CCR0, skip_modify); 2296 __ li(R3_RET, 1); 2297 __ bind(skip_modify); 2298 break; 2299 } 2300 case T_BYTE: { // sign extension 2301 __ extsb(R3_RET, R3_RET); 2302 break; 2303 } 2304 case T_CHAR: { // unsigned result 2305 __ andi(R3_RET, R3_RET, 0xffff); 2306 break; 2307 } 2308 case T_SHORT: { // sign extension 2309 __ extsh(R3_RET, R3_RET); 2310 break; 2311 } 2312 case T_INT: // nothing to do 2313 break; 2314 default: 2315 ShouldNotReachHere(); 2316 break; 2317 } 2318 2319 2320 // Publish thread state 2321 // -------------------------------------------------------------------------- 2322 2323 // Switch thread to "native transition" state before reading the 2324 // synchronization state. This additional state is necessary because reading 2325 // and testing the synchronization state is not atomic w.r.t. GC, as this 2326 // scenario demonstrates: 2327 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2328 // and is preempted. 
2329 // - VM thread changes sync state to synchronizing and suspends threads 2330 // for GC. 2331 // - Thread A is resumed to finish this native method, but doesn't block 2332 // here since it didn't see any synchronization in progress, and escapes. 2333 2334 // Transition from _thread_in_native to _thread_in_native_trans. 2335 __ li(R0, _thread_in_native_trans); 2336 __ release(); 2337 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2338 __ stw(R0, thread_(thread_state)); 2339 2340 2341 // Must we block? 2342 // -------------------------------------------------------------------------- 2343 2344 // Block, if necessary, before resuming in _thread_in_Java state. 2345 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2346 Label after_transition; 2347 { 2348 Label no_block, sync; 2349 2350 if (os::is_MP()) { 2351 if (UseMembar) { 2352 // Force this write out before the read below. 2353 __ fence(); 2354 } else { 2355 // Write serialization page so VM thread can do a pseudo remote membar. 2356 // We use the current thread pointer to calculate a thread specific 2357 // offset to write to within the page. This minimizes bus traffic 2358 // due to cache line collision. 2359 __ serialize_memory(R16_thread, r_temp_4, r_temp_5); 2360 } 2361 } 2362 2363 Register sync_state_addr = r_temp_4; 2364 Register sync_state = r_temp_5; 2365 Register suspend_flags = r_temp_6; 2366 2367 __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state); 2368 2369 // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size"); 2370 __ lwz(sync_state, 0, sync_state_addr); 2371 2372 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2373 __ lwz(suspend_flags, thread_(suspend_flags)); 2374 2375 __ acquire(); 2376 2377 Label do_safepoint; 2378 // No synchronization in progress nor yet synchronized. 2379 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized); 2380 // Not suspended. 2381 __ cmpwi(CCR1, suspend_flags, 0); 2382 2383 __ bne(CCR0, sync); 2384 __ beq(CCR1, no_block); 2385 2386 // Block. Save any potential method result value before the operation and 2387 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2388 // lets us share the oopMap we used when we went native rather than create 2389 // a distinct one for this pc. 2390 __ bind(sync); 2391 2392 address entry_point = is_critical_native 2393 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition) 2394 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2395 save_native_result(masm, ret_type, workspace_slot_offset); 2396 __ call_VM_leaf(entry_point, R16_thread); 2397 restore_native_result(masm, ret_type, workspace_slot_offset); 2398 2399 if (is_critical_native) { 2400 __ b(after_transition); // No thread state transition here. 2401 } 2402 __ bind(no_block); 2403 } 2404 2405 // Publish thread state. 2406 // -------------------------------------------------------------------------- 2407 2408 // Thread state is thread_in_native_trans. Any safepoint blocking has 2409 // already happened so we can now change state to _thread_in_Java. 2410 2411 // Transition from _thread_in_native_trans to _thread_in_Java. 
2412   __ li(R0, _thread_in_Java);
2413   __ release();
2414   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2415   __ stw(R0, thread_(thread_state));
2416   __ bind(after_transition);
2417
2418   // Reguard any pages if necessary.
2419   // --------------------------------------------------------------------------
2420
2421   Label no_reguard;
2422   __ lwz(r_temp_1, thread_(stack_guard_state));
2423   __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
2424   __ bne(CCR0, no_reguard);
2425
2426   save_native_result(masm, ret_type, workspace_slot_offset);
2427   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2428   restore_native_result(masm, ret_type, workspace_slot_offset);
2429
2430   __ bind(no_reguard);
2431
2432
2433   // Unlock
2434   // --------------------------------------------------------------------------
2435
2436   if (method->is_synchronized()) {
2437
2438     ConditionRegister r_flag   = CCR1;
2439     const Register r_oop       = r_temp_4;
2440     const Register r_box       = r_temp_5;
2441     const Register r_exception = r_temp_6;
2442     Label done;
2443
2444     // Get oop and address of lock object box.
2445     if (method_is_static) {
2446       assert(klass_offset != -1, "");
2447       __ ld(r_oop, klass_offset, R1_SP);
2448     } else {
2449       assert(receiver_offset != -1, "");
2450       __ ld(r_oop, receiver_offset, R1_SP);
2451     }
2452     __ addi(r_box, R1_SP, lock_offset);
2453
2454     // Try fastpath for unlocking.
2455     __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2456     __ beq(r_flag, done);
2457
2458     // Save and restore any potential method result value around the unlocking operation.
2459     save_native_result(masm, ret_type, workspace_slot_offset);
2460
2461     // Must save pending exception around the slow-path VM call. Since it's a
2462     // leaf call, the pending exception (if any) can be kept in a register.
2463     __ ld(r_exception, thread_(pending_exception));
2464     assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2465     __ li(R0, 0);
2466     __ std(R0, thread_(pending_exception));
2467
2468     // Slow case of monitor exit.
2469     // Inline a special case of call_VM that disallows any pending_exception.
2470     // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2471     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2472
2473     __ asm_assert_mem8_is_zero(thread_(pending_exception),
2474                                "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
2475
2476     restore_native_result(masm, ret_type, workspace_slot_offset);
2477
2478     // Check_forward_pending_exception jumps to forward_exception if any pending
2479     // exception is set. The forward_exception routine expects to see the
2480     // exception in pending_exception and not in a register. Kind of clumsy,
2481     // since all folks who branch to forward_exception must have tested
2482     // pending_exception first and hence have it in a register already.
2483     __ std(r_exception, thread_(pending_exception));
2484
2485     __ bind(done);
2486   }
2487
2488 # if 0
2489   // DTrace method exit
2490 # endif
2491
2492   // Clear "last Java frame" SP and PC.
2493   // --------------------------------------------------------------------------
2494
2495   __ reset_last_Java_frame();
2496
2497   // Unbox oop result, e.g. JNIHandles::resolve value.
2498 // -------------------------------------------------------------------------- 2499 2500 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2501 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false); // kills R31 2502 } 2503 2504 if (CheckJNICalls) { 2505 // clear_pending_jni_exception_check 2506 __ load_const_optimized(R0, 0L); 2507 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 2508 } 2509 2510 // Reset handle block. 2511 // -------------------------------------------------------------------------- 2512 if (!is_critical_native) { 2513 __ ld(r_temp_1, thread_(active_handles)); 2514 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 2515 __ li(r_temp_2, 0); 2516 __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1); 2517 2518 2519 // Check for pending exceptions. 2520 // -------------------------------------------------------------------------- 2521 __ ld(r_temp_2, thread_(pending_exception)); 2522 __ cmpdi(CCR0, r_temp_2, 0); 2523 __ bne(CCR0, handle_pending_exception); 2524 } 2525 2526 // Return 2527 // -------------------------------------------------------------------------- 2528 2529 __ pop_frame(); 2530 __ restore_LR_CR(R11); 2531 __ blr(); 2532 2533 2534 // Handler for pending exceptions (out-of-line). 2535 // -------------------------------------------------------------------------- 2536 2537 // Since this is a native call, we know the proper exception handler 2538 // is the empty function. We just pop this frame and then jump to 2539 // forward_exception_entry. 2540 if (!is_critical_native) { 2541 __ align(InteriorEntryAlignment); 2542 __ bind(handle_pending_exception); 2543 2544 __ pop_frame(); 2545 __ restore_LR_CR(R11); 2546 __ b64_patchable((address)StubRoutines::forward_exception_entry(), 2547 relocInfo::runtime_call_type); 2548 } 2549 2550 // Handler for a cache miss (out-of-line). 2551 // -------------------------------------------------------------------------- 2552 2553 if (!method_is_static) { 2554 __ align(InteriorEntryAlignment); 2555 __ bind(ic_miss); 2556 2557 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), 2558 relocInfo::runtime_call_type); 2559 } 2560 2561 // Done. 2562 // -------------------------------------------------------------------------- 2563 2564 __ flush(); 2565 2566 nmethod *nm = nmethod::new_native_nmethod(method, 2567 compile_id, 2568 masm->code(), 2569 vep_start_pc-start_pc, 2570 frame_done_pc-start_pc, 2571 stack_slots / VMRegImpl::slots_per_word, 2572 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2573 in_ByteSize(lock_offset), 2574 oop_maps); 2575 2576 if (is_critical_native) { 2577 nm->set_lazy_critical_native(true); 2578 } 2579 2580 return nm; 2581 #else 2582 ShouldNotReachHere(); 2583 return NULL; 2584 #endif // COMPILER2 2585 } 2586 2587 // This function returns the adjust size (in number of words) to a c2i adapter 2588 // activation for use during deoptimization. 
2589 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2590 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes); 2591 } 2592 2593 uint SharedRuntime::out_preserve_stack_slots() { 2594 #if defined(COMPILER1) || defined(COMPILER2) 2595 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2596 #else 2597 return 0; 2598 #endif 2599 } 2600 2601 #if defined(COMPILER1) || defined(COMPILER2) 2602 // Frame generation for deopt and uncommon trap blobs. 2603 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2604 /* Read */ 2605 Register unroll_block_reg, 2606 /* Update */ 2607 Register frame_sizes_reg, 2608 Register number_of_frames_reg, 2609 Register pcs_reg, 2610 /* Invalidate */ 2611 Register frame_size_reg, 2612 Register pc_reg) { 2613 2614 __ ld(pc_reg, 0, pcs_reg); 2615 __ ld(frame_size_reg, 0, frame_sizes_reg); 2616 __ std(pc_reg, _abi(lr), R1_SP); 2617 __ push_frame(frame_size_reg, R0/*tmp*/); 2618 #ifdef ASSERT 2619 __ load_const_optimized(pc_reg, 0x5afe); 2620 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2621 #endif 2622 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2623 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2624 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2625 __ addi(pcs_reg, pcs_reg, wordSize); 2626 } 2627 2628 // Loop through the UnrollBlock info and create new frames. 2629 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2630 /* read */ 2631 Register unroll_block_reg, 2632 /* invalidate */ 2633 Register frame_sizes_reg, 2634 Register number_of_frames_reg, 2635 Register pcs_reg, 2636 Register frame_size_reg, 2637 Register pc_reg) { 2638 Label loop; 2639 2640 // _number_of_frames is of type int (deoptimization.hpp) 2641 __ lwa(number_of_frames_reg, 2642 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), 2643 unroll_block_reg); 2644 __ ld(pcs_reg, 2645 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), 2646 unroll_block_reg); 2647 __ ld(frame_sizes_reg, 2648 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), 2649 unroll_block_reg); 2650 2651 // stack: (caller_of_deoptee, ...). 2652 2653 // At this point we either have an interpreter frame or a compiled 2654 // frame on top of stack. If it is a compiled frame we push a new c2i 2655 // adapter here 2656 2657 // Memorize top-frame stack-pointer. 2658 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2659 2660 // Resize interpreter top frame OR C2I adapter. 2661 2662 // At this moment, the top frame (which is the caller of the deoptee) is 2663 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2664 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2665 // outgoing arguments. 2666 // 2667 // In order to push the interpreter frame for the deoptee, we need to 2668 // resize the top frame such that we are able to place the deoptee's 2669 // locals in the frame. 2670 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2671 // into a valid PARENT_IJAVA_FRAME_ABI. 2672 2673 __ lwa(R11_scratch1, 2674 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), 2675 unroll_block_reg); 2676 __ neg(R11_scratch1, R11_scratch1); 2677 2678 // R11_scratch1 contains size of locals for frame resizing. 2679 // R12_scratch2 contains top frame's lr. 2680 2681 // Resize frame by complete frame size prevents TOC from being 2682 // overwritten by locals. 
A more stack space saving way would be 2683 // to copy the TOC to its location in the new abi. 2684 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2685 2686 // now, resize the frame 2687 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2688 2689 // In the case where we have resized a c2i frame above, the optional 2690 // alignment below the locals has size 32 (why?). 2691 __ std(R12_scratch2, _abi(lr), R1_SP); 2692 2693 // Initialize initial_caller_sp. 2694 #ifdef ASSERT 2695 __ load_const_optimized(pc_reg, 0x5afe); 2696 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP); 2697 #endif 2698 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2699 2700 #ifdef ASSERT 2701 // Make sure that there is at least one entry in the array. 2702 __ cmpdi(CCR0, number_of_frames_reg, 0); 2703 __ asm_assert_ne("array_size must be > 0", 0x205); 2704 #endif 2705 2706 // Now push the new interpreter frames. 2707 // 2708 __ bind(loop); 2709 // Allocate a new frame, fill in the pc. 2710 push_skeleton_frame(masm, deopt, 2711 unroll_block_reg, 2712 frame_sizes_reg, 2713 number_of_frames_reg, 2714 pcs_reg, 2715 frame_size_reg, 2716 pc_reg); 2717 __ cmpdi(CCR0, number_of_frames_reg, 0); 2718 __ bne(CCR0, loop); 2719 2720 // Get the return address pointing into the frame manager. 2721 __ ld(R0, 0, pcs_reg); 2722 // Store it in the top interpreter frame. 2723 __ std(R0, _abi(lr), R1_SP); 2724 // Initialize frame_manager_lr of interpreter top frame. 2725 } 2726 #endif 2727 2728 void SharedRuntime::generate_deopt_blob() { 2729 // Allocate space for the code 2730 ResourceMark rm; 2731 // Setup code generation tools 2732 CodeBuffer buffer("deopt_blob", 2048, 1024); 2733 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2734 Label exec_mode_initialized; 2735 int frame_size_in_words; 2736 OopMap* map = NULL; 2737 OopMapSet *oop_maps = new OopMapSet(); 2738 2739 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2740 const int frame_size_in_bytes = frame::abi_reg_args_spill_size; 2741 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2742 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2743 2744 const Register exec_mode_reg = R21_tmp1; 2745 2746 const address start = __ pc(); 2747 2748 #if defined(COMPILER1) || defined(COMPILER2) 2749 // -------------------------------------------------------------------------- 2750 // Prolog for non exception case! 2751 2752 // We have been called from the deopt handler of the deoptee. 2753 // 2754 // deoptee: 2755 // ... 2756 // call X 2757 // ... 2758 // deopt_handler: call_deopt_stub 2759 // cur. return pc --> ... 2760 // 2761 // So currently SR_LR points behind the call in the deopt handler. 2762 // We adjust it such that it points to the start of the deopt handler. 2763 // The return_pc has been stored in the frame of the deoptee and 2764 // will replace the address of the deopt_handler in the call 2765 // to Deoptimization::fetch_unroll_info below. 2766 // We can't grab a free register here, because all registers may 2767 // contain live values, so let the RegisterSaver do the adjustment 2768 // of the return pc. 2769 const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler(); 2770 2771 // Push the "unpack frame" 2772 // Save everything in sight. 
2773 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2774 &first_frame_size_in_bytes, 2775 /*generate_oop_map=*/ true, 2776 return_pc_adjustment_no_exception, 2777 RegisterSaver::return_pc_is_lr); 2778 assert(map != NULL, "OopMap must have been created"); 2779 2780 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2781 // Save exec mode for unpack_frames. 2782 __ b(exec_mode_initialized); 2783 2784 // -------------------------------------------------------------------------- 2785 // Prolog for exception case 2786 2787 // An exception is pending. 2788 // We have been called with a return (interpreter) or a jump (exception blob). 2789 // 2790 // - R3_ARG1: exception oop 2791 // - R4_ARG2: exception pc 2792 2793 int exception_offset = __ pc() - start; 2794 2795 BLOCK_COMMENT("Prolog for exception case"); 2796 2797 // Store exception oop and pc in thread (location known to GC). 2798 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2799 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2800 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2801 __ std(R4_ARG2, _abi(lr), R1_SP); 2802 2803 // Vanilla deoptimization with an exception pending in exception_oop. 2804 int exception_in_tls_offset = __ pc() - start; 2805 2806 // Push the "unpack frame". 2807 // Save everything in sight. 2808 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2809 &first_frame_size_in_bytes, 2810 /*generate_oop_map=*/ false, 2811 /*return_pc_adjustment_exception=*/ 0, 2812 RegisterSaver::return_pc_is_pre_saved); 2813 2814 // Deopt during an exception. Save exec mode for unpack_frames. 2815 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2816 2817 // fall through 2818 2819 int reexecute_offset = 0; 2820 #ifdef COMPILER1 2821 __ b(exec_mode_initialized); 2822 2823 // Reexecute entry, similar to c2 uncommon trap 2824 reexecute_offset = __ pc() - start; 2825 2826 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2827 &first_frame_size_in_bytes, 2828 /*generate_oop_map=*/ false, 2829 /*return_pc_adjustment_reexecute=*/ 0, 2830 RegisterSaver::return_pc_is_pre_saved); 2831 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2832 #endif 2833 2834 // -------------------------------------------------------------------------- 2835 __ BIND(exec_mode_initialized); 2836 2837 { 2838 const Register unroll_block_reg = R22_tmp2; 2839 2840 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2841 // call `last_Java_frame()'. The value of the pc in the frame is not 2842 // particularly important. It just needs to identify this blob. 2843 __ set_last_Java_frame(R1_SP, noreg); 2844 2845 // With EscapeAnalysis turned on, this call may safepoint! 2846 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2847 address calls_return_pc = __ last_calls_return_pc(); 2848 // Set an oopmap for the call site that describes all our saved registers. 2849 oop_maps->add_gc_map(calls_return_pc - start, map); 2850 2851 __ reset_last_Java_frame(); 2852 // Save the return value. 2853 __ mr(unroll_block_reg, R3_RET); 2854 2855 // Restore only the result registers that have been saved 2856 // by save_volatile_registers(...). 
2857 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2858 2859 // reload the exec mode from the UnrollBlock (it might have changed) 2860 __ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2861 // In excp_deopt_mode, restore and clear exception oop which we 2862 // stored in the thread during exception entry above. The exception 2863 // oop will be the return value of this stub. 2864 Label skip_restore_excp; 2865 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2866 __ bne(CCR0, skip_restore_excp); 2867 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2868 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2869 __ li(R0, 0); 2870 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2871 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2872 __ BIND(skip_restore_excp); 2873 2874 __ pop_frame(); 2875 2876 // stack: (deoptee, optional i2c, caller of deoptee, ...). 2877 2878 // pop the deoptee's frame 2879 __ pop_frame(); 2880 2881 // stack: (caller_of_deoptee, ...). 2882 2883 // Loop through the `UnrollBlock' info and create interpreter frames. 2884 push_skeleton_frames(masm, true/*deopt*/, 2885 unroll_block_reg, 2886 R23_tmp3, 2887 R24_tmp4, 2888 R25_tmp5, 2889 R26_tmp6, 2890 R27_tmp7); 2891 2892 // stack: (skeletal interpreter frame, ..., optional skeletal 2893 // interpreter frame, optional c2i, caller of deoptee, ...). 2894 } 2895 2896 // push an `unpack_frame' taking care of float / int return values. 2897 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 2898 2899 // stack: (unpack frame, skeletal interpreter frame, ..., optional 2900 // skeletal interpreter frame, optional c2i, caller of deoptee, 2901 // ...). 2902 2903 // Spill live volatile registers since we'll do a call. 2904 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2905 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2906 2907 // Let the unpacker layout information in the skeletal frames just 2908 // allocated. 2909 __ get_PC_trash_LR(R3_RET); 2910 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 2911 // This is a call to a LEAF method, so no oop map is required. 2912 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 2913 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 2914 __ reset_last_Java_frame(); 2915 2916 // Restore the volatiles saved above. 2917 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2918 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2919 2920 // Pop the unpack frame. 2921 __ pop_frame(); 2922 __ restore_LR_CR(R0); 2923 2924 // stack: (top interpreter frame, ..., optional interpreter frame, 2925 // optional c2i, caller of deoptee, ...). 2926 2927 // Initialize R14_state. 2928 __ restore_interpreter_state(R11_scratch1); 2929 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2930 2931 // Return to the interpreter entry point. 
2932 __ blr(); 2933 __ flush(); 2934 #else // COMPILER2 2935 __ unimplemented("deopt blob needed only with compiler"); 2936 int exception_offset = __ pc() - start; 2937 #endif // COMPILER2 2938 2939 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 2940 reexecute_offset, first_frame_size_in_bytes / wordSize); 2941 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2942 } 2943 2944 #ifdef COMPILER2 2945 void SharedRuntime::generate_uncommon_trap_blob() { 2946 // Allocate space for the code. 2947 ResourceMark rm; 2948 // Setup code generation tools. 2949 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 2950 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2951 address start = __ pc(); 2952 2953 Register unroll_block_reg = R21_tmp1; 2954 Register klass_index_reg = R22_tmp2; 2955 Register unc_trap_reg = R23_tmp3; 2956 2957 OopMapSet* oop_maps = new OopMapSet(); 2958 int frame_size_in_bytes = frame::abi_reg_args_size; 2959 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 2960 2961 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2962 2963 // Push a dummy `unpack_frame' and call 2964 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 2965 // vframe array and return the `UnrollBlock' information. 2966 2967 // Save LR to compiled frame. 2968 __ save_LR_CR(R11_scratch1); 2969 2970 // Push an "uncommon_trap" frame. 2971 __ push_frame_reg_args(0, R11_scratch1); 2972 2973 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 2974 2975 // Set the `unpack_frame' as last_Java_frame. 2976 // `Deoptimization::uncommon_trap' expects it and considers its 2977 // sender frame as the deoptee frame. 2978 // Remember the offset of the instruction whose address will be 2979 // moved to R11_scratch1. 2980 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 2981 2982 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 2983 2984 __ mr(klass_index_reg, R3); 2985 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 2986 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 2987 R16_thread, klass_index_reg, R5_ARG3); 2988 2989 // Set an oopmap for the call site. 2990 oop_maps->add_gc_map(gc_map_pc - start, map); 2991 2992 __ reset_last_Java_frame(); 2993 2994 // Pop the `unpack frame'. 2995 __ pop_frame(); 2996 2997 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2998 2999 // Save the return value. 3000 __ mr(unroll_block_reg, R3_RET); 3001 3002 // Pop the uncommon_trap frame. 3003 __ pop_frame(); 3004 3005 // stack: (caller_of_deoptee, ...). 3006 3007 #ifdef ASSERT 3008 __ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 3009 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3010 __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0); 3011 #endif 3012 3013 // Allocate new interpreter frame(s) and possibly a c2i adapter 3014 // frame. 3015 push_skeleton_frames(masm, false/*deopt*/, 3016 unroll_block_reg, 3017 R22_tmp2, 3018 R23_tmp3, 3019 R24_tmp4, 3020 R25_tmp5, 3021 R26_tmp6); 3022 3023 // stack: (skeletal interpreter frame, ..., optional skeletal 3024 // interpreter frame, optional c2i, caller of deoptee, ...). 3025 3026 // Push a dummy `unpack_frame' taking care of float return values. 3027 // Call `Deoptimization::unpack_frames' to layout information in the 3028 // interpreter frames just created. 
3029
3030   // Push a simple "unpack frame" here.
3031   __ push_frame_reg_args(0, R11_scratch1);
3032
3033   // stack: (unpack frame, skeletal interpreter frame, ..., optional
3034   //         skeletal interpreter frame, optional c2i, caller of deoptee,
3035   //         ...).
3036
3037   // Set the "unpack_frame" as last_Java_frame.
3038   __ get_PC_trash_LR(R11_scratch1);
3039   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3040
3041   // Indicate it is the uncommon trap case.
3042   __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
3043   // Let the unpacker layout information in the skeletal frames just
3044   // allocated.
3045   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3046                   R16_thread, unc_trap_reg);
3047
3048   __ reset_last_Java_frame();
3049   // Pop the `unpack frame'.
3050   __ pop_frame();
3051   // Restore LR from top interpreter frame.
3052   __ restore_LR_CR(R11_scratch1);
3053
3054   // stack: (top interpreter frame, ..., optional interpreter frame,
3055   //         optional c2i, caller of deoptee, ...).
3056
3057   __ restore_interpreter_state(R11_scratch1);
3058   __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3059
3060   // Return to the interpreter entry point.
3061   __ blr();
3062
3063   masm->flush();
3064
3065   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3066 }
3067 #endif // COMPILER2
3068
3069 // Generate a special Compile2Runtime blob that saves all registers and sets up the oopmap.
3070 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3071   assert(StubRoutines::forward_exception_entry() != NULL,
3072          "must be generated before");
3073
3074   ResourceMark rm;
3075   OopMapSet *oop_maps = new OopMapSet();
3076   OopMap* map;
3077
3078   // Allocate space for the code. Setup code generation tools.
3079   CodeBuffer buffer("handler_blob", 2048, 1024);
3080   MacroAssembler* masm = new MacroAssembler(&buffer);
3081
3082   address start = __ pc();
3083   int frame_size_in_bytes = 0;
3084
3085   RegisterSaver::ReturnPCLocation return_pc_location;
3086   bool cause_return = (poll_type == POLL_AT_RETURN);
3087   if (cause_return) {
3088     // Nothing to do here. The frame has already been popped in MachEpilogNode.
3089     // Register LR already contains the return pc.
3090     return_pc_location = RegisterSaver::return_pc_is_lr;
3091   } else {
3092     // Use thread()->saved_exception_pc() as return pc.
3093     return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3094   }
3095
3096   // Save registers, fpu state, and flags.
3097   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3098                                                                    &frame_size_in_bytes,
3099                                                                    /*generate_oop_map=*/ true,
3100                                                                    /*return_pc_adjustment=*/0,
3101                                                                    return_pc_location);
3102
3103   // The following is basically a call_VM. However, we need the precise
3104   // address of the call in order to generate an oopmap. Hence, we do all the
3105   // work ourselves.
3106   __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3107
3108   // The return address must always be correct so that the frame constructor
3109   // never sees an invalid pc.
3110
3111   // Do the call.
3112   __ call_VM_leaf(call_ptr, R16_thread);
3113   address calls_return_pc = __ last_calls_return_pc();
3114
3115   // Set an oopmap for the call site. This oopmap will map all
3116   // oop-registers and debug-info registers as callee-saved.
This 3117 // will allow deoptimization at this safepoint to find all possible 3118 // debug-info recordings, as well as let GC find all oops. 3119 oop_maps->add_gc_map(calls_return_pc - start, map); 3120 3121 Label noException; 3122 3123 // Clear the last Java frame. 3124 __ reset_last_Java_frame(); 3125 3126 BLOCK_COMMENT(" Check pending exception."); 3127 const Register pending_exception = R0; 3128 __ ld(pending_exception, thread_(pending_exception)); 3129 __ cmpdi(CCR0, pending_exception, 0); 3130 __ beq(CCR0, noException); 3131 3132 // Exception pending 3133 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3134 frame_size_in_bytes, 3135 /*restore_ctr=*/true); 3136 3137 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3138 // Jump to forward_exception_entry, with the issuing PC in LR 3139 // so it looks like the original nmethod called forward_exception_entry. 3140 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3141 3142 // No exception case. 3143 __ BIND(noException); 3144 3145 3146 // Normal exit, restore registers and exit. 3147 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3148 frame_size_in_bytes, 3149 /*restore_ctr=*/true); 3150 3151 __ blr(); 3152 3153 // Make sure all code is generated 3154 masm->flush(); 3155 3156 // Fill-out other meta info 3157 // CodeBlob frame size is in words. 3158 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3159 } 3160 3161 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3162 // 3163 // Generate a stub that calls into the vm to find out the proper destination 3164 // of a java call. All the argument registers are live at this point 3165 // but since this is generic code we don't know what they are and the caller 3166 // must do any gc of the args. 3167 // 3168 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 3169 3170 // allocate space for the code 3171 ResourceMark rm; 3172 3173 CodeBuffer buffer(name, 1000, 512); 3174 MacroAssembler* masm = new MacroAssembler(&buffer); 3175 3176 int frame_size_in_bytes; 3177 3178 OopMapSet *oop_maps = new OopMapSet(); 3179 OopMap* map = NULL; 3180 3181 address start = __ pc(); 3182 3183 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3184 &frame_size_in_bytes, 3185 /*generate_oop_map*/ true, 3186 /*return_pc_adjustment*/ 0, 3187 RegisterSaver::return_pc_is_lr); 3188 3189 // Use noreg as last_Java_pc, the return pc will be reconstructed 3190 // from the physical frame. 3191 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3192 3193 int frame_complete = __ offset(); 3194 3195 // Pass R19_method as 2nd (optional) argument, used by 3196 // counter_overflow_stub. 3197 __ call_VM_leaf(destination, R16_thread, R19_method); 3198 address calls_return_pc = __ last_calls_return_pc(); 3199 // Set an oopmap for the call site. 3200 // We need this not only for callee-saved registers, but also for volatile 3201 // registers that the compiler might be keeping live across a safepoint. 3202 // Create the oopmap for the call's return pc. 3203 oop_maps->add_gc_map(calls_return_pc - start, map); 3204 3205 // R3_RET contains the address we are going to jump to assuming no exception got installed. 3206 3207 // clear last_Java_sp 3208 __ reset_last_Java_frame(); 3209 3210 // Check for pending exceptions. 
3211 BLOCK_COMMENT("Check for pending exceptions."); 3212 Label pending; 3213 __ ld(R11_scratch1, thread_(pending_exception)); 3214 __ cmpdi(CCR0, R11_scratch1, 0); 3215 __ bne(CCR0, pending); 3216 3217 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 3218 3219 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3220 3221 // Get the returned method. 3222 __ get_vm_result_2(R19_method); 3223 3224 __ bctr(); 3225 3226 3227 // Pending exception after the safepoint. 3228 __ BIND(pending); 3229 3230 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3231 3232 // exception pending => remove activation and forward to exception handler 3233 3234 __ li(R11_scratch1, 0); 3235 __ ld(R3_ARG1, thread_(pending_exception)); 3236 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread); 3237 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3238 3239 // ------------- 3240 // Make sure all code is generated. 3241 masm->flush(); 3242 3243 // return the blob 3244 // frame_size_words or bytes?? 3245 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3246 oop_maps, true); 3247 } 3248 3249 3250 //------------------------------Montgomery multiplication------------------------ 3251 // 3252 3253 // Subtract 0:b from carry:a. Return carry. 3254 static unsigned long 3255 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3256 long i = 0; 3257 unsigned long tmp, tmp2; 3258 __asm__ __volatile__ ( 3259 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3260 "mtctr %[len] \n" 3261 "0: \n" 3262 "ldx %[tmp], %[i], %[a] \n" 3263 "ldx %[tmp2], %[i], %[b] \n" 3264 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3265 "stdx %[tmp], %[i], %[a] \n" 3266 "addi %[i], %[i], 8 \n" 3267 "bdnz 0b \n" 3268 "addme %[tmp], %[carry] \n" // carry + CA - 1 3269 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3270 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3271 : "ctr", "xer", "memory" 3272 ); 3273 return tmp; 3274 } 3275 3276 // Multiply (unsigned) Long A by Long B, accumulating the double- 3277 // length result into the accumulator formed of T0, T1, and T2. 3278 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3279 unsigned long hi, lo; 3280 __asm__ __volatile__ ( 3281 "mulld %[lo], %[A], %[B] \n" 3282 "mulhdu %[hi], %[A], %[B] \n" 3283 "addc %[T0], %[T0], %[lo] \n" 3284 "adde %[T1], %[T1], %[hi] \n" 3285 "addze %[T2], %[T2] \n" 3286 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3287 : [A]"r"(A), [B]"r"(B) 3288 : "xer" 3289 ); 3290 } 3291 3292 // As above, but add twice the double-length result into the 3293 // accumulator. 3294 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3295 unsigned long hi, lo; 3296 __asm__ __volatile__ ( 3297 "mulld %[lo], %[A], %[B] \n" 3298 "mulhdu %[hi], %[A], %[B] \n" 3299 "addc %[T0], %[T0], %[lo] \n" 3300 "adde %[T1], %[T1], %[hi] \n" 3301 "addze %[T2], %[T2] \n" 3302 "addc %[T0], %[T0], %[lo] \n" 3303 "adde %[T1], %[T1], %[hi] \n" 3304 "addze %[T2], %[T2] \n" 3305 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3306 : [A]"r"(A), [B]"r"(B) 3307 : "xer" 3308 ); 3309 } 3310 3311 // Fast Montgomery multiplication. 
// Fast Montgomery multiplication. The derivation of the algorithm is
// given in Dusse and Kaliski, "A Cryptographic Library for the Motorola
// DSP56000", Proc. EUROCRYPT '90, pp. 230-237.
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies than Montgomery multiplication, so it should be up to 25%
// faster. However, its loop control is more complex and it may actually
// run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
// It doesn't seem to be relevant for Power8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64
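
// The multiply and square loops above rely on the defining property of
// 'inv': inv * n[0] == -1 (mod 2^64), so m[i] = t0 * inv is the unique
// digit that makes t0 + m[i]*n[0] vanish mod 2^64 (hence the
// assert(t0 == 0) after each reduction step). The VM receives 'inv'
// precomputed from the Java side, but for reference it can be derived by
// Newton iteration; a hedged, build-excluded sketch:
#if 0
static unsigned long montgomery_inverse(unsigned long n0) {
  // For odd n0, n0 is its own inverse mod 8, and each Newton step
  // doubles the number of correct low-order bits: 3 -> 6 -> 12 -> 24 -> 48 -> 96.
  unsigned long x = n0;
  for (int k = 0; k < 5; k++) {
    x = x * (2 - n0 * x);
  }
  return 0UL - x;  // negate, so the result times n0 is -1 (mod 2^64)
}
#endif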
// Copy len longwords from s to d, word-swapping as we go. The
// destination array is written in reverse order.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while (len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little-endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use a total of 8K bytes of stack space here.
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use a total of 6K bytes of stack space here.
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
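
// A hedged usage sketch for the two entry points above (illustrative
// names and values, deliberately excluded from the build). The jint
// arrays use BigInteger's big-endian magnitude layout, 'len' counts
// jints, and the result is the Montgomery product a * b * R^-1 mod n
// with R = 2^(32*len).
#if 0
static void montgomery_usage_example() {
  // One-longword case (len = 2 jints): n = 257 (odd), a = 5, b = 7.
  jint n[2] = { 0, 0x101 };
  jint a[2] = { 0, 5 };
  jint b[2] = { 0, 7 };
  jint m[2];
  // inv = -257^-1 mod 2^64 (0x00FF00FF00FF00FF * 257 == 0xFFFFFFFFFFFFFFFF);
  // in the VM this constant arrives precomputed from the Java caller.
  jlong inv = (jlong)0x00FF00FF00FF00FFUL;
  SharedRuntime::montgomery_multiply(a, b, n, 2, inv, m);
  // m now holds 5 * 7 * 2^-64 mod 257, i.e. the product in the
  // Montgomery domain.
}
#endif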