/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

class RegisterSaver {
  // Capture info about frame layout. Layout offsets are in jint
  // units because compiler frame slots are jints.
#define HALF_ZMM_BANK_WORDS 128
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    zmm_off = fpu_state_off + ((FPUStateSizeInWords - (HALF_ZMM_BANK_WORDS + 1))*wordSize / BytesPerInt),
    DEF_ZMM_OFFS(16),
    DEF_ZMM_OFFS(17),
    DEF_ZMM_OFFS(18),
    DEF_ZMM_OFFS(19),
    DEF_ZMM_OFFS(20),
    DEF_ZMM_OFFS(21),
    DEF_ZMM_OFFS(22),
    DEF_ZMM_OFFS(23),
    DEF_ZMM_OFFS(24),
    DEF_ZMM_OFFS(25),
    DEF_ZMM_OFFS(26),
    DEF_ZMM_OFFS(27),
    DEF_ZMM_OFFS(28),
    DEF_ZMM_OFFS(29),
    DEF_ZMM_OFFS(30),
    DEF_ZMM_OFFS(31),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int vect_words = 0;
  int ymmhi_offset = -1;
  int off = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
    assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
    assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
    // Save upper half of YMM registers
    vect_words = 16 * num_xmm_regs / wordSize;
    if (UseAVX < 3) {
      ymmhi_offset = additional_frame_words;
      additional_frame_words += vect_words;
    }
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, num_xmm_regs);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  // push_CPU_state handles this on EVEX enabled targets
  if ((vect_words > 0) && (UseAVX < 3)) {
    assert(vect_words*wordSize >= 256, "");
    // Save upper half of YMM registers (0..num_xmm_regs)
    __ subptr(rsp, num_xmm_regs*16);
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
    }
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
#define YMMHI_STACK_OFFSET(x) VMRegImpl::stack2reg((x / VMRegImpl::stack_slot_size) + ymmhi_offset)

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
  // on EVEX enabled targets, we get it included in the xsave area
  off = xmm0_off;
  int delta = xmm1_off - off;
  for (int n = 0; n < 16; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    off += delta;
  }
  if (UseAVX > 2) {
    // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
    off = zmm16_off;
    delta = zmm17_off - off;
    for (int n = 16; n < num_xmm_regs; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
      off += delta;
    }
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
    assert(ymmhi_offset != -1, "save area must exist");
    map->set_callee_saved(YMMHI_STACK_OFFSET(  0), xmm0->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 16), xmm1->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 32), xmm2->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 48), xmm3->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 64), xmm4->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 80), xmm5->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET( 96), xmm6->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(112), xmm7->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(128), xmm8->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(144), xmm9->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(160), xmm10->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(176), xmm11->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(192), xmm12->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(208), xmm13->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(224), xmm14->as_VMReg()->next(4));
    map->set_callee_saved(YMMHI_STACK_OFFSET(240), xmm15->as_VMReg()->next(4));
  }
#endif // COMPILER2 || INCLUDE_JVMCI

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
    // on EVEX enabled targets, we get it included in the xsave area
    off = xmm0H_off;
    delta = xmm1H_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
      off += delta;
    }
    if (UseAVX > 2) {
      // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
      off = zmm16H_off;
      delta = zmm17H_off - off;
      for (int n = 16; n < num_xmm_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
        off += delta;
      }
    }
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
#if defined(COMPILER2) || INCLUDE_JVMCI
  // On EVEX enabled targets everything is handled in pop fpu state
  if ((restore_vectors) && (UseAVX < 3)) {
    assert(UseAVX > 0, "256/512-bit vectors are supported only with AVX");
    assert(MaxVectorSize == 64, "up to 512bit vectors are supported now");
    int off = 0;
    // Restore upper half of YMM registers (0..num_xmm_regs)
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
    }
    __ addptr(rsp, num_xmm_regs*16);
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
// up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
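  // Up to 6 integer/oop arguments and 8 floating-point arguments are passed in
  // the registers listed below; any remaining arguments are passed on the stack,
  // with each one consuming two 4-byte stack slots (stk_args is bumped by 2).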
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}


static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus 1 because
  // we also account for the return address location since
  // we store it first rather than hold it in rax across all the shuffling

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double in
    // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32bits to slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args. Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // push the return address and misalign the stack so that the youngest frame
  // always sees the expected layout as far as the placement of the call
  // instruction is concerned
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    Label no_alternative_target;
    __ jcc(Assembler::equal, no_alternative_target);
    __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    //
    //
    //
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // put Method* where a c2i would expect should we end up there
  // only needed because c2 resolve stubs return Method* as a result in
  // rax
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).
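  // The unverified entry first performs the inline-cache check: the receiver's
  // klass is loaded and compared against the klass cached in the CompiledICHolder,
  // and a mismatch (or a Method* that has since been compiled) is routed to the
  // IC miss stub so the call site can be corrected.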

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // windows abi requires that we always allocate enough stack space
  // for 4 64bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (sparc abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register, so we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}


// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
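// The incoming arguments are spilled with save_or_restore_arguments() before the
// call into the VM and reloaded afterwards, so the wrapper's argument registers
// survive the transition.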
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp);  // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
1484 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { 1485 Register tmp_reg = rax; 1486 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg, 1487 "possible collision"); 1488 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg, 1489 "possible collision"); 1490 1491 __ block_comment("unpack_array_argument {"); 1492 1493 // Pass the length, ptr pair 1494 Label is_null, done; 1495 VMRegPair tmp; 1496 tmp.set_ptr(tmp_reg->as_VMReg()); 1497 if (reg.first()->is_stack()) { 1498 // Load the arg up from the stack 1499 move_ptr(masm, reg, tmp); 1500 reg = tmp; 1501 } 1502 __ testptr(reg.first()->as_Register(), reg.first()->as_Register()); 1503 __ jccb(Assembler::equal, is_null); 1504 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type))); 1505 move_ptr(masm, tmp, body_arg); 1506 // load the length relative to the body. 1507 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() - 1508 arrayOopDesc::base_offset_in_bytes(in_elem_type))); 1509 move32_64(masm, tmp, length_arg); 1510 __ jmpb(done); 1511 __ bind(is_null); 1512 // Pass zeros 1513 __ xorptr(tmp_reg, tmp_reg); 1514 move_ptr(masm, tmp, body_arg); 1515 move32_64(masm, tmp, length_arg); 1516 __ bind(done); 1517 1518 __ block_comment("} unpack_array_argument"); 1519 } 1520 1521 1522 // Different signatures may require very different orders for the move 1523 // to avoid clobbering other arguments. There's no simple way to 1524 // order them safely. Compute a safe order for issuing stores and 1525 // break any cycles in those stores. This code is fairly general but 1526 // it's not necessary on the other platforms so we keep it in the 1527 // platform dependent code instead of moving it into a shared file. 1528 // (See bugs 7013347 & 7145024.) 1529 // Note that this code is specific to LP64. 
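// For illustration: if the shuffle needs both rdi -> rsi and rsi -> rdi, neither
// store can be issued first without clobbering the other's source. ComputeMoveOrder
// records each register-to-register move as an edge, links every store to the store
// that kills its source, and breaks any resulting cycle through a temp register
// (the caller passes rbx), e.g. rsi -> rbx, rdi -> rsi, rbx -> rdi.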
1530 class ComputeMoveOrder: public StackObj { 1531 class MoveOperation: public ResourceObj { 1532 friend class ComputeMoveOrder; 1533 private: 1534 VMRegPair _src; 1535 VMRegPair _dst; 1536 int _src_index; 1537 int _dst_index; 1538 bool _processed; 1539 MoveOperation* _next; 1540 MoveOperation* _prev; 1541 1542 static int get_id(VMRegPair r) { 1543 return r.first()->value(); 1544 } 1545 1546 public: 1547 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst): 1548 _src(src) 1549 , _src_index(src_index) 1550 , _dst(dst) 1551 , _dst_index(dst_index) 1552 , _next(NULL) 1553 , _prev(NULL) 1554 , _processed(false) { 1555 } 1556 1557 VMRegPair src() const { return _src; } 1558 int src_id() const { return get_id(src()); } 1559 int src_index() const { return _src_index; } 1560 VMRegPair dst() const { return _dst; } 1561 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; } 1562 int dst_index() const { return _dst_index; } 1563 int dst_id() const { return get_id(dst()); } 1564 MoveOperation* next() const { return _next; } 1565 MoveOperation* prev() const { return _prev; } 1566 void set_processed() { _processed = true; } 1567 bool is_processed() const { return _processed; } 1568 1569 // insert 1570 void break_cycle(VMRegPair temp_register) { 1571 // create a new store following the last store 1572 // to move from the temp_register to the original 1573 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst()); 1574 1575 // break the cycle of links and insert new_store at the end 1576 // break the reverse link. 1577 MoveOperation* p = prev(); 1578 assert(p->next() == this, "must be"); 1579 _prev = NULL; 1580 p->_next = new_store; 1581 new_store->_prev = p; 1582 1583 // change the original store to save its value in the temp. 1584 set_dst(-1, temp_register); 1585 } 1586 1587 void link(GrowableArray<MoveOperation*>& killer) { 1588 // link this store in front of the store that it depends on 1589 MoveOperation* n = killer.at_grow(src_id(), NULL); 1590 if (n != NULL) { 1591 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet"); 1592 _next = n; 1593 n->_prev = this; 1594 } 1595 } 1596 }; 1597 1598 private: 1599 GrowableArray<MoveOperation*> edges; 1600 1601 public: 1602 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs, 1603 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { 1604 // Move operations where the dest is the stack can all be 1605 // scheduled first since they can't interfere with the other moves.
1606 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { 1607 if (in_sig_bt[i] == T_ARRAY) { 1608 c_arg--; 1609 if (out_regs[c_arg].first()->is_stack() && 1610 out_regs[c_arg + 1].first()->is_stack()) { 1611 arg_order.push(i); 1612 arg_order.push(c_arg); 1613 } else { 1614 if (out_regs[c_arg].first()->is_stack() || 1615 in_regs[i].first() == out_regs[c_arg].first()) { 1616 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]); 1617 } else { 1618 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]); 1619 } 1620 } 1621 } else if (in_sig_bt[i] == T_VOID) { 1622 arg_order.push(i); 1623 arg_order.push(c_arg); 1624 } else { 1625 if (out_regs[c_arg].first()->is_stack() || 1626 in_regs[i].first() == out_regs[c_arg].first()) { 1627 arg_order.push(i); 1628 arg_order.push(c_arg); 1629 } else { 1630 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]); 1631 } 1632 } 1633 } 1634 // Break any cycles in the register moves and emit them in the 1635 // proper order. 1636 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg); 1637 for (int i = 0; i < stores->length(); i++) { 1638 arg_order.push(stores->at(i)->src_index()); 1639 arg_order.push(stores->at(i)->dst_index()); 1640 } 1641 } 1642 1643 // Collect all the move operations 1644 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { 1645 if (src.first() == dst.first()) return; 1646 edges.append(new MoveOperation(src_index, src, dst_index, dst)); 1647 } 1648 1649 // Walk the edges breaking cycles between moves. The result list 1650 // can be walked in order to produce the proper set of loads 1651 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { 1652 // Record which moves kill which values 1653 GrowableArray<MoveOperation*> killer; 1654 for (int i = 0; i < edges.length(); i++) { 1655 MoveOperation* s = edges.at(i); 1656 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer"); 1657 killer.at_put_grow(s->dst_id(), s, NULL); 1658 } 1659 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL, 1660 "make sure temp isn't in the registers that are killed"); 1661 1662 // create links between loads and stores 1663 for (int i = 0; i < edges.length(); i++) { 1664 edges.at(i)->link(killer); 1665 } 1666 1667 // at this point, all the move operations are chained together 1668 // in a doubly linked list. Processing it backwards finds 1669 // the beginning of the chain, forwards finds the end. If there's 1670 // a cycle it can be broken at any point, so pick an edge and walk 1671 // backward until the list ends or we end where we started.
1672 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>(); 1673 for (int e = 0; e < edges.length(); e++) { 1674 MoveOperation* s = edges.at(e); 1675 if (!s->is_processed()) { 1676 MoveOperation* start = s; 1677 // search for the beginning of the chain or cycle 1678 while (start->prev() != NULL && start->prev() != s) { 1679 start = start->prev(); 1680 } 1681 if (start->prev() == s) { 1682 start->break_cycle(temp_register); 1683 } 1684 // walk the chain forward inserting to store list 1685 while (start != NULL) { 1686 stores->append(start); 1687 start->set_processed(); 1688 start = start->next(); 1689 } 1690 } 1691 } 1692 return stores; 1693 } 1694 }; 1695 1696 static void verify_oop_args(MacroAssembler* masm, 1697 methodHandle method, 1698 const BasicType* sig_bt, 1699 const VMRegPair* regs) { 1700 Register temp_reg = rbx; // not part of any compiled calling seq 1701 if (VerifyOops) { 1702 for (int i = 0; i < method->size_of_parameters(); i++) { 1703 if (sig_bt[i] == T_OBJECT || 1704 sig_bt[i] == T_ARRAY) { 1705 VMReg r = regs[i].first(); 1706 assert(r->is_valid(), "bad oop arg"); 1707 if (r->is_stack()) { 1708 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); 1709 __ verify_oop(temp_reg); 1710 } else { 1711 __ verify_oop(r->as_Register()); 1712 } 1713 } 1714 } 1715 } 1716 } 1717 1718 static void gen_special_dispatch(MacroAssembler* masm, 1719 methodHandle method, 1720 const BasicType* sig_bt, 1721 const VMRegPair* regs) { 1722 verify_oop_args(masm, method, sig_bt, regs); 1723 vmIntrinsics::ID iid = method->intrinsic_id(); 1724 1725 // Now write the args into the outgoing interpreter space 1726 bool has_receiver = false; 1727 Register receiver_reg = noreg; 1728 int member_arg_pos = -1; 1729 Register member_reg = noreg; 1730 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1731 if (ref_kind != 0) { 1732 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1733 member_reg = rbx; // known to be free at this point 1734 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1735 } else if (iid == vmIntrinsics::_invokeBasic) { 1736 has_receiver = true; 1737 } else { 1738 fatal("unexpected intrinsic id %d", iid); 1739 } 1740 1741 if (member_reg != noreg) { 1742 // Load the member_arg into register, if necessary. 1743 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1744 VMReg r = regs[member_arg_pos].first(); 1745 if (r->is_stack()) { 1746 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); 1747 } else { 1748 // no data motion is needed 1749 member_reg = r->as_Register(); 1750 } 1751 } 1752 1753 if (has_receiver) { 1754 // Make sure the receiver is loaded into a register. 1755 assert(method->size_of_parameters() > 0, "oob"); 1756 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1757 VMReg r = regs[0].first(); 1758 assert(r->is_valid(), "bad receiver arg"); 1759 if (r->is_stack()) { 1760 // Porting note: This assumes that compiled calling conventions always 1761 // pass the receiver oop in a register. If this is not true on some 1762 // platform, pick a temp and load the receiver from stack. 
1763 fatal("receiver always in a register"); 1764 receiver_reg = j_rarg0; // known to be free at this point 1765 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); 1766 } else { 1767 // no data motion is needed 1768 receiver_reg = r->as_Register(); 1769 } 1770 } 1771 1772 // Figure out which address we are really jumping to: 1773 MethodHandles::generate_method_handle_dispatch(masm, iid, 1774 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1775 } 1776 1777 // --------------------------------------------------------------------------- 1778 // Generate a native wrapper for a given method. The method takes arguments 1779 // in the Java compiled code convention, marshals them to the native 1780 // convention (handlizes oops, etc), transitions to native, makes the call, 1781 // returns to java state (possibly blocking), unhandlizes any result and 1782 // returns. 1783 // 1784 // Critical native functions are a shorthand for the use of 1785 // GetPrimitiveArrayCritical and disallow the use of any other JNI 1786 // functions. The wrapper is expected to unpack the arguments before 1787 // passing them to the callee and perform checks before and after the 1788 // native call to ensure that the GC_locker 1789 // lock_critical/unlock_critical semantics are followed. Some other 1790 // parts of JNI setup are skipped, like the tear down of the JNI handle 1791 // block and the check for pending exceptions, since it's impossible for them 1792 // to be thrown. 1793 // 1794 // They are roughly structured like this: 1795 // if (GC_locker::needs_gc()) 1796 // SharedRuntime::block_for_jni_critical(); 1797 // transition to thread_in_native 1798 // unpack array arguments and call native entry point 1799 // check for safepoint in progress 1800 // check if any thread suspend flags are set 1801 // call into JVM and possibly unlock the JNI critical 1802 // if a GC was suppressed while in the critical native. 1803 // transition back to thread_in_Java 1804 // return to caller 1805 // 1806 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, 1807 methodHandle method, 1808 int compile_id, 1809 BasicType* in_sig_bt, 1810 VMRegPair* in_regs, 1811 BasicType ret_type) { 1812 if (method->is_method_handle_intrinsic()) { 1813 vmIntrinsics::ID iid = method->intrinsic_id(); 1814 intptr_t start = (intptr_t)__ pc(); 1815 int vep_offset = ((intptr_t)__ pc()) - start; 1816 gen_special_dispatch(masm, 1817 method, 1818 in_sig_bt, 1819 in_regs); 1820 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 1821 __ flush(); 1822 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 1823 return nmethod::new_native_nmethod(method, 1824 compile_id, 1825 masm->code(), 1826 vep_offset, 1827 frame_complete, 1828 stack_slots / VMRegImpl::slots_per_word, 1829 in_ByteSize(-1), 1830 in_ByteSize(-1), 1831 (OopMapSet*)NULL); 1832 } 1833 bool is_critical_native = true; 1834 address native_func = method->critical_native_function(); 1835 if (native_func == NULL) { 1836 native_func = method->native_function(); 1837 is_critical_native = false; 1838 } 1839 assert(native_func != NULL, "must have function"); 1840 1841 // An OopMap for lock (and class if static) 1842 OopMapSet *oop_maps = new OopMapSet(); 1843 intptr_t start = (intptr_t)__ pc(); 1844 1845 // We have received a description of where all the java args are located 1846 // on entry to the wrapper. We need to convert these args to where 1847 // the jni function will expect them.
To figure out where they go 1848 // we convert the java signature to a C signature by inserting 1849 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1850 1851 const int total_in_args = method->size_of_parameters(); 1852 int total_c_args = total_in_args; 1853 if (!is_critical_native) { 1854 total_c_args += 1; 1855 if (method->is_static()) { 1856 total_c_args++; 1857 } 1858 } else { 1859 for (int i = 0; i < total_in_args; i++) { 1860 if (in_sig_bt[i] == T_ARRAY) { 1861 total_c_args++; 1862 } 1863 } 1864 } 1865 1866 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1867 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1868 BasicType* in_elem_bt = NULL; 1869 1870 int argc = 0; 1871 if (!is_critical_native) { 1872 out_sig_bt[argc++] = T_ADDRESS; 1873 if (method->is_static()) { 1874 out_sig_bt[argc++] = T_OBJECT; 1875 } 1876 1877 for (int i = 0; i < total_in_args ; i++ ) { 1878 out_sig_bt[argc++] = in_sig_bt[i]; 1879 } 1880 } else { 1881 Thread* THREAD = Thread::current(); 1882 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args); 1883 SignatureStream ss(method->signature()); 1884 for (int i = 0; i < total_in_args ; i++ ) { 1885 if (in_sig_bt[i] == T_ARRAY) { 1886 // Arrays are passed as int, elem* pair 1887 out_sig_bt[argc++] = T_INT; 1888 out_sig_bt[argc++] = T_ADDRESS; 1889 Symbol* atype = ss.as_symbol(CHECK_NULL); 1890 const char* at = atype->as_C_string(); 1891 if (strlen(at) == 2) { 1892 assert(at[0] == '[', "must be"); 1893 switch (at[1]) { 1894 case 'B': in_elem_bt[i] = T_BYTE; break; 1895 case 'C': in_elem_bt[i] = T_CHAR; break; 1896 case 'D': in_elem_bt[i] = T_DOUBLE; break; 1897 case 'F': in_elem_bt[i] = T_FLOAT; break; 1898 case 'I': in_elem_bt[i] = T_INT; break; 1899 case 'J': in_elem_bt[i] = T_LONG; break; 1900 case 'S': in_elem_bt[i] = T_SHORT; break; 1901 case 'Z': in_elem_bt[i] = T_BOOLEAN; break; 1902 default: ShouldNotReachHere(); 1903 } 1904 } 1905 } else { 1906 out_sig_bt[argc++] = in_sig_bt[i]; 1907 in_elem_bt[i] = T_VOID; 1908 } 1909 if (in_sig_bt[i] != T_VOID) { 1910 assert(in_sig_bt[i] == ss.type(), "must match"); 1911 ss.next(); 1912 } 1913 } 1914 } 1915 1916 // Now figure out where the args must be stored and how much stack space 1917 // they require. 1918 int out_arg_slots; 1919 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); 1920 1921 // Compute framesize for the wrapper. We need to handlize all oops in 1922 // incoming registers 1923 1924 // Calculate the total number of stack slots we will need. 1925 1926 // First count the abi requirement plus all of the outgoing args 1927 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1928 1929 // Now the space for the inbound oop handle area 1930 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers 1931 if (is_critical_native) { 1932 // Critical natives may have to call out so they need a save area 1933 // for register arguments. 
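// (This area backs save_or_restore_arguments(), which spills the register
//  arguments around the runtime call made by check_needs_gc_for_critical_native().)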
1934 int double_slots = 0; 1935 int single_slots = 0; 1936 for ( int i = 0; i < total_in_args; i++) { 1937 if (in_regs[i].first()->is_Register()) { 1938 const Register reg = in_regs[i].first()->as_Register(); 1939 switch (in_sig_bt[i]) { 1940 case T_BOOLEAN: 1941 case T_BYTE: 1942 case T_SHORT: 1943 case T_CHAR: 1944 case T_INT: single_slots++; break; 1945 case T_ARRAY: // specific to LP64 (7145024) 1946 case T_LONG: double_slots++; break; 1947 default: ShouldNotReachHere(); 1948 } 1949 } else if (in_regs[i].first()->is_XMMRegister()) { 1950 switch (in_sig_bt[i]) { 1951 case T_FLOAT: single_slots++; break; 1952 case T_DOUBLE: double_slots++; break; 1953 default: ShouldNotReachHere(); 1954 } 1955 } else if (in_regs[i].first()->is_FloatRegister()) { 1956 ShouldNotReachHere(); 1957 } 1958 } 1959 total_save_slots = double_slots * 2 + single_slots; 1960 // align the save area 1961 if (double_slots != 0) { 1962 stack_slots = round_to(stack_slots, 2); 1963 } 1964 } 1965 1966 int oop_handle_offset = stack_slots; 1967 stack_slots += total_save_slots; 1968 1969 // Now any space we need for handlizing a klass if static method 1970 1971 int klass_slot_offset = 0; 1972 int klass_offset = -1; 1973 int lock_slot_offset = 0; 1974 bool is_static = false; 1975 1976 if (method->is_static()) { 1977 klass_slot_offset = stack_slots; 1978 stack_slots += VMRegImpl::slots_per_word; 1979 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1980 is_static = true; 1981 } 1982 1983 // Plus a lock if needed 1984 1985 if (method->is_synchronized()) { 1986 lock_slot_offset = stack_slots; 1987 stack_slots += VMRegImpl::slots_per_word; 1988 } 1989 1990 // Now a place (+2) to save return values or temp during shuffling 1991 // + 4 for return address (which we own) and saved rbp 1992 stack_slots += 6; 1993 1994 // Ok The space we have allocated will look like: 1995 // 1996 // 1997 // FP-> | | 1998 // |---------------------| 1999 // | 2 slots for moves | 2000 // |---------------------| 2001 // | lock box (if sync) | 2002 // |---------------------| <- lock_slot_offset 2003 // | klass (if static) | 2004 // |---------------------| <- klass_slot_offset 2005 // | oopHandle area | 2006 // |---------------------| <- oop_handle_offset (6 java arg registers) 2007 // | outbound memory | 2008 // | based arguments | 2009 // | | 2010 // |---------------------| 2011 // | | 2012 // SP-> | out_preserved_slots | 2013 // 2014 // 2015 2016 2017 // Now compute actual number of stack words we need rounding to make 2018 // stack properly aligned. 2019 stack_slots = round_to(stack_slots, StackAlignmentInSlots); 2020 2021 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 2022 2023 // First thing make an ic check to see if we should even be here 2024 2025 // We are free to use all registers as temps without saving them and 2026 // restoring them except rbp. rbp is the only callee save register 2027 // as far as the interpreter and the compiler(s) are concerned. 
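// The inline cache check: a compiled caller loads the expected receiver klass into
// rax (ic_reg); if it does not match the receiver's actual klass we jump to the
// ic-miss stub so the call site can be re-resolved.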
2028 2029 2030 const Register ic_reg = rax; 2031 const Register receiver = j_rarg0; 2032 2033 Label hit; 2034 Label exception_pending; 2035 2036 assert_different_registers(ic_reg, receiver, rscratch1); 2037 __ verify_oop(receiver); 2038 __ load_klass(rscratch1, receiver); 2039 __ cmpq(ic_reg, rscratch1); 2040 __ jcc(Assembler::equal, hit); 2041 2042 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 2043 2044 // Verified entry point must be aligned 2045 __ align(8); 2046 2047 __ bind(hit); 2048 2049 int vep_offset = ((intptr_t)__ pc()) - start; 2050 2051 // The instruction at the verified entry point must be 5 bytes or longer 2052 // because it can be patched on the fly by make_non_entrant. The stack bang 2053 // instruction fits that requirement. 2054 2055 // Generate stack overflow check 2056 2057 if (UseStackBanging) { 2058 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); 2059 } else { 2060 // need a 5 byte instruction to allow MT safe patching to non-entrant 2061 __ fat_nop(); 2062 } 2063 2064 // Generate a new frame for the wrapper. 2065 __ enter(); 2066 // -2 because return address is already present and so is saved rbp 2067 __ subptr(rsp, stack_size - 2*wordSize); 2068 2069 // Frame is now completed as far as size and linkage. 2070 int frame_complete = ((intptr_t)__ pc()) - start; 2071 2072 if (UseRTMLocking) { 2073 // Abort RTM transaction before calling JNI 2074 // because critical section will be large and will be 2075 // aborted anyway. Also nmethod could be deoptimized. 2076 __ xabort(0); 2077 } 2078 2079 #ifdef ASSERT 2080 { 2081 Label L; 2082 __ mov(rax, rsp); 2083 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) 2084 __ cmpptr(rax, rsp); 2085 __ jcc(Assembler::equal, L); 2086 __ stop("improperly aligned stack"); 2087 __ bind(L); 2088 } 2089 #endif /* ASSERT */ 2090 2091 2092 // We use r14 as the oop handle for the receiver/klass 2093 // It is callee save so it survives the call to native 2094 2095 const Register oop_handle_reg = r14; 2096 2097 if (is_critical_native) { 2098 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args, 2099 oop_handle_offset, oop_maps, in_regs, in_sig_bt); 2100 } 2101 2102 // 2103 // We immediately shuffle the arguments so that for any vm call we have to 2104 // make from here on out (sync slow path, jvmti, etc.) we will have 2105 // captured the oops from our caller and have a valid oopMap for 2106 // them. 2107 2108 // ----------------- 2109 // The Grand Shuffle 2110 2111 // The Java calling convention is either equal (linux) or denser (win64) than the 2112 // c calling convention. However, because of the jni_env argument, the c calling 2113 // convention always has at least one more (and two for static) arguments than Java. 2114 // Therefore if we move the args from java -> c backwards then we will never have 2115 // a register->register conflict and we don't have to build a dependency graph 2116 // and figure out how to break any cycles. 2117 // 2118 2119 // Record esp-based slot for receiver on stack for non-static methods 2120 int receiver_offset = -1; 2121 2122 // This is a trick. We double the stack slots so we can claim 2123 // the oops in the caller's frame. Since we are sure to have 2124 // more args than the caller doubling is enough to make 2125 // sure we can capture all the incoming oop args from the 2126 // caller.
2127 // 2128 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2129 2130 // Mark location of rbp (someday) 2131 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp)); 2132 2133 // Use eax, ebx as temporaries during any memory-memory moves we have to do 2134 // All inbound args are referenced based on rbp and all outbound args via rsp. 2135 2136 2137 #ifdef ASSERT 2138 bool reg_destroyed[RegisterImpl::number_of_registers]; 2139 bool freg_destroyed[XMMRegisterImpl::number_of_registers]; 2140 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2141 reg_destroyed[r] = false; 2142 } 2143 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) { 2144 freg_destroyed[f] = false; 2145 } 2146 2147 #endif /* ASSERT */ 2148 2149 // This may iterate in two different directions depending on the 2150 // kind of native it is. The reason is that for regular JNI natives 2151 // the incoming and outgoing registers are offset upwards and for 2152 // critical natives they are offset down. 2153 GrowableArray<int> arg_order(2 * total_in_args); 2154 VMRegPair tmp_vmreg; 2155 tmp_vmreg.set1(rbx->as_VMReg()); 2156 2157 if (!is_critical_native) { 2158 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { 2159 arg_order.push(i); 2160 arg_order.push(c_arg); 2161 } 2162 } else { 2163 // Compute a valid move order, using tmp_vmreg to break any cycles 2164 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg); 2165 } 2166 2167 int temploc = -1; 2168 for (int ai = 0; ai < arg_order.length(); ai += 2) { 2169 int i = arg_order.at(ai); 2170 int c_arg = arg_order.at(ai + 1); 2171 __ block_comment(err_msg("move %d -> %d", i, c_arg)); 2172 if (c_arg == -1) { 2173 assert(is_critical_native, "should only be required for critical natives"); 2174 // This arg needs to be moved to a temporary 2175 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register()); 2176 in_regs[i] = tmp_vmreg; 2177 temploc = i; 2178 continue; 2179 } else if (i == -1) { 2180 assert(is_critical_native, "should only be required for critical natives"); 2181 // Read from the temporary location 2182 assert(temploc != -1, "must be valid"); 2183 i = temploc; 2184 temploc = -1; 2185 } 2186 #ifdef ASSERT 2187 if (in_regs[i].first()->is_Register()) { 2188 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); 2189 } else if (in_regs[i].first()->is_XMMRegister()) { 2190 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!"); 2191 } 2192 if (out_regs[c_arg].first()->is_Register()) { 2193 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2194 } else if (out_regs[c_arg].first()->is_XMMRegister()) { 2195 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true; 2196 } 2197 #endif /* ASSERT */ 2198 switch (in_sig_bt[i]) { 2199 case T_ARRAY: 2200 if (is_critical_native) { 2201 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); 2202 c_arg++; 2203 #ifdef ASSERT 2204 if (out_regs[c_arg].first()->is_Register()) { 2205 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2206 } else if (out_regs[c_arg].first()->is_XMMRegister()) { 2207 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true; 2208 } 2209 #endif 2210 break; 2211 } 2212 case T_OBJECT: 2213 assert(!is_critical_native, "no oop arguments"); 2214 
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2215 ((i == 0) && (!is_static)), 2216 &receiver_offset); 2217 break; 2218 case T_VOID: 2219 break; 2220 2221 case T_FLOAT: 2222 float_move(masm, in_regs[i], out_regs[c_arg]); 2223 break; 2224 2225 case T_DOUBLE: 2226 assert( i + 1 < total_in_args && 2227 in_sig_bt[i + 1] == T_VOID && 2228 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2229 double_move(masm, in_regs[i], out_regs[c_arg]); 2230 break; 2231 2232 case T_LONG : 2233 long_move(masm, in_regs[i], out_regs[c_arg]); 2234 break; 2235 2236 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2237 2238 default: 2239 move32_64(masm, in_regs[i], out_regs[c_arg]); 2240 } 2241 } 2242 2243 int c_arg; 2244 2245 // Pre-load a static method's oop into r14. Used both by locking code and 2246 // the normal JNI call code. 2247 if (!is_critical_native) { 2248 // point c_arg at the first arg that is already loaded in case we 2249 // need to spill before we call out 2250 c_arg = total_c_args - total_in_args; 2251 2252 if (method->is_static()) { 2253 2254 // load oop into a register 2255 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror())); 2256 2257 // Now handlize the static class mirror; it's known not-null. 2258 __ movptr(Address(rsp, klass_offset), oop_handle_reg); 2259 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2260 2261 // Now get the handle 2262 __ lea(oop_handle_reg, Address(rsp, klass_offset)); 2263 // store the klass handle as second argument 2264 __ movptr(c_rarg1, oop_handle_reg); 2265 // and protect the arg if we must spill 2266 c_arg--; 2267 } 2268 } else { 2269 // For JNI critical methods we need to save all registers in save_args. 2270 c_arg = 0; 2271 } 2272 2273 // Change state to native (we save the return address in the thread, since it might not 2274 // be pushed on the stack when we do a stack traversal). It is enough that the pc() 2275 // points into the right code segment. It does not have to be the correct return pc. 2276 // We use the same pc/oopMap repeatedly when we call out 2277 2278 intptr_t the_pc = (intptr_t) __ pc(); 2279 oop_maps->add_gc_map(the_pc - start, map); 2280 2281 __ set_last_Java_frame(rsp, noreg, (address)the_pc); 2282 2283 2284 // We have all of the arguments set up at this point. We must not clobber any 2285 // argument registers from here on (if we save/restore them around a call, there are no oops in them that an oopMap would need to describe).
2286 2287 { 2288 SkipIfEqual skip(masm, &DTraceMethodProbes, false); 2289 // protect the args we've loaded 2290 save_args(masm, total_c_args, c_arg, out_regs); 2291 __ mov_metadata(c_rarg1, method()); 2292 __ call_VM_leaf( 2293 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2294 r15_thread, c_rarg1); 2295 restore_args(masm, total_c_args, c_arg, out_regs); 2296 } 2297 2298 // RedefineClasses() tracing support for obsolete method entry 2299 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { 2300 // protect the args we've loaded 2301 save_args(masm, total_c_args, c_arg, out_regs); 2302 __ mov_metadata(c_rarg1, method()); 2303 __ call_VM_leaf( 2304 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 2305 r15_thread, c_rarg1); 2306 restore_args(masm, total_c_args, c_arg, out_regs); 2307 } 2308 2309 // Lock a synchronized method 2310 2311 // Register definitions used by locking and unlocking 2312 2313 const Register swap_reg = rax; // Must use rax for cmpxchg instruction 2314 const Register obj_reg = rbx; // Will contain the oop 2315 const Register lock_reg = r13; // Address of compiler lock object (BasicLock) 2316 const Register old_hdr = r13; // value of old header at unlock time 2317 2318 Label slow_path_lock; 2319 Label lock_done; 2320 2321 if (method->is_synchronized()) { 2322 assert(!is_critical_native, "unhandled"); 2323 2324 2325 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); 2326 2327 // Get the handle (the 2nd argument) 2328 __ mov(oop_handle_reg, c_rarg1); 2329 2330 // Get address of the box 2331 2332 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); 2333 2334 // Load the oop from the handle 2335 __ movptr(obj_reg, Address(oop_handle_reg, 0)); 2336 2337 if (UseBiasedLocking) { 2338 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock); 2339 } 2340 2341 // Load immediate 1 into swap_reg %rax 2342 __ movl(swap_reg, 1); 2343 2344 // Load (object->mark() | 1) into swap_reg %rax 2345 __ orptr(swap_reg, Address(obj_reg, 0)); 2346 2347 // Save (object->mark() | 1) into BasicLock's displaced header 2348 __ movptr(Address(lock_reg, mark_word_offset), swap_reg); 2349 2350 if (os::is_MP()) { 2351 __ lock(); 2352 } 2353 2354 // src -> dest iff dest == rax else rax <- dest 2355 __ cmpxchgptr(lock_reg, Address(obj_reg, 0)); 2356 __ jcc(Assembler::equal, lock_done); 2357 2358 // Hmm should this move to the slow path code area??? 2359 2360 // Test if the oopMark is an obvious stack pointer, i.e., 2361 // 1) (mark & 3) == 0, and 2362 // 2) rsp <= mark < rsp + os::pagesize() 2363 // These 3 tests can be done by evaluating the following 2364 // expression: ((mark - rsp) & (3 - os::vm_page_size())), 2365 // assuming both stack pointer and pagesize have their 2366 // least significant 2 bits clear.
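// (Worked example: with a 4K page, 3 - os::vm_page_size() is ...fffff003, so the
//  AND below is zero only when mark - rsp is 4-byte aligned and smaller than a
//  page, i.e. the displaced mark is a stack lock in one of our own frames --
//  the recursive locking case.)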
2367 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg 2368 2369 __ subptr(swap_reg, rsp); 2370 __ andptr(swap_reg, 3 - os::vm_page_size()); 2371 2372 // Save the test result, for recursive case, the result is zero 2373 __ movptr(Address(lock_reg, mark_word_offset), swap_reg); 2374 __ jcc(Assembler::notEqual, slow_path_lock); 2375 2376 // Slow path will re-enter here 2377 2378 __ bind(lock_done); 2379 } 2380 2381 2382 // Finally just about ready to make the JNI call 2383 2384 2385 // get JNIEnv* which is first argument to native 2386 if (!is_critical_native) { 2387 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset()))); 2388 } 2389 2390 // Now set thread in native 2391 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); 2392 2393 __ call(RuntimeAddress(native_func)); 2394 2395 // Verify or restore cpu control state after JNI call 2396 __ restore_cpu_control_state_after_jni(); 2397 2398 // Unpack native results. 2399 switch (ret_type) { 2400 case T_BOOLEAN: __ c2bool(rax); break; 2401 case T_CHAR : __ movzwl(rax, rax); break; 2402 case T_BYTE : __ sign_extend_byte (rax); break; 2403 case T_SHORT : __ sign_extend_short(rax); break; 2404 case T_INT : /* nothing to do */ break; 2405 case T_DOUBLE : 2406 case T_FLOAT : 2407 // Result is in xmm0 we'll save as needed 2408 break; 2409 case T_ARRAY: // Really a handle 2410 case T_OBJECT: // Really a handle 2411 break; // can't de-handlize until after safepoint check 2412 case T_VOID: break; 2413 case T_LONG: break; 2414 default : ShouldNotReachHere(); 2415 } 2416 2417 // Switch thread to "native transition" state before reading the synchronization state. 2418 // This additional state is necessary because reading and testing the synchronization 2419 // state is not atomic w.r.t. GC, as this scenario demonstrates: 2420 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 2421 // VM thread changes sync state to synchronizing and suspends threads for GC. 2422 // Thread A is resumed to finish this native method, but doesn't block here since it 2423 // didn't see any synchronization in progress, and escapes. 2424 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); 2425 2426 if(os::is_MP()) { 2427 if (UseMembar) { 2428 // Force this write out before the read below 2429 __ membar(Assembler::Membar_mask_bits( 2430 Assembler::LoadLoad | Assembler::LoadStore | 2431 Assembler::StoreLoad | Assembler::StoreStore)); 2432 } else { 2433 // Write serialization page so VM thread can do a pseudo remote membar. 2434 // We use the current thread pointer to calculate a thread specific 2435 // offset to write to within the page. This minimizes bus traffic 2436 // due to cache line collision. 2437 __ serialize_memory(r15_thread, rcx); 2438 } 2439 } 2440 2441 Label after_transition; 2442 2443 // check for safepoint operation in progress and/or pending suspend requests 2444 { 2445 Label Continue; 2446 2447 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()), 2448 SafepointSynchronize::_not_synchronized); 2449 2450 Label L; 2451 __ jcc(Assembler::notEqual, L); 2452 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); 2453 __ jcc(Assembler::equal, Continue); 2454 __ bind(L); 2455 2456 // Don't use call_VM as it will see a possible pending exception and forward it 2457 // and never return here, preventing us from clearing _last_native_pc down below.
2458 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are 2459 // preserved and correspond to the bcp/locals pointers. So we do a runtime call 2460 // by hand. 2461 // 2462 save_native_result(masm, ret_type, stack_slots); 2463 __ mov(c_rarg0, r15_thread); 2464 __ mov(r12, rsp); // remember sp 2465 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 2466 __ andptr(rsp, -16); // align stack as required by ABI 2467 if (!is_critical_native) { 2468 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 2469 } else { 2470 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition))); 2471 } 2472 __ mov(rsp, r12); // restore sp 2473 __ reinit_heapbase(); 2474 // Restore any method result value 2475 restore_native_result(masm, ret_type, stack_slots); 2476 2477 if (is_critical_native) { 2478 // The call above performed the transition to thread_in_Java so 2479 // skip the transition logic below. 2480 __ jmpb(after_transition); 2481 } 2482 2483 __ bind(Continue); 2484 } 2485 2486 // change thread state 2487 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); 2488 __ bind(after_transition); 2489 2490 Label reguard; 2491 Label reguard_done; 2492 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); 2493 __ jcc(Assembler::equal, reguard); 2494 __ bind(reguard_done); 2495 2496 // native result if any is live 2497 2498 // Unlock 2499 Label unlock_done; 2500 Label slow_path_unlock; 2501 if (method->is_synchronized()) { 2502 2503 // Get locked oop from the handle we passed to jni 2504 __ movptr(obj_reg, Address(oop_handle_reg, 0)); 2505 2506 Label done; 2507 2508 if (UseBiasedLocking) { 2509 __ biased_locking_exit(obj_reg, old_hdr, done); 2510 } 2511 2512 // Simple recursive lock? 
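// (A zero displaced header stored in the on-stack BasicLock means the locking fast
//  path above took the recursive case, so there is nothing to unlock here.)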
2513 2514 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD); 2515 __ jcc(Assembler::equal, done); 2516 2517 // Must save rax if it is live now because cmpxchg must use it 2518 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 2519 save_native_result(masm, ret_type, stack_slots); 2520 } 2521 2522 2523 // get address of the stack lock 2524 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); 2525 // get old displaced header 2526 __ movptr(old_hdr, Address(rax, 0)); 2527 2528 // Atomic swap old header if oop still contains the stack lock 2529 if (os::is_MP()) { 2530 __ lock(); 2531 } 2532 __ cmpxchgptr(old_hdr, Address(obj_reg, 0)); 2533 __ jcc(Assembler::notEqual, slow_path_unlock); 2534 2535 // slow path re-enters here 2536 __ bind(unlock_done); 2537 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 2538 restore_native_result(masm, ret_type, stack_slots); 2539 } 2540 2541 __ bind(done); 2542 2543 } 2544 { 2545 SkipIfEqual skip(masm, &DTraceMethodProbes, false); 2546 save_native_result(masm, ret_type, stack_slots); 2547 __ mov_metadata(c_rarg1, method()); 2548 __ call_VM_leaf( 2549 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2550 r15_thread, c_rarg1); 2551 restore_native_result(masm, ret_type, stack_slots); 2552 } 2553 2554 __ reset_last_Java_frame(false, true); 2555 2556 // Unpack oop result 2557 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2558 Label L; 2559 __ testptr(rax, rax); 2560 __ jcc(Assembler::zero, L); 2561 __ movptr(rax, Address(rax, 0)); 2562 __ bind(L); 2563 __ verify_oop(rax); 2564 } 2565 2566 if (!is_critical_native) { 2567 // reset handle block 2568 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset())); 2569 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 2570 } 2571 2572 // pop our frame 2573 2574 __ leave(); 2575 2576 if (!is_critical_native) { 2577 // Any exception pending? 2578 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); 2579 __ jcc(Assembler::notEqual, exception_pending); 2580 } 2581 2582 // Return 2583 2584 __ ret(0); 2585 2586 // Unexpected paths are out of line and go here 2587 2588 if (!is_critical_native) { 2589 // forward the exception 2590 __ bind(exception_pending); 2591 2592 // and forward the exception 2593 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2594 } 2595 2596 // Slow path locking & unlocking 2597 if (method->is_synchronized()) { 2598 2599 // BEGIN Slow path lock 2600 __ bind(slow_path_lock); 2601 2602 // has last_Java_frame setup.
No exceptions so do vanilla call not call_VM 2603 // args are (oop obj, BasicLock* lock, JavaThread* thread) 2604 2605 // protect the args we've loaded 2606 save_args(masm, total_c_args, c_arg, out_regs); 2607 2608 __ mov(c_rarg0, obj_reg); 2609 __ mov(c_rarg1, lock_reg); 2610 __ mov(c_rarg2, r15_thread); 2611 2612 // Not a leaf but we have last_Java_frame setup as we want 2613 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3); 2614 restore_args(masm, total_c_args, c_arg, out_regs); 2615 2616 #ifdef ASSERT 2617 { Label L; 2618 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); 2619 __ jcc(Assembler::equal, L); 2620 __ stop("no pending exception allowed on exit from monitorenter"); 2621 __ bind(L); 2622 } 2623 #endif 2624 __ jmp(lock_done); 2625 2626 // END Slow path lock 2627 2628 // BEGIN Slow path unlock 2629 __ bind(slow_path_unlock); 2630 2631 // If we haven't already saved the native result we must save it now as xmm registers 2632 // are still exposed. 2633 2634 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { 2635 save_native_result(masm, ret_type, stack_slots); 2636 } 2637 2638 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); 2639 2640 __ mov(c_rarg0, obj_reg); 2641 __ mov(c_rarg2, r15_thread); 2642 __ mov(r12, rsp); // remember sp 2643 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 2644 __ andptr(rsp, -16); // align stack as required by ABI 2645 2646 // Save pending exception around call to VM (which contains an EXCEPTION_MARK) 2647 // NOTE that obj_reg == rbx currently 2648 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset()))); 2649 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); 2650 2651 // args are (oop obj, BasicLock* lock, JavaThread* thread) 2652 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C))); 2653 __ mov(rsp, r12); // restore sp 2654 __ reinit_heapbase(); 2655 #ifdef ASSERT 2656 { 2657 Label L; 2658 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); 2659 __ jcc(Assembler::equal, L); 2660 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); 2661 __ bind(L); 2662 } 2663 #endif /* ASSERT */ 2664 2665 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx); 2666 2667 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { 2668 restore_native_result(masm, ret_type, stack_slots); 2669 } 2670 __ jmp(unlock_done); 2671 2672 // END Slow path unlock 2673 2674 } // synchronized 2675 2676 // SLOW PATH Reguard the stack if needed 2677 2678 __ bind(reguard); 2679 save_native_result(masm, ret_type, stack_slots); 2680 __ mov(r12, rsp); // remember sp 2681 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 2682 __ andptr(rsp, -16); // align stack as required by ABI 2683 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 2684 __ mov(rsp, r12); // restore sp 2685 __ reinit_heapbase(); 2686 restore_native_result(masm, ret_type, stack_slots); 2687 // and continue 2688 __ jmp(reguard_done); 2689 2690 2691 2692 __ flush(); 2693 2694 nmethod *nm = nmethod::new_native_nmethod(method, 2695 compile_id, 2696 masm->code(), 2697 vep_offset, 2698 frame_complete, 2699 stack_slots / VMRegImpl::slots_per_word, 2700 (is_static ? 
in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2701 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), 2702 oop_maps); 2703 2704 if (is_critical_native) { 2705 nm->set_lazy_critical_native(true); 2706 } 2707 2708 return nm; 2709 2710 } 2711 2712 // this function returns the adjustment size (in number of words) to a c2i adapter 2713 // activation for use during deoptimization 2714 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) { 2715 return (callee_locals - callee_parameters) * Interpreter::stackElementWords; 2716 } 2717 2718 2719 uint SharedRuntime::out_preserve_stack_slots() { 2720 return 0; 2721 } 2722 2723 //------------------------------generate_deopt_blob---------------------------- 2724 void SharedRuntime::generate_deopt_blob() { 2725 // Allocate space for the code 2726 ResourceMark rm; 2727 // Setup code generation tools 2728 int pad = 0; 2729 #if INCLUDE_JVMCI 2730 if (EnableJVMCI) { 2731 pad += 512; // Increase the buffer size when compiling for JVMCI 2732 } 2733 #endif 2734 CodeBuffer buffer("deopt_blob", 2048+pad, 1024); 2735 MacroAssembler* masm = new MacroAssembler(&buffer); 2736 int frame_size_in_words; 2737 OopMap* map = NULL; 2738 OopMapSet *oop_maps = new OopMapSet(); 2739 2740 // ------------- 2741 // This code enters when returning to a de-optimized nmethod. A return 2742 // address has been pushed on the stack, and return values are in 2743 // registers. 2744 // If we are doing a normal deopt then we were called from the patched 2745 // nmethod from the point we returned to the nmethod. So the return 2746 // address on the stack is wrong by NativeCall::instruction_size 2747 // We will adjust the value so it looks like we have the original return 2748 // address on the stack (like when we eagerly deoptimized). 2749 // In the case of an exception pending when deoptimizing, we enter 2750 // with a return address on the stack that points after the call we patched 2751 // into the exception handler. We have the following register state from, 2752 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp). 2753 // rax: exception oop 2754 // rbx: exception handler 2755 // rdx: throwing pc 2756 // So in this case we simply jam rdx into the useless return address and 2757 // the stack looks just like we want. 2758 // 2759 // At this point we need to de-opt. We save the argument return 2760 // registers. We call the first C routine, fetch_unroll_info(). This 2761 // routine captures the return values and returns a structure which 2762 // describes the current frame size and the sizes of all replacement frames. 2763 // The current frame is compiled code and may contain many inlined 2764 // functions, each with their own JVM state. We pop the current frame, then 2765 // push all the new frames. Then we call the C routine unpack_frames() to 2766 // populate these frames. Finally unpack_frames() returns us the new target 2767 // address. Notice that callee-save registers are BLOWN here; they have 2768 // already been captured in the vframeArray at the time the return PC was 2769 // patched. 2770 address start = __ pc(); 2771 Label cont; 2772 2773 // Prolog for non exception case! 2774 2775 // Save everything in sight. 2776 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2777 2778 // Normal deoptimization. Save exec mode for unpack_frames.
2779 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved 2780 __ jmp(cont); 2781 2782 int reexecute_offset = __ pc() - start; 2783 #if INCLUDE_JVMCI && !defined(COMPILER1) 2784 if (EnableJVMCI && UseJVMCICompiler) { 2785 // JVMCI does not use this kind of deoptimization 2786 __ should_not_reach_here(); 2787 } 2788 #endif 2789 2790 // Reexecute case 2791 // return address is the pc that describes what bci to re-execute at 2792 2793 // No need to update map as each call to save_live_registers will produce identical oopmap 2794 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2795 2796 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved 2797 __ jmp(cont); 2798 2799 #if INCLUDE_JVMCI 2800 Label after_fetch_unroll_info_call; 2801 int implicit_exception_uncommon_trap_offset = 0; 2802 int uncommon_trap_offset = 0; 2803 2804 if (EnableJVMCI) { 2805 implicit_exception_uncommon_trap_offset = __ pc() - start; 2806 2807 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()))); 2808 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD); 2809 2810 uncommon_trap_offset = __ pc() - start; 2811 2812 // Save everything in sight. 2813 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2814 // fetch_unroll_info needs to call last_java_frame() 2815 __ set_last_Java_frame(noreg, noreg, NULL); 2816 2817 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset()))); 2818 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1); 2819 2820 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute); 2821 __ mov(c_rarg0, r15_thread); 2822 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap))); 2823 oop_maps->add_gc_map( __ pc()-start, map->deep_copy()); 2824 2825 __ reset_last_Java_frame(false, false); 2826 2827 __ jmp(after_fetch_unroll_info_call); 2828 } // EnableJVMCI 2829 #endif // INCLUDE_JVMCI 2830 2831 int exception_offset = __ pc() - start; 2832 2833 // Prolog for exception case 2834 2835 // all registers are dead at this entry point, except for rax, and 2836 // rdx which contain the exception oop and exception pc 2837 // respectively. Set them in TLS and fall thru to the 2838 // unpack_with_exception_in_tls entry point. 2839 2840 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx); 2841 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax); 2842 2843 int exception_in_tls_offset = __ pc() - start; 2844 2845 // new implementation because exception oop is now passed in JavaThread 2846 2847 // Prolog for exception case 2848 // All registers must be preserved because they might be used by LinearScan 2849 // Exception oop and throwing PC are passed in JavaThread 2850 // tos: stack at point of call to method that threw the exception (i.e. only 2851 // args are on the stack, no return address) 2852 2853 // make room on stack for the return address 2854 // It will be patched later with the throwing pc. The correct value is not 2855 // available now because loading it from memory would destroy registers. 2856 __ push(0); 2857 2858 // Save everything in sight. 2859 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2860 2861 // Now it is safe to overwrite any register 2862 2863 // Deopt during an exception. Save exec mode for unpack_frames.
2864 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved 2865 2866 // load throwing pc from JavaThread and patch it as the return address 2867 // of the current frame. Then clear the field in JavaThread 2868 2869 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); 2870 __ movptr(Address(rbp, wordSize), rdx); 2871 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); 2872 2873 #ifdef ASSERT 2874 // verify that there is really an exception oop in JavaThread 2875 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); 2876 __ verify_oop(rax); 2877 2878 // verify that there is no pending exception 2879 Label no_pending_exception; 2880 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 2881 __ testptr(rax, rax); 2882 __ jcc(Assembler::zero, no_pending_exception); 2883 __ stop("must not have pending exception here"); 2884 __ bind(no_pending_exception); 2885 #endif 2886 2887 __ bind(cont); 2888 2889 // Call C code. Need thread and this frame, but NOT official VM entry 2890 // crud. We cannot block on this call, no GC can happen. 2891 // 2892 // UnrollBlock* fetch_unroll_info(JavaThread* thread) 2893 2894 // fetch_unroll_info needs to call last_java_frame(). 2895 2896 __ set_last_Java_frame(noreg, noreg, NULL); 2897 #ifdef ASSERT 2898 { Label L; 2899 __ cmpptr(Address(r15_thread, 2900 JavaThread::last_Java_fp_offset()), 2901 (int32_t)0); 2902 __ jcc(Assembler::equal, L); 2903 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared"); 2904 __ bind(L); 2905 } 2906 #endif // ASSERT 2907 __ mov(c_rarg0, r15_thread); 2908 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info))); 2909 2910 // Need to have an oopmap that tells fetch_unroll_info where to 2911 // find any register it might need. 2912 oop_maps->add_gc_map(__ pc() - start, map); 2913 2914 __ reset_last_Java_frame(false, false); 2915 2916 #if INCLUDE_JVMCI 2917 if (EnableJVMCI) { 2918 __ bind(after_fetch_unroll_info_call); 2919 } 2920 #endif 2921 2922 // Load UnrollBlock* into rdi 2923 __ mov(rdi, rax); 2924 2925 Label noException; 2926 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending? 2927 __ jcc(Assembler::notEqual, noException); 2928 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); 2929 // QQQ this is useless it was NULL above 2930 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); 2931 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); 2932 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); 2933 2934 __ verify_oop(rax); 2935 2936 // Overwrite the result registers with the exception results. 2937 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); 2938 // I think this is useless 2939 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx); 2940 2941 __ bind(noException); 2942 2943 // Only register save data is on the stack. 2944 // Now restore the result registers. Everything else is either dead 2945 // or captured in the vframeArray. 2946 RegisterSaver::restore_result_registers(masm); 2947 2948 // All of the register save area has been popped off the stack. Only the 2949 // return address remains. 2950 2951 // Pop all the frames we must move/replace.
2952 // 2953 // Frame picture (youngest to oldest) 2954 // 1: self-frame (no frame link) 2955 // 2: deopting frame (no frame link) 2956 // 3: caller of deopting frame (could be compiled/interpreted). 2957 // 2958 // Note: by leaving the return address of self-frame on the stack 2959 // and using the size of frame 2 to adjust the stack 2960 // when we are done the return to frame 3 will still be on the stack. 2961 2962 // Pop deoptimized frame 2963 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); 2964 __ addptr(rsp, rcx); 2965 2966 // rsp should be pointing at the return address to the caller (3) 2967 2968 // Pick up the initial fp we should save 2969 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved) 2970 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); 2971 2972 #ifdef ASSERT 2973 // Compilers generate code that bang the stack by as much as the 2974 // interpreter would need. So this stack banging should never 2975 // trigger a fault. Verify that it does not on non product builds. 2976 if (UseStackBanging) { 2977 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); 2978 __ bang_stack_size(rbx, rcx); 2979 } 2980 #endif 2981 2982 // Load address of array of frame pcs into rcx 2983 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); 2984 2985 // Trash the old pc 2986 __ addptr(rsp, wordSize); 2987 2988 // Load address of array of frame sizes into rsi 2989 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); 2990 2991 // Load counter into rdx 2992 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); 2993 2994 // Now adjust the caller's stack to make up for the extra locals 2995 // but record the original sp so that we can save it in the skeletal interpreter 2996 // frame and the stack walking of interpreter_sender will get the unextended sp 2997 // value and not the "real" sp value. 
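// (The loop below builds one skeletal interpreter frame per entry of the
//  frame_sizes/frame_pcs arrays: push the saved return pc, enter() to link rbp,
//  then drop rsp by the frame size; Deoptimization::unpack_frames() fills the
//  frames in afterwards.)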
2998 2999 const Register sender_sp = r8; 3000 3001 __ mov(sender_sp, rsp); 3002 __ movl(rbx, Address(rdi, 3003 Deoptimization::UnrollBlock:: 3004 caller_adjustment_offset_in_bytes())); 3005 __ subptr(rsp, rbx); 3006 3007 // Push interpreter frames in a loop 3008 Label loop; 3009 __ bind(loop); 3010 __ movptr(rbx, Address(rsi, 0)); // Load frame size 3011 #ifdef CC_INTERP 3012 __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and 3013 #ifdef ASSERT 3014 __ push(0xDEADDEAD); // Make a recognizable pattern 3015 __ push(0xDEADDEAD); 3016 #else /* ASSERT */ 3017 __ subptr(rsp, 2*wordSize); // skip the "static long no_param" 3018 #endif /* ASSERT */ 3019 #else 3020 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand 3021 #endif // CC_INTERP 3022 __ pushptr(Address(rcx, 0)); // Save return address 3023 __ enter(); // Save old & set new ebp 3024 __ subptr(rsp, rbx); // Prolog 3025 #ifdef CC_INTERP 3026 __ movptr(Address(rbp, 3027 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), 3028 sender_sp); // Make it walkable 3029 #else /* CC_INTERP */ 3030 // This value is corrected by layout_activation_impl 3031 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD ); 3032 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable 3033 #endif /* CC_INTERP */ 3034 __ mov(sender_sp, rsp); // Pass sender_sp to next frame 3035 __ addptr(rsi, wordSize); // Bump array pointer (sizes) 3036 __ addptr(rcx, wordSize); // Bump array pointer (pcs) 3037 __ decrementl(rdx); // Decrement counter 3038 __ jcc(Assembler::notZero, loop); 3039 __ pushptr(Address(rcx, 0)); // Save final return address 3040 3041 // Re-push self-frame 3042 __ enter(); // Save old & set new ebp 3043 3044 // Allocate a full sized register save area. 3045 // Return address and rbp are in place, so we allocate two less words. 3046 __ subptr(rsp, (frame_size_in_words - 2) * wordSize); 3047 3048 // Restore frame locals after moving the frame 3049 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0); 3050 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); 3051 3052 // Call C code. Need thread but NOT official VM entry 3053 // crud. We cannot block on this call, no GC can happen. Call should 3054 // restore return values to their stack-slots with the new SP. 3055 // 3056 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode) 3057 3058 // Use rbp because the frames look interpreted now 3059 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. 3060 // Don't need the precise return PC here, just precise enough to point into this code blob. 
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, rbp, the_pc);

  __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg1, r14); // second arg: exec_mode
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Revert SP alignment after call since we're going to do some SP relative addressing below
  __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start,
                       new OopMap( frame_size_in_words, 0 ));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Collect return values
  __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
  __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
  // I think this is useless (throwing pc?)
  __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));

  // Pop self-frame.
  __ leave();                           // Epilog

  // Jump to interpreter
  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  if (UseRTMLocking) {
    // Abort RTM transaction before possible nmethod deoptimization.
    __ xabort(0);
  }

  // Push self-frame.  We get here with a return address on the
  // stack, so rsp is 8-byte aligned until we allocate our frame.
  __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!

  // No callee saved registers. rbp is assumed implicitly saved
  __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);

  // The compiler left unloaded_class_index in j_rarg0; move it to where the
  // runtime expects it.
  __ movl(c_rarg1, j_rarg0);

  __ set_last_Java_frame(noreg, noreg, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  // Thread is in rdi already.
  //
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);

  __ mov(c_rarg0, r15_thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));

  // Set an oopmap for the call site
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);

  // location of rbp is known implicitly by the frame sender code

  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false, false);

  // Load UnrollBlock* into rdi
  __ mov(rdi, rax);

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
  __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!

  // Pop deoptimized frame (int)
  __ movl(rcx, Address(rdi,
                       Deoptimization::UnrollBlock::
                       size_of_deoptimized_frame_offset_in_bytes()));
  __ addptr(rsp, rcx);

  // rsp should be pointing at the return address to the caller (3)

  // Pick up the initial fp we should save
  // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }
#endif

  // Load address of array of frame pcs into rcx (address*)
  __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Trash the return pc
  __ addptr(rsp, wordSize);

  // Load address of array of frame sizes into rsi (intptr_t*)
  __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  // Counter
  __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, rsp);
  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0)); // Load frame size
  __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
  __ pushptr(Address(rcx, 0));     // Save return address
  __ enter();                      // Save old & set new rbp
  __ subptr(rsp, rbx);             // Prolog
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                    -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
            sender_sp);            // Make it walkable
#else // CC_INTERP
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
            sender_sp);            // Make it walkable
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
#endif // CC_INTERP
  __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
  __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
  __ decrementl(rdx);              // Decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));     // Save final return address

  // Re-push self-frame
  __ enter();                      // Save old & set new rbp
  __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
  // Prolog

  // Use rbp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, rbp, the_pc);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // Thread is in rdi already.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);

  __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Pop self-frame.
  __ leave();                      // Epilog

  // Jump to interpreter
  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers
// and sets up an oopmap.
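//
// Rough context (hedged sketch, not authoritative): compiled code reaches
// the blob generated below through the safepoint polling mechanism,
// approximately:
//
//   poll instruction faults on the armed polling page
//     -> the VM's signal handler records the faulting pc in
//        JavaThread::saved_exception_pc()
//     -> execution resumes in this SafepointBlob
//
// This is why, when the poll is not at a return (cause_return == false),
// a dummy word is pushed on entry and later overwritten with
// saved_exception_pc so the frame has a valid return address.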
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL,
         "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start = __ pc();
  address call_pc = NULL;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  if (UseRTMLocking) {
    // Abort RTM transaction before calling runtime
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

  // Make room for return address (or push it again)
  if (!cause_return) {
    __ push(rbx);
  }

  // Save registers, fpu state, and flags
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  __ set_last_Java_frame(noreg, noreg, NULL);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the dummy value we pushed on entry
    __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), c_rarg0);
  }

  // Do the call
  __ mov(c_rarg0, r15_thread);
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false, false);

  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
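//
// Rough usage sketch (hedged; 'destination' is a VM resolver such as
// SharedRuntime::resolve_static_call_C, and the pseudocode below is
// illustrative, not the emitted code):
//
//   save_live_registers();
//   resolved = destination(thread);   // resolver patches the call site,
//                                     // leaves the Method* in vm_result_2
//   // copy the Method* into the saved rbx slot, the entry into the rax slot
//   restore_live_registers();
//   jmp rax;                          // continue at the resolved target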
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  __ set_last_Java_frame(noreg, noreg, NULL);

  __ mov(c_rarg0, r15_thread);

  __ call(RuntimeAddress(destination));


  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // rax contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false, false);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned Method*
  __ get_vm_result_2(rbx, r15_thread);
  __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);

  __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}


//------------------------------Montgomery multiplication------------------------
//

#ifndef _WINDOWS

#define ASM_SUBTRACT

#ifdef ASM_SUBTRACT
// Subtract 0:b from carry:a.  Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0, cnt = len;
  unsigned long tmp;
  asm volatile("clc; "
               "0: ; "
               "mov (%[b], %[i], 8), %[tmp]; "
               "sbb %[tmp], (%[a], %[i], 8); "
               "inc %[i]; dec %[cnt]; "
               "jne 0b; "
               "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
               : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
               : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
               : "memory");
  return tmp;
}
#else // ASM_SUBTRACT
typedef int __attribute__((mode(TI))) int128;

// Subtract 0:b from carry:a.  Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
  int128 tmp = 0;
  int i;
  for (i = 0; i < len; i++) {
    tmp += a[i];
    tmp -= b[i];
    a[i] = tmp;
    tmp >>= 64;
    assert(-1 <= tmp && tmp <= 0, "invariant");
  }
  return tmp + carry;
}
#endif // ! ASM_SUBTRACT

// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
#define MACC(A, B, T0, T1, T2)                                  \
do {                                                            \
  unsigned long hi, lo;                                         \
  __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
           : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
           : "r"(A), "a"(B) : "cc");                            \
 } while(0)

// As above, but add twice the double-length result into the
// accumulator.
#define MACC2(A, B, T0, T1, T2)                                   \
do {                                                              \
  unsigned long hi, lo;                                           \
  __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; "   \
           "add %%rax, %2; adc %%rdx, %3; adc $0, %4"             \
           : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)    \
           : "r"(A), "a"(B) : "cc");                              \
 } while(0)

// Fast Montgomery multiplication.  The derivation of the algorithm is
// in  A Cryptographic Library for the Motorola DSP56000,
// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.

static void __attribute__((noinline))
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0;  // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0)
    t0 = sub(m, n, t0, len);
}

// Fast Montgomery squaring.  This uses asymptotically 25% fewer
// multiplies so it should be up to 25% faster than Montgomery
// multiplication.  However, its loop control is more complex and it
// may actually run slower on some machines.
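//
// One way to see the 25% figure (rough operation counts read off the code
// above and below): a Montgomery multiply of len-word operands performs
// about len^2 MACs for a*b plus len^2 MACs for m*n, i.e. ~2*len^2 word
// multiplies. When squaring, each cross product a[j]*a[i-j] with j != i-j
// occurs twice and is folded into a single MACC2, so the a*a work drops to
// about len^2/2 while the m*n work stays near len^2, giving roughly
// 1.5*len^2 multiplies -- about 25% fewer.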

static void __attribute__((noinline))
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0;  // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0)
    t0 = sub(m, n, t0, len);
}

// Swap words in a longword.
static unsigned long swap(unsigned long x) {
  return (x << 32) | (x >> 32);
}

// Copy len longwords from s to d, word-swapping as we go.  The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while(len-- > 0) {
    d--;
    *d = swap(*s);
    s++;
  }
}

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
#define MONTGOMERY_SQUARING_THRESHOLD 64

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer and
  // will use here a total of 8k bytes of stack space.
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer and
  // will use here a total of 6k bytes of stack space.
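  // For example, with the 512-jint case mentioned above, longwords = 256,
  // so the three scratch arrays below occupy 256 * 8 * 3 = 6144 bytes,
  // and the four arrays in montgomery_multiply above occupy
  // 256 * 8 * 4 = 8192 bytes -- both within the 8192-byte guarantee.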
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

#endif // !_WINDOWS

#ifdef COMPILER2
// This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob at the end.
// Compiled code jumps to this blob when an exception is thrown
// (see emit_exception_handler in the x86_64.ad file).
//
// Given an exception pc at a call, we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-saved registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   rax: exception oop
//   rdx: exception pc
//
// Results:
//   rax: exception oop
//   rdx: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
  assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
  assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);


  address start = __ pc();

  // Exception pc is 'return address' for stack walker
  __ push(rdx);
  __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog

  // Save callee-saved registers.  See x86_64.ad.

  // rbp is an implicitly saved callee saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee save registers now that adapter frames are gone.

  __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);

  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened in.
  // c_rarg0 is either rdi (Linux) or rcx (Windows).
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);

  // This call does all the hard work.
  // It checks if an exception handler exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)

  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, noreg, the_pc);
  __ mov(c_rarg0, r15_thread);
  __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false, true);

  // Restore callee-saved registers

  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee save registers now that adapter frames are gone.

  __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));

  __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
  __ pop(rdx);                  // No need for exception pc anymore

  // rax: exception handler

  // We have a handler in rax (could be deopt blob).
  __ mov(r8, rax);

  // Get the exception oop
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);

  // rax: exception oop
  // r8:  exception handler
  // rdx: exception pc
  // Jump to handler

  __ jmp(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2