1 /*
2 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "classfile/symbolTable.hpp"
32 #include "code/debugInfoRec.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/nativeInst.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "gc/shared/collectedHeap.hpp"
37 #include "gc/shared/gcLocker.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "logging/log.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "oops/compiledICHolder.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/vframeArray.hpp"
45 #include "utilities/align.hpp"
46 #include "utilities/formatBuffer.hpp"
47 #include "vm_version_x86.hpp"
48 #include "vmreg_x86.inline.hpp"
49 #ifdef COMPILER1
50 #include "c1/c1_Runtime1.hpp"
51 #endif
52 #ifdef COMPILER2
53 #include "opto/runtime.hpp"
54 #endif
55 #if INCLUDE_JVMCI
56 #include "jvmci/jvmciJavaClasses.hpp"
57 #endif
58
59 #define __ masm->
60
61 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
62
63 class SimpleRuntimeFrame {
64
65 public:
66
67 // Most of the runtime stubs have this simple frame layout.
68 // This class exists to make the layout shared in one place.
69 // Offsets are for compiler stack slots, which are jints.
70 enum layout {
71 // The frame sender code expects that rbp will be in the "natural" place and
72 // will override any oopMap setting for it. We must therefore force the layout
73 // so that it agrees with the frame sender code.
74 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
75 rbp_off2,
76 return_off, return_off2,
77 framesize
78 };
79 };
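// Illustrative example (added for clarity, not in the original source): on
// platforms where frame::arg_reg_save_area_bytes is 0 (e.g. non-Windows x86_64),
// the enum above works out to
//   rbp_off    = 0, rbp_off2    = 1   // saved rbp occupies two jint slots
//   return_off = 2, return_off2 = 3   // return address occupies two jint slots
//   framesize  = 4                    // 4 jint slots == 2 machine words
// On Windows, where arg_reg_save_area_bytes is 32, all offsets shift up by 8 slots.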
80
81 class RegisterSaver {
82 // Capture info about frame layout. Layout offsets are in jint
83 // units because compiler frame slots are jints.
84 #define XSAVE_AREA_BEGIN 160
85 #define XSAVE_AREA_YMM_BEGIN 576
86 #define XSAVE_AREA_ZMM_BEGIN 1152
87 #define XSAVE_AREA_UPPERBANK 1664
88 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
89 #define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
90 #define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
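// Illustrative expansion (added for clarity): with BytesPerInt == 4,
// DEF_XMM_OFFS(1) expands to
//   xmm1_off = xmm_off + 1*16/BytesPerInt,   // == xmm_off + 4 jint slots
//   xmm1H_off                                // implicitly xmm_off + 5
// i.e. each 16-byte XMM save slot spans 4 compiler stack slots, with the
// trailing <reg>H_off entry naming the next slot.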
91 enum layout {
92 fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
93 xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
94 DEF_XMM_OFFS(0),
95 DEF_XMM_OFFS(1),
96 // 2..15 are implied in range usage
97 ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
98 DEF_YMM_OFFS(0),
99 DEF_YMM_OFFS(1),
100 // 2..15 are implied in range usage
101 zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
102 zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
103 DEF_ZMM_OFFS(16),
104 DEF_ZMM_OFFS(17),
105 // 18..31 are implied in range usage
106 fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
107 fpu_stateH_end,
108 r15_off, r15H_off,
109 r14_off, r14H_off,
110 r13_off, r13H_off,
111 r12_off, r12H_off,
112 r11_off, r11H_off,
113 r10_off, r10H_off,
114 r9_off, r9H_off,
115 r8_off, r8H_off,
116 rdi_off, rdiH_off,
117 rsi_off, rsiH_off,
118 ignore_off, ignoreH_off, // extra copy of rbp
119 rsp_off, rspH_off,
120 rbx_off, rbxH_off,
121 rdx_off, rdxH_off,
122 rcx_off, rcxH_off,
123 rax_off, raxH_off,
124 // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
125 align_off, alignH_off,
126 flags_off, flagsH_off,
127 // The frame sender code expects that rbp will be in the "natural" place and
128 // will override any oopMap setting for it. We must therefore force the layout
129 // so that it agrees with the frame sender code.
130 rbp_off, rbpH_off, // copy of rbp we will restore
131 return_off, returnH_off, // slot for return address
132 reg_save_size // size in compiler stack slots
133 };
134
135 public:
136 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
137 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
138
139 // Offsets into the register save area
140 // Used by deoptimization when it is managing result register
141 // values on its own
142
143 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
144 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
145 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
146 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
147 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
148
149 // During deoptimization only the result registers need to be restored,
150 // all the other values have already been extracted.
151 static void restore_result_registers(MacroAssembler* masm);
152 };
153
154 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
155 int off = 0;
156 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
157 if (UseAVX < 3) {
158 num_xmm_regs = num_xmm_regs/2;
159 }
160 #if COMPILER2_OR_JVMCI
161 if (save_vectors) {
162 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
163 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
164 }
165 #else
166 assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
167 #endif
168
169 // Always make the frame size 16-byte aligned; both vector and non-vector stacks are always allocated
170 int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
171 // OopMap frame size is in compiler stack slots (jint's) not bytes or words
172 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
173 // CodeBlob frame size is in words.
174 int frame_size_in_words = frame_size_in_bytes / wordSize;
175 *total_frame_words = frame_size_in_words;
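// Sketch of the unit conversions above (illustrative numbers): compiler stack
// slots are jints (BytesPerInt == 4) and machine words are 8 bytes on x86_64, so
//   frame_size_in_slots == frame_size_in_bytes / 4 == 2 * frame_size_in_words
// e.g. a 1024-byte save area is 256 OopMap slots and 128 CodeBlob words.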
176
177 // Save registers, fpu state, and flags.
178 // We assume caller has already pushed the return address onto the
179 // stack, so rsp is 8-byte aligned here.
180 // We push rbp twice in this sequence because we want the real rbp
181 // to be under the return address, like a normal enter.
182
183 __ enter(); // rsp becomes 16-byte aligned here
184 __ push_CPU_state(); // Push a multiple of 16 bytes
185
186 // push cpu state handles this on EVEX enabled targets
187 if (save_vectors) {
188 // Save upper half of YMM registers(0..15)
189 int base_addr = XSAVE_AREA_YMM_BEGIN;
190 for (int n = 0; n < 16; n++) {
191 __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
192 }
193 if (VM_Version::supports_evex()) {
194 // Save upper half of ZMM registers(0..15)
195 base_addr = XSAVE_AREA_ZMM_BEGIN;
196 for (int n = 0; n < 16; n++) {
197 __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
198 }
199 // Save full ZMM registers(16..num_xmm_regs)
200 base_addr = XSAVE_AREA_UPPERBANK;
201 off = 0;
202 int vector_len = Assembler::AVX_512bit;
203 for (int n = 16; n < num_xmm_regs; n++) {
204 __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
205 }
206 }
207 } else {
208 if (VM_Version::supports_evex()) {
209 // Save upper bank of ZMM registers(16..31) for double/float usage
210 int base_addr = XSAVE_AREA_UPPERBANK;
211 off = 0;
212 for (int n = 16; n < num_xmm_regs; n++) {
213 __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
214 }
215 }
216 }
217 __ vzeroupper();
218 if (frame::arg_reg_save_area_bytes != 0) {
219 // Allocate argument register save area
220 __ subptr(rsp, frame::arg_reg_save_area_bytes);
221 }
222
223 // Set an oopmap for the call site. This oopmap will map all
224 // oop-registers and debug-info registers as callee-saved. This
225 // will allow deoptimization at this safepoint to find all possible
226 // debug-info recordings, as well as let GC find all oops.
227
228 OopMapSet *oop_maps = new OopMapSet();
229 OopMap* map = new OopMap(frame_size_in_slots, 0);
230
231 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
232
233 map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
234 map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
235 map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
236 map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
237 // rbp location is known implicitly by the frame sender code, needs no oopmap
238 // and the location where rbp was saved is ignored
239 map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
240 map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
241 map->set_callee_saved(STACK_OFFSET( r8_off ), r8->as_VMReg());
242 map->set_callee_saved(STACK_OFFSET( r9_off ), r9->as_VMReg());
243 map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
244 map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
245 map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
246 map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
247 map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
248 map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
249 // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
250 // on EVEX enabled targets it is included in the xsave area
251 off = xmm0_off;
252 int delta = xmm1_off - off;
253 for (int n = 0; n < 16; n++) {
254 XMMRegister xmm_name = as_XMMRegister(n);
255 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
256 off += delta;
257 }
258 if(UseAVX > 2) {
259 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
260 off = zmm16_off;
261 delta = zmm17_off - off;
262 for (int n = 16; n < num_xmm_regs; n++) {
263 XMMRegister zmm_name = as_XMMRegister(n);
264 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
265 off += delta;
266 }
267 }
268
269 #if COMPILER2_OR_JVMCI
270 if (save_vectors) {
271 off = ymm0_off;
272 int delta = ymm1_off - off;
273 for (int n = 0; n < 16; n++) {
274 XMMRegister ymm_name = as_XMMRegister(n);
275 map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
276 off += delta;
277 }
278 }
279 #endif // COMPILER2_OR_JVMCI
280
281 // %%% These should all be a waste but we'll keep things as they were for now
282 if (true) {
283 map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
284 map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
285 map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
286 map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
287 // rbp location is known implicitly by the frame sender code, needs no oopmap
288 map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
289 map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
290 map->set_callee_saved(STACK_OFFSET( r8H_off ), r8->as_VMReg()->next());
291 map->set_callee_saved(STACK_OFFSET( r9H_off ), r9->as_VMReg()->next());
292 map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
293 map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
294 map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
295 map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
296 map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
297 map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
298 // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
299 // on EVEX enabled targets it is included in the xsave area
300 off = xmm0H_off;
301 delta = xmm1H_off - off;
302 for (int n = 0; n < 16; n++) {
303 XMMRegister xmm_name = as_XMMRegister(n);
304 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
305 off += delta;
306 }
307 if (UseAVX > 2) {
308 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
309 off = zmm16H_off;
310 delta = zmm17H_off - off;
311 for (int n = 16; n < num_xmm_regs; n++) {
312 XMMRegister zmm_name = as_XMMRegister(n);
313 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
314 off += delta;
315 }
316 }
317 }
318
319 return map;
320 }
321
322 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
323 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
324 if (UseAVX < 3) {
325 num_xmm_regs = num_xmm_regs/2;
326 }
327 if (frame::arg_reg_save_area_bytes != 0) {
328 // Pop arg register save area
329 __ addptr(rsp, frame::arg_reg_save_area_bytes);
330 }
331
332 #if COMPILER2_OR_JVMCI
333 if (restore_vectors) {
334 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
335 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
336 }
337 #else
338 assert(!restore_vectors, "vectors are generated only by C2");
339 #endif
340
341 __ vzeroupper();
342
343 // On EVEX enabled targets everything is handled in pop fpu state
344 if (restore_vectors) {
345 // Restore upper half of YMM registers (0..15)
346 int base_addr = XSAVE_AREA_YMM_BEGIN;
347 for (int n = 0; n < 16; n++) {
348 __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
349 }
350 if (VM_Version::supports_evex()) {
351 // Restore upper half of ZMM registers (0..15)
352 base_addr = XSAVE_AREA_ZMM_BEGIN;
353 for (int n = 0; n < 16; n++) {
354 __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
355 }
356 // Restore full ZMM registers(16..num_xmm_regs)
357 base_addr = XSAVE_AREA_UPPERBANK;
358 int vector_len = Assembler::AVX_512bit;
359 int off = 0;
360 for (int n = 16; n < num_xmm_regs; n++) {
361 __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
362 }
363 }
364 } else {
365 if (VM_Version::supports_evex()) {
366 // Restore upper bank of ZMM registers(16..31) for double/float usage
367 int base_addr = XSAVE_AREA_UPPERBANK;
368 int off = 0;
369 for (int n = 16; n < num_xmm_regs; n++) {
370 __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
371 }
372 }
373 }
374
375 // Recover CPU state
376 __ pop_CPU_state();
377 // Get the rbp described implicitly by the calling convention (no oopMap)
378 __ pop(rbp);
379 }
380
381 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
382
383 // Just restore the result registers. Only used by deoptimization. By
384 // now any callee-save register that needs to be restored to a c2
385 // caller of the deoptee has been extracted into the vframeArray
386 // and will be stuffed into the c2i adapter we create for later
387 // restoration, so only result registers need to be restored here.
388
389 // Restore fp result register
390 __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
391 // Restore integer result register
392 __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
393 __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
394
395 // Pop all of the register save area off the stack except the return address
396 __ addptr(rsp, return_offset_in_bytes());
397 }
398
399 // Is the vector's size (in bytes) bigger than the size saved by default?
400 // 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
401 bool SharedRuntime::is_wide_vector(int size) {
402 return size > 16;
403 }
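// For example (illustrative): is_wide_vector(16) is false, since 16-byte XMM
// state is already covered by fxsave/fxrstor, while is_wide_vector(32) and
// is_wide_vector(64) are true, so 32-byte YMM and 64-byte ZMM state needs the
// explicit vector save path in RegisterSaver above.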
404
405 size_t SharedRuntime::trampoline_size() {
406 return 16;
407 }
408
409 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
410 __ jump(RuntimeAddress(destination));
411 }
412
413 // The java_calling_convention describes stack locations as ideal slots on
414 // a frame with no abi restrictions. Since we must observe abi restrictions
415 // (like the placement of the saved rbp and return address) the slots must be
416 // biased by the following value.
417 static int reg2offset_in(VMReg r) {
418 // Account for saved rbp and return address
419 // This should really be in_preserve_stack_slots
420 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
421 }
422
423 static int reg2offset_out(VMReg r) {
424 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
425 }
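// Worked example (added for clarity): reg2offset_in() biases an incoming stack
// slot past the saved rbp and the return address (2 words == 4 slots). So the
// first incoming stack slot (reg2stack() == 0) maps to rbp + 16:
//   (0 + 4) * VMRegImpl::stack_slot_size == 4 * 4 == 16 bytes
// which is the word just above [saved rbp] at rbp + 0 and [return address] at rbp + 8.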
426
427 // ---------------------------------------------------------------------------
428 // Read the array of BasicTypes from a signature, and compute where the
429 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
430 // quantities. Values less than VMRegImpl::stack0 are registers, those above
431 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
432 // as framesizes are fixed.
433 // VMRegImpl::stack0 refers to the first slot 0(sp),
434 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
435 // values up to RegisterImpl::number_of_registers are the 64-bit
436 // integer registers.
437
438 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
439 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
440 // units regardless of build. Of course for i486 there is no 64-bit build.
441
442 // The Java calling convention is a "shifted" version of the C ABI.
443 // By skipping the first C ABI register we can call non-static jni methods
444 // with small numbers of arguments without having to shuffle the arguments
445 // at all. Since we control the java ABI we ought to at least get some
446 // advantage out of it.
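// Illustrative example (not exhaustive): for a signature (int, long, float, Object)
// the loop below assigns
//   T_INT    -> j_rarg0
//   T_LONG   -> j_rarg1 (its trailing T_VOID half is set_bad())
//   T_FLOAT  -> j_farg0
//   T_OBJECT -> j_rarg2
// and falls back to stack slots (stk_args advances by 2 four-byte slots per
// argument) only once the six integer or eight float argument registers are used up.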
447
448 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
449 VMRegPair *regs,
450 int total_args_passed,
451 int is_outgoing) {
452
453 // Create the mapping between argument positions and
454 // registers.
455 static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
456 j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
457 };
458 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
459 j_farg0, j_farg1, j_farg2, j_farg3,
460 j_farg4, j_farg5, j_farg6, j_farg7
461 };
462
463
464 uint int_args = 0;
465 uint fp_args = 0;
466 uint stk_args = 0; // inc by 2 each time
467
468 for (int i = 0; i < total_args_passed; i++) {
469 switch (sig_bt[i]) {
470 case T_BOOLEAN:
471 case T_CHAR:
472 case T_BYTE:
473 case T_SHORT:
474 case T_INT:
475 if (int_args < Argument::n_int_register_parameters_j) {
476 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
477 } else {
478 regs[i].set1(VMRegImpl::stack2reg(stk_args));
479 stk_args += 2;
480 }
481 break;
482 case T_VOID:
483 // halves of T_LONG or T_DOUBLE
484 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
485 regs[i].set_bad();
486 break;
487 case T_LONG:
488 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
489 // fall through
490 case T_OBJECT:
491 case T_ARRAY:
492 case T_ADDRESS:
493 case T_VALUETYPE:
494 case T_VALUETYPEPTR:
495 if (int_args < Argument::n_int_register_parameters_j) {
496 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
497 } else {
498 regs[i].set2(VMRegImpl::stack2reg(stk_args));
499 stk_args += 2;
500 }
501 break;
502 case T_FLOAT:
503 if (fp_args < Argument::n_float_register_parameters_j) {
504 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
505 } else {
506 regs[i].set1(VMRegImpl::stack2reg(stk_args));
507 stk_args += 2;
508 }
509 break;
510 case T_DOUBLE:
511 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
512 if (fp_args < Argument::n_float_register_parameters_j) {
513 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
514 } else {
515 regs[i].set2(VMRegImpl::stack2reg(stk_args));
516 stk_args += 2;
517 }
518 break;
519 default:
520 ShouldNotReachHere();
521 break;
522 }
523 }
524
525 return align_up(stk_args, 2);
526 }
527
528 // Same as java_calling_convention() but for multiple return
529 // values. There's no way to store them on the stack so if we don't
530 // have enough registers, multiple values can't be returned.
531 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
532 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
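// Illustrative example (added, not in the original): returning the pair
// (int, double) under this convention uses
//   T_INT    -> INT_ArgReg[0], i.e. rax
//   T_DOUBLE -> FP_ArgReg[0],  i.e. j_farg0 (its T_VOID half is set_bad())
// and java_return_convention() returns 2; it returns -1 as soon as either
// register class is exhausted, since results cannot be spilled to the stack.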
533 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
534 VMRegPair *regs,
535 int total_args_passed) {
536 // Create the mapping between argument positions and
537 // registers.
538 static const Register INT_ArgReg[java_return_convention_max_int] = {
539 rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
540 };
541 static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
542 j_farg0, j_farg1, j_farg2, j_farg3,
543 j_farg4, j_farg5, j_farg6, j_farg7
544 };
545
546
547 uint int_args = 0;
548 uint fp_args = 0;
549
550 for (int i = 0; i < total_args_passed; i++) {
551 switch (sig_bt[i]) {
552 case T_BOOLEAN:
553 case T_CHAR:
554 case T_BYTE:
555 case T_SHORT:
556 case T_INT:
557 if (int_args < Argument::n_int_register_parameters_j+1) {
558 regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
559 int_args++;
560 } else {
561 return -1;
562 }
563 break;
564 case T_VOID:
565 // halves of T_LONG or T_DOUBLE
566 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
567 regs[i].set_bad();
568 break;
569 case T_LONG:
570 assert(sig_bt[i + 1] == T_VOID, "expecting half");
571 // fall through
572 case T_OBJECT:
573 case T_ARRAY:
574 case T_ADDRESS:
575 case T_METADATA:
576 case T_VALUETYPEPTR:
577 if (int_args < Argument::n_int_register_parameters_j+1) {
578 regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
579 int_args++;
580 } else {
581 return -1;
582 }
583 break;
584 case T_FLOAT:
585 if (fp_args < Argument::n_float_register_parameters_j) {
586 regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
587 fp_args++;
588 } else {
589 return -1;
590 }
591 break;
592 case T_DOUBLE:
593 assert(sig_bt[i + 1] == T_VOID, "expecting half");
594 if (fp_args < Argument::n_float_register_parameters_j) {
595 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
596 fp_args++;
597 } else {
598 return -1;
599 }
600 break;
601 default:
602 ShouldNotReachHere();
603 break;
604 }
605 }
606
607 return int_args + fp_args;
608 }
609
610 // Patch the caller's callsite with the entry to compiled code, if it exists.
611 static void patch_callers_callsite(MacroAssembler *masm) {
612 Label L;
613 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
614 __ jcc(Assembler::equal, L);
615
616 // Save the current stack pointer
617 __ mov(r13, rsp);
618 // Schedule the branch target address early.
619 // Call into the VM to patch the caller, then jump to compiled callee
620 // rax isn't live so capture return address while we easily can
621 __ movptr(rax, Address(rsp, 0));
622
623 // align stack so push_CPU_state doesn't fault
624 __ andptr(rsp, -(StackAlignmentInBytes));
625 __ push_CPU_state();
626 __ vzeroupper();
627 // VM needs caller's callsite
628 // VM needs target method
629 // This needs to be a long call since we will relocate this adapter to
630 // the codeBuffer and it may not reach
631
632 // Allocate argument register save area
633 if (frame::arg_reg_save_area_bytes != 0) {
634 __ subptr(rsp, frame::arg_reg_save_area_bytes);
635 }
636 __ mov(c_rarg0, rbx);
637 __ mov(c_rarg1, rax);
638 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
639
640 // De-allocate argument register save area
641 if (frame::arg_reg_save_area_bytes != 0) {
642 __ addptr(rsp, frame::arg_reg_save_area_bytes);
643 }
644
645 __ vzeroupper();
646 __ pop_CPU_state();
647 // restore sp
648 __ mov(rsp, r13);
649 __ bind(L);
650 }
651
652 // For each value type argument, sig includes the list of fields of
653 // the value type. This utility function computes the number of
654 // arguments for the call if value types are passed by reference (the
655 // calling convention the interpreter expects).
656 static int compute_total_args_passed_int(const GrowableArray<SigEntry>& sig_extended) {
657 int total_args_passed = 0;
658 if (ValueTypePassFieldsAsArgs) {
659 for (int i = 0; i < sig_extended.length(); i++) {
660 BasicType bt = sig_extended.at(i)._bt;
661 if (bt == T_VALUETYPE) {
662 // In sig_extended, a value type argument starts with:
663 // T_VALUETYPE, followed by the types of the fields of the
664 // value type and T_VOID to mark the end of the value
665 // type. Value types are flattened so, for instance, in the
666 // case of a value type with an int field and a value type
667 // field that itself has 2 fields, an int and a long:
668 // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second
669 // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID
670 // (outer T_VALUETYPE)
671 total_args_passed++;
672 int vt = 1;
673 do {
674 i++;
675 BasicType bt = sig_extended.at(i)._bt;
676 BasicType prev_bt = sig_extended.at(i-1)._bt;
677 if (bt == T_VALUETYPE) {
678 vt++;
679 } else if (bt == T_VOID &&
680 prev_bt != T_LONG &&
681 prev_bt != T_DOUBLE) {
682 vt--;
683 }
684 } while (vt != 0);
685 } else {
686 total_args_passed++;
687 }
688 }
689 } else {
690 total_args_passed = sig_extended.length();
691 }
692 return total_args_passed;
693 }
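// Worked example (illustrative): for the flattened signature described above,
//   T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID T_VOID T_VOID
// the outer T_VALUETYPE bumps total_args_passed once, the inner do/while walk
// consumes the remaining seven entries (vt goes 1 -> 2 -> 1 -> 0), and the
// whole value type counts as a single interpreter argument.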
694
695
696 static void gen_c2i_adapter_helper(MacroAssembler* masm,
697 BasicType bt,
698 BasicType prev_bt,
699 size_t size_in_bytes,
700 const VMRegPair& reg_pair,
701 const Address& to,
702 int extraspace,
703 bool is_oop) {
704 assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
705 if (bt == T_VOID) {
706 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
707 return;
708 }
709
710 // Say 4 args:
711 // i st_off
712 // 0 32 T_LONG
713 // 1 24 T_VOID
714 // 2 16 T_OBJECT
715 // 3 8 T_BOOL
716 // - 0 return address
717 //
718 // However, to make things extra confusing: because we can fit a long/double in
719 // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
720 // leaves one slot empty and only stores to a single slot. In this case the
721 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
722
723 bool wide = (size_in_bytes == wordSize);
724 VMReg r_1 = reg_pair.first();
725 VMReg r_2 = reg_pair.second();
726 assert(r_2->is_valid() == wide, "invalid size");
727 if (!r_1->is_valid()) {
728 assert(!r_2->is_valid(), "must be invalid");
729 return;
730 }
731
732 if (!r_1->is_XMMRegister()) {
733 Register val = rax;
734 assert_different_registers(to.base(), val);
735 if(r_1->is_stack()) {
736 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
737 __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
738 } else {
739 val = r_1->as_Register();
740 }
741 if (is_oop) {
742 __ store_heap_oop(to, val);
743 } else {
744 __ store_sized_value(to, val, size_in_bytes);
745 }
746 } else {
747 if (wide) {
748 __ movdbl(to, r_1->as_XMMRegister());
749 } else {
750 __ movflt(to, r_1->as_XMMRegister());
751 }
752 }
753 }
754
755 static void gen_c2i_adapter(MacroAssembler *masm,
756 const GrowableArray<SigEntry>& sig_extended,
757 const VMRegPair *regs,
758 Label& skip_fixup,
759 address start,
760 OopMapSet*& oop_maps,
761 int& frame_complete,
762 int& frame_size_in_words) {
763 // Before we get into the guts of the C2I adapter, see if we should be here
764 // at all. We've come from compiled code and are attempting to jump to the
765 // interpreter, which means the caller made a static call to get here
766 // (vcalls always get a compiled target if there is one). Check for a
767 // compiled target. If there is one, we need to patch the caller's call.
768 patch_callers_callsite(masm);
769
770 __ bind(skip_fixup);
771
772 bool has_value_argument = false;
773 if (ValueTypePassFieldsAsArgs) {
774 // Is there a value type argument?
775 for (int i = 0; i < sig_extended.length() && !has_value_argument; i++) {
776 has_value_argument = (sig_extended.at(i)._bt == T_VALUETYPE);
777 }
778 if (has_value_argument) {
779 // There is at least a value type argument: we're coming from
780 // compiled code so we have no buffers to back the value
781 // types. Allocate the buffers here with a runtime call.
782 oop_maps = new OopMapSet();
783 OopMap* map = NULL;
784
785 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
786
787 frame_complete = __ offset();
788
789 __ set_last_Java_frame(noreg, noreg, NULL);
790
791 __ mov(c_rarg0, r15_thread);
792 __ mov(c_rarg1, rbx);
793
794 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types)));
795
796 oop_maps->add_gc_map((int)(__ pc() - start), map);
797 __ reset_last_Java_frame(false);
798
799 RegisterSaver::restore_live_registers(masm);
800
801 Label no_exception;
802 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
803 __ jcc(Assembler::equal, no_exception);
804
805 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
806 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
807 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
808
809 __ bind(no_exception);
810
811 // We get an array of objects from the runtime call
812 __ get_vm_result(r13, r15_thread); // Use r13 as temporary because r10 is trashed by movptr()
813 __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
814 __ mov(r10, r13);
815 }
816 }
817
818 // Since all args are passed on the stack, total_args_passed *
819 // Interpreter::stackElementSize is the space we need. Plus one extra word
820 // because we also account for the return address location, since
821 // we store it first rather than hold it in rax across all the shuffling.
822 int total_args_passed = compute_total_args_passed_int(sig_extended);
823 int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
824
825 // stack is aligned, keep it that way
826 extraspace = align_up(extraspace, 2*wordSize);
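// Example of the sizing above (illustrative): with 3 interpreter arguments and
// Interpreter::stackElementSize == 8 (as on x86_64), extraspace is 3*8 + 8 == 32
// bytes for the args plus the relocated return address, and align_up(32, 16)
// leaves it at 32.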
827
828 // Get return address
829 __ pop(rax);
830
831 // set senderSP value
832 __ mov(r13, rsp);
833
834 __ subptr(rsp, extraspace);
835
836 // Store the return address in the expected location
837 __ movptr(Address(rsp, 0), rax);
838
839 // Now write the args into the outgoing interpreter space
840
841 // next_arg_comp is the next argument from the compiler point of
842 // view (value type fields are passed in registers/on the stack). In
843 // sig_extended, a value type argument starts with: T_VALUETYPE,
844 // followed by the types of the fields of the value type and T_VOID
845 // to mark the end of the value type. ignored counts the number of
846 // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument:
847 // used to get the buffer for that argument from the pool of buffers
848 // we allocated above and want to pass to the
849 // interpreter. next_arg_int is the next argument from the
850 // interpreter point of view (value types are passed by reference).
851 bool has_oop_field = false;
852 for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
853 next_arg_comp < sig_extended.length(); next_arg_comp++) {
854 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
855 assert(next_arg_int < total_args_passed, "more arguments for the interpreter than expected?");
856 BasicType bt = sig_extended.at(next_arg_comp)._bt;
857 int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
858 if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
859 int next_off = st_off - Interpreter::stackElementSize;
860 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
861 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
862 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
863 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
864 size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
865 next_arg_int++;
866 #ifdef ASSERT
867 if (bt == T_LONG || bt == T_DOUBLE) {
868 // Overwrite the unused slot with known junk
869 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
870 __ movptr(Address(rsp, st_off), rax);
871 }
872 #endif /* ASSERT */
873 } else {
874 ignored++;
875 // get the buffer from the just allocated pool of buffers
876 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE);
877 __ load_heap_oop(r11, Address(r10, index));
878 next_vt_arg++; next_arg_int++;
879 int vt = 1;
880 // write fields we get from compiled code in registers/stack
881 // slots to the buffer: we know we are done with that value type
882 // argument when we hit the T_VOID that acts as an end of value
883 // type delimiter for this value type. Value types are flattened
884 // so we might encounter embedded value types. Each entry in
885 // sig_extended contains a field offset in the buffer.
886 do {
887 next_arg_comp++;
888 BasicType bt = sig_extended.at(next_arg_comp)._bt;
889 BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
890 if (bt == T_VALUETYPE) {
891 vt++;
892 ignored++;
893 } else if (bt == T_VOID &&
894 prev_bt != T_LONG &&
895 prev_bt != T_DOUBLE) {
896 vt--;
897 ignored++;
898 } else {
899 int off = sig_extended.at(next_arg_comp)._offset;
900 assert(off > 0, "offset in object should be positive");
901 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
902 bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
903 has_oop_field = has_oop_field || is_oop;
904 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
905 size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop);
906 }
907 } while (vt != 0);
908 // pass the buffer to the interpreter
909 __ movptr(Address(rsp, st_off), r11);
910 }
911 }
912
913 // If a value type was allocated and initialized, apply post barrier to all oop fields
914 if (has_value_argument && has_oop_field) {
915 __ push(r13); // save senderSP
916 __ push(rbx); // save callee
917 // Allocate argument register save area
918 if (frame::arg_reg_save_area_bytes != 0) {
919 __ subptr(rsp, frame::arg_reg_save_area_bytes);
920 }
921 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10);
922 // De-allocate argument register save area
923 if (frame::arg_reg_save_area_bytes != 0) {
924 __ addptr(rsp, frame::arg_reg_save_area_bytes);
925 }
926 __ pop(rbx); // restore callee
927 __ pop(r13); // restore sender SP
928 }
929
930 // Schedule the branch target address early.
931 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
932 __ jmp(rcx);
933 }
934
935 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
936 address code_start, address code_end,
937 Label& L_ok) {
938 Label L_fail;
939 __ lea(temp_reg, ExternalAddress(code_start));
940 __ cmpptr(pc_reg, temp_reg);
941 __ jcc(Assembler::belowEqual, L_fail);
942 __ lea(temp_reg, ExternalAddress(code_end));
943 __ cmpptr(pc_reg, temp_reg);
944 __ jcc(Assembler::below, L_ok);
945 __ bind(L_fail);
946 }
947
948 static void gen_i2c_adapter_helper(MacroAssembler* masm,
949 BasicType bt,
950 BasicType prev_bt,
951 size_t size_in_bytes,
952 const VMRegPair& reg_pair,
953 const Address& from,
954 bool is_oop) {
955 assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
956 if (bt == T_VOID) {
957 // Longs and doubles are passed in native word order, but misaligned
958 // in the 32-bit build.
959 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
960 return;
961 }
962 assert(!reg_pair.second()->is_valid() || reg_pair.first()->next() == reg_pair.second(),
963 "scrambled load targets?");
964
965 bool wide = (size_in_bytes == wordSize);
966 VMReg r_1 = reg_pair.first();
967 VMReg r_2 = reg_pair.second();
968 assert(r_2->is_valid() == wide, "invalid size");
969 if (!r_1->is_valid()) {
970 assert(!r_2->is_valid(), "must be invalid");
971 return;
972 }
973
974 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
975 if (!r_1->is_XMMRegister()) {
976 // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
977 // and if we end up going through a c2i because of a miss, a reasonable value of r13
978 // will be generated.
979 Register dst = r_1->is_stack() ? r13 : r_1->as_Register();
980 if (is_oop) {
981 __ load_heap_oop(dst, from);
982 } else {
983 __ load_sized_value(dst, from, size_in_bytes, is_signed);
984 }
985 if (r_1->is_stack()) {
986 // Convert stack slot to an SP offset (+ wordSize to account for return address)
987 int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
988 __ movq(Address(rsp, st_off), dst);
989 }
990 } else {
991 if (wide) {
992 __ movdbl(r_1->as_XMMRegister(), from);
993 } else {
994 __ movflt(r_1->as_XMMRegister(), from);
995 }
996 }
997 }
998
999 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1000 int comp_args_on_stack,
1001 const GrowableArray<SigEntry>& sig_extended,
1002 const VMRegPair *regs) {
1003
1004 // Note: r13 contains the senderSP on entry. We must preserve it since
1005 // we may do an i2c -> c2i transition if we lose a race where compiled
1006 // code goes non-entrant while we get args ready.
1007 // In addition we use r13 to locate all the interpreter args as
1008 // we must align the stack to 16 bytes on an i2c entry, else we
1009 // lose the alignment we expect in all compiled code and the register
1010 // save code can segv when fxsave instructions find an improperly
1011 // aligned stack pointer.
1012
1013 // Adapters can be frameless because they do not require the caller
1014 // to perform additional cleanup work, such as correcting the stack pointer.
1015 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1016 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1017 // even if a callee has modified the stack pointer.
1018 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1019 // routinely repairs its caller's stack pointer (from sender_sp, which is set
1020 // up via the senderSP register).
1021 // In other words, if *either* the caller or callee is interpreted, we can
1022 // get the stack pointer repaired after a call.
1023 // This is why c2i and i2c adapters cannot be indefinitely composed.
1024 // In particular, if a c2i adapter were to somehow call an i2c adapter,
1025 // both caller and callee would be compiled methods, and neither would
1026 // clean up the stack pointer changes performed by the two adapters.
1027 // If this happens, control eventually transfers back to the compiled
1028 // caller, but with an uncorrected stack, causing delayed havoc.
1029
1030 // Pick up the return address
1031 __ movptr(rax, Address(rsp, 0));
1032
1033 if (VerifyAdapterCalls &&
1034 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
1035 // So, let's test for cascading c2i/i2c adapters right now.
1036 // assert(Interpreter::contains($return_addr) ||
1037 // StubRoutines::contains($return_addr),
1038 // "i2c adapter must return to an interpreter frame");
1039 __ block_comment("verify_i2c { ");
1040 Label L_ok;
1041 if (Interpreter::code() != NULL)
1042 range_check(masm, rax, r11,
1043 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
1044 L_ok);
1045 if (StubRoutines::code1() != NULL)
1046 range_check(masm, rax, r11,
1047 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
1048 L_ok);
1049 if (StubRoutines::code2() != NULL)
1050 range_check(masm, rax, r11,
1051 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
1052 L_ok);
1053 const char* msg = "i2c adapter must return to an interpreter frame";
1054 __ block_comment(msg);
1055 __ stop(msg);
1056 __ bind(L_ok);
1057 __ block_comment("} verify_i2ce ");
1058 }
1059
1060 // Must preserve original SP for loading incoming arguments because
1061 // we need to align the outgoing SP for compiled code.
1062 __ movptr(r11, rsp);
1063
1064 // Cut-out for having no stack args. Since up to 6 int/oop args are passed
1065 // in registers, we will occasionally have no stack args.
1066 int comp_words_on_stack = 0;
1067 if (comp_args_on_stack) {
1068 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
1069 // registers are below. By subtracting stack0, we either get a negative
1070 // number (all values in registers) or the maximum stack slot accessed.
1071
1072 // Convert 4-byte c2 stack slots to words.
1073 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1074 // Round up to minimum stack alignment, in wordSize
1075 comp_words_on_stack = align_up(comp_words_on_stack, 2);
1076 __ subptr(rsp, comp_words_on_stack * wordSize);
1077 }
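// Worked example (illustrative): if comp_args_on_stack is 3 slots, then
//   3 * 4 bytes == 12, align_up(12, wordSize) >> LogBytesPerWord == 2 words,
//   align_up(2, 2) == 2 words,
// so rsp is dropped by 2 * wordSize == 16 bytes to make room for the compiled
// target's stack arguments.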
1078
1079
1080 // Ensure compiled code always sees stack at proper alignment
1081 __ andptr(rsp, -16);
1082
1083 // Push the return address, misaligning the stack so that the youngest frame
1084 // sees the layout it expects right after a call instruction.
1085 __ push(rax);
1086
1087 // Put saved SP in another register
1088 const Register saved_sp = rax;
1089 __ movptr(saved_sp, r11);
1090
1091 // Will jump to the compiled code just as if compiled code was doing it.
1092 // Pre-load the register-jump target early, to schedule it better.
1093 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
1094
1095 #if INCLUDE_JVMCI
1096 if (EnableJVMCI || UseAOT) {
1097 // check if this call should be routed towards a specific entry point
1098 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1099 Label no_alternative_target;
1100 __ jcc(Assembler::equal, no_alternative_target);
1101 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1102 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1103 __ bind(no_alternative_target);
1104 }
1105 #endif // INCLUDE_JVMCI
1106
1107 int total_args_passed = compute_total_args_passed_int(sig_extended);
1108 // Now generate the shuffle code. Pick up all register args and move the
1109 // rest through the floating point stack top.
1110
1111 // next_arg_comp is the next argument from the compiler point of
1112 // view (value type fields are passed in registers/on the stack). In
1113 // sig_extended, a value type argument starts with: T_VALUETYPE,
1114 // followed by the types of the fields of the value type and T_VOID
1115 // to mark the end of the value type. ignored counts the number of
1116 // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the
1117 // interpreter point of view (value types are passed by reference).
1118 for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig_extended.length(); next_arg_comp++) {
1119 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
1120 assert(next_arg_int < total_args_passed, "more arguments from the interpreter than expected?");
1121 BasicType bt = sig_extended.at(next_arg_comp)._bt;
1122 int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize;
1123 if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
1124 // Load in argument order going down.
1125 // Point to interpreter value (vs. tag)
1126 int next_off = ld_off - Interpreter::stackElementSize;
1127 int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
1128 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1129 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1130 gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
1131 size_in_bytes, reg_pair, Address(saved_sp, offset), false);
1132 next_arg_int++;
1133 } else {
1134 next_arg_int++;
1135 ignored++;
1136 // get the buffer for that value type
1137 __ movptr(r10, Address(saved_sp, ld_off));
1138 int vt = 1;
1139 // load fields to registers/stack slots from the buffer: we know
1140 // we are done with that value type argument when we hit the
1141 // T_VOID that acts as an end of value type delimiter for this
1142 // value type. Value types are flattened so we might encounter
1143 // embedded value types. Each entry in sig_extended contains a
1144 // field offset in the buffer.
1145 do {
1146 next_arg_comp++;
1147 BasicType bt = sig_extended.at(next_arg_comp)._bt;
1148 BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
1149 if (bt == T_VALUETYPE) {
1150 vt++;
1151 ignored++;
1152 } else if (bt == T_VOID &&
1153 prev_bt != T_LONG &&
1154 prev_bt != T_DOUBLE) {
1155 vt--;
1156 ignored++;
1157 } else {
1158 int off = sig_extended.at(next_arg_comp)._offset;
1159 assert(off > 0, "offset in object should be positive");
1160 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1161 bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
1162 gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop);
1163 }
1164 } while (vt != 0);
1165 }
1166 }
1167
1168 // 6243940 We might end up in handle_wrong_method if
1169 // the callee is deoptimized as we race through here. If that
1170 // happens we don't want to take a safepoint because the
1171 // caller frame will look interpreted and arguments are now
1172 // "compiled", so it is much better to make this transition
1173 // invisible to the stack walking code. Unfortunately, if
1174 // we try to find the callee by normal means a safepoint
1175 // is possible. So we stash the desired callee in the thread
1176 // and the VM will find it there should this case occur.
1177
1178 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1179
1180 // put Method* where a c2i would expect should we end up there
1181 // only needed because of c2 resolve stubs return Method* as a result in
1182 // rax
1183 __ mov(rax, rbx);
1184 __ jmp(r11);
1185 }
1186
1187 // ---------------------------------------------------------------
1188 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1189 int comp_args_on_stack,
1190 const GrowableArray<SigEntry>& sig_extended,
1191 const VMRegPair *regs,
1192 AdapterFingerPrint* fingerprint,
1193 AdapterBlob*& new_adapter) {
1194 address i2c_entry = __ pc();
1195
1196 gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
1197
1198 // -------------------------------------------------------------------------
1199 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
1200 // to the interpreter. The args start out packed in the compiled layout. They
1201 // need to be unpacked into the interpreter layout. This will almost always
1202 // require some stack space. We grow the current (compiled) stack, then repack
1203 // the args. We finally end in a jump to the generic interpreter entry point.
1204 // On exit from the interpreter, the interpreter will restore our SP (lest the
1205 // compiled code, which relies solely on SP and not RBP, get sick).
1206
1207 address c2i_unverified_entry = __ pc();
1208 Label skip_fixup;
1209 Label ok;
1210
1211 Register holder = rax;
1212 Register receiver = j_rarg0;
1213 Register temp = rbx;
1214
1215 {
1216 __ load_klass(temp, receiver);
1217 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1218 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1219 __ jcc(Assembler::equal, ok);
1220 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1221
1222 __ bind(ok);
1223 // Method might have been compiled since the call site was patched to
1224 // interpreted; if that is the case, treat it as a miss so we can get
1225 // the call site corrected.
1226 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
1227 __ jcc(Assembler::equal, skip_fixup);
1228 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1229 }
1230
1231 address c2i_entry = __ pc();
1232
1233 OopMapSet* oop_maps = NULL;
1234 int frame_complete = CodeOffsets::frame_never_safe;
1235 int frame_size_in_words = 0;
1236 gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
1237
1238 __ flush();
1239 new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
1240
1241 // If the method has value type arguments, save the extended signature as a symbol in
1242 // the AdapterHandlerEntry to be used for scalarization of value type arguments.
1243 Symbol* extended_signature = NULL;
1244 bool has_value_argument = false;
1245 Thread* THREAD = Thread::current();
1246 ResourceMark rm(THREAD);
1247 int length = sig_extended.length();
1248 char* sig_str = NEW_RESOURCE_ARRAY(char, 2*length + 3);
1249 int idx = 0;
1250 sig_str[idx++] = '(';
1251 for (int index = 0; index < length; index++) {
1252 BasicType bt = sig_extended.at(index)._bt;
1253 if (bt == T_VALUETYPE) {
1254 has_value_argument = true;
1255 } else if (bt == T_VALUETYPEPTR) {
1256 has_value_argument = true;
1257 // non-flattened value type field
1258 sig_str[idx++] = type2char(T_VALUETYPE);
1259 sig_str[idx++] = ';';
1260 } else if (bt == T_VOID) {
1261 // Ignore
1262 } else {
1263 if (bt == T_ARRAY) {
1264 bt = T_OBJECT; // We don't know the element type, treat as Object
1265 }
1266 sig_str[idx++] = type2char(bt);
1267 if (bt == T_OBJECT) {
1268 sig_str[idx++] = ';';
1269 }
1270 }
1271 }
1272 sig_str[idx++] = ')';
1273 sig_str[idx++] = '\0';
1274 if (has_value_argument) {
1275 // Extended signature is only required if a value type argument is passed
1276 extended_signature = SymbolTable::new_permanent_symbol(sig_str, THREAD);
1277 }
1278
1279 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, extended_signature);
1280 }
1281
1282 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1283 VMRegPair *regs,
1284 VMRegPair *regs2,
1285 int total_args_passed) {
1286 assert(regs2 == NULL, "not needed on x86");
1287 // We return the number of VMRegImpl stack slots we need to reserve for all
1288 // the arguments NOT counting out_preserve_stack_slots.
1289
1290 // NOTE: These arrays will have to change when c1 is ported
1291 #ifdef _WIN64
1292 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1293 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1294 };
1295 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1296 c_farg0, c_farg1, c_farg2, c_farg3
1297 };
1298 #else
1299 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1300 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1301 };
1302 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1303 c_farg0, c_farg1, c_farg2, c_farg3,
1304 c_farg4, c_farg5, c_farg6, c_farg7
1305 };
1306 #endif // _WIN64
1307
1308
1309 uint int_args = 0;
1310 uint fp_args = 0;
1311 uint stk_args = 0; // inc by 2 each time
1312
1313 for (int i = 0; i < total_args_passed; i++) {
1314 switch (sig_bt[i]) {
1315 case T_BOOLEAN:
1316 case T_CHAR:
1317 case T_BYTE:
1318 case T_SHORT:
1319 case T_INT:
1320 if (int_args < Argument::n_int_register_parameters_c) {
1321 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1322 #ifdef _WIN64
1323 fp_args++;
1324 // Allocate slots for the callee to stuff register args on the stack.
1325 stk_args += 2;
1326 #endif
1327 } else {
1328 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1329 stk_args += 2;
1330 }
1331 break;
1332 case T_LONG:
1333 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1334 // fall through
1335 case T_OBJECT:
1336 case T_ARRAY:
1337 case T_ADDRESS:
1338 case T_METADATA:
1339 if (int_args < Argument::n_int_register_parameters_c) {
1340 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1341 #ifdef _WIN64
1342 fp_args++;
1343 stk_args += 2;
1344 #endif
1345 } else {
1346 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1347 stk_args += 2;
1348 }
1349 break;
1350 case T_FLOAT:
1351 if (fp_args < Argument::n_float_register_parameters_c) {
1352 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1353 #ifdef _WIN64
1354 int_args++;
1355 // Allocate slots for the callee to stuff register args on the stack.
1356 stk_args += 2;
1357 #endif
1358 } else {
1359 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1360 stk_args += 2;
1361 }
1362 break;
1363 case T_DOUBLE:
1364 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1365 if (fp_args < Argument::n_float_register_parameters_c) {
1366 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1367 #ifdef _WIN64
1368 int_args++;
1369 // Allocate slots for the callee to stuff register args on the stack.
1370 stk_args += 2;
1371 #endif
1372 } else {
1373 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1374 stk_args += 2;
1375 }
1376 break;
1377 case T_VOID: // Halves of longs and doubles
1378 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1379 regs[i].set_bad();
1380 break;
1381 default:
1382 ShouldNotReachHere();
1383 break;
1384 }
1385 }
1386 #ifdef _WIN64
1387 // The Windows ABI requires that we always allocate enough stack space
1388 // for 4 64-bit registers to be stored down.
1389 if (stk_args < 8) {
1390 stk_args = 8;
1391 }
1392 #endif // _WIN64
1393
1394 return stk_args;
1395 }
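// Illustrative contrast of the two ABIs handled above (not exhaustive): for a
// native signature (int, float, long),
//   System V (non-Windows): int -> c_rarg0, float -> c_farg0, long -> c_rarg1;
//     integer and FP registers are counted independently and 0 stack slots are returned.
//   Windows x64: int -> c_rarg0, float -> c_farg1, long -> c_rarg2, because the
//     four parameter positions are shared between the register files; the routine
//     also reports at least 8 slots for the caller-reserved register home area.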
1396
1397 // On 64-bit we will store integer-like items to the stack as
1398 // 64-bit items (sparc abi) even though java would only store
1399 // 32 bits for a parameter. On 32-bit it will simply be 32 bits.
1400 // So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
1401 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1402 if (src.first()->is_stack()) {
1403 if (dst.first()->is_stack()) {
1404 // stack to stack
1405 __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
1406 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1407 } else {
1408 // stack to reg
1409 __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1410 }
1411 } else if (dst.first()->is_stack()) {
1412 // reg to stack
1413 // Do we really have to sign extend???
1414 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
1415 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1416 } else {
1417 // Do we really have to sign extend???
1418 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
1419 if (dst.first() != src.first()) {
1420 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1421 }
1422 }
1423 }
1424
1425 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1426 if (src.first()->is_stack()) {
1427 if (dst.first()->is_stack()) {
1428 // stack to stack
1429 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1430 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1431 } else {
1432 // stack to reg
1433 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1434 }
1435 } else if (dst.first()->is_stack()) {
1436 // reg to stack
1437 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1438 } else {
1439 if (dst.first() != src.first()) {
1440 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1441 }
1442 }
1443 }
1444
1445 // An oop arg. Must pass a handle not the oop itself
1446 static void object_move(MacroAssembler* masm,
1447 OopMap* map,
1448 int oop_handle_offset,
1449 int framesize_in_slots,
1450 VMRegPair src,
1451 VMRegPair dst,
1452 bool is_receiver,
1453 int* receiver_offset) {
1454
1455 // must pass a handle. First figure out the location we use as a handle
1456
1457 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
1458
  // See if the oop is NULL; if it is, we need no handle.
1460
1461 if (src.first()->is_stack()) {
1462
1463 // Oop is already on the stack as an argument
1464 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1465 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1466 if (is_receiver) {
1467 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1468 }
1469
1470 __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1471 __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1472 // conditionally move a NULL
1473 __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
1474 } else {
1475
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles and pass a handle if the oop is non-NULL.
1478
1479 const Register rOop = src.first()->as_Register();
1480 int oop_slot;
1481 if (rOop == j_rarg0)
1482 oop_slot = 0;
1483 else if (rOop == j_rarg1)
1484 oop_slot = 1;
1485 else if (rOop == j_rarg2)
1486 oop_slot = 2;
1487 else if (rOop == j_rarg3)
1488 oop_slot = 3;
1489 else if (rOop == j_rarg4)
1490 oop_slot = 4;
1491 else {
1492 assert(rOop == j_rarg5, "wrong register");
1493 oop_slot = 5;
1494 }
1495
1496 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
1497 int offset = oop_slot*VMRegImpl::stack_slot_size;
1498
1499 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1500 // Store oop in handle area, may be NULL
1501 __ movptr(Address(rsp, offset), rOop);
1502 if (is_receiver) {
1503 *receiver_offset = offset;
1504 }
1505
1506 __ cmpptr(rOop, (int32_t)NULL_WORD);
1507 __ lea(rHandle, Address(rsp, offset));
1508 // conditionally move a NULL from the handle area where it was just stored
1509 __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
1510 }
1511
  // If the arg is on the stack then place it there, otherwise it is already in the correct reg.
1513 if (dst.first()->is_stack()) {
1514 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1515 }
1516 }
1517
// A float arg may have to do a float reg to int reg conversion
1519 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1520 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1521
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1525
1526 if (src.first()->is_stack()) {
1527 if (dst.first()->is_stack()) {
1528 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1529 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1530 } else {
1531 // stack to reg
1532 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1533 __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1534 }
1535 } else if (dst.first()->is_stack()) {
1536 // reg to stack
1537 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1538 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1539 } else {
1540 // reg to reg
1541 // In theory these overlap but the ordering is such that this is likely a nop
1542 if ( src.first() != dst.first()) {
1543 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1544 }
1545 }
1546 }
1547
1548 // A long move
1549 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1550
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1554
1555 if (src.is_single_phys_reg() ) {
1556 if (dst.is_single_phys_reg()) {
1557 if (dst.first() != src.first()) {
1558 __ mov(dst.first()->as_Register(), src.first()->as_Register());
1559 }
1560 } else {
1561 assert(dst.is_single_reg(), "not a stack pair");
1562 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1563 }
1564 } else if (dst.is_single_phys_reg()) {
1565 assert(src.is_single_reg(), "not a stack pair");
1566 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
1567 } else {
1568 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1569 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1570 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1571 }
1572 }
1573
1574 // A double move
1575 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1576
  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1580
1581 if (src.is_single_phys_reg() ) {
1582 if (dst.is_single_phys_reg()) {
1583 // In theory these overlap but the ordering is such that this is likely a nop
1584 if ( src.first() != dst.first()) {
1585 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1586 }
1587 } else {
1588 assert(dst.is_single_reg(), "not a stack pair");
1589 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1590 }
1591 } else if (dst.is_single_phys_reg()) {
1592 assert(src.is_single_reg(), "not a stack pair");
1593 __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
1594 } else {
1595 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1596 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1597 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1598 }
1599 }
1600
1601
1602 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
1605 switch (ret_type) {
1606 case T_FLOAT:
1607 __ movflt(Address(rbp, -wordSize), xmm0);
1608 break;
1609 case T_DOUBLE:
1610 __ movdbl(Address(rbp, -wordSize), xmm0);
1611 break;
1612 case T_VOID: break;
1613 default: {
1614 __ movptr(Address(rbp, -wordSize), rax);
1615 }
1616 }
1617 }
1618
1619 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
1622 switch (ret_type) {
1623 case T_FLOAT:
1624 __ movflt(xmm0, Address(rbp, -wordSize));
1625 break;
1626 case T_DOUBLE:
1627 __ movdbl(xmm0, Address(rbp, -wordSize));
1628 break;
1629 case T_VOID: break;
1630 default: {
1631 __ movptr(rax, Address(rbp, -wordSize));
1632 }
1633 }
1634 }
1635
1636 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1637 for ( int i = first_arg ; i < arg_count ; i++ ) {
1638 if (args[i].first()->is_Register()) {
1639 __ push(args[i].first()->as_Register());
1640 } else if (args[i].first()->is_XMMRegister()) {
1641 __ subptr(rsp, 2*wordSize);
1642 __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1643 }
1644 }
1645 }
1646
1647 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1648 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1649 if (args[i].first()->is_Register()) {
1650 __ pop(args[i].first()->as_Register());
1651 } else if (args[i].first()->is_XMMRegister()) {
1652 __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1653 __ addptr(rsp, 2*wordSize);
1654 }
1655 }
1656 }
1657
1658
1659 static void save_or_restore_arguments(MacroAssembler* masm,
1660 const int stack_slots,
1661 const int total_in_args,
1662 const int arg_save_area,
1663 OopMap* map,
1664 VMRegPair* in_regs,
1665 BasicType* in_sig_bt) {
1666 // if map is non-NULL then the code should store the values,
1667 // otherwise it should load them.
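  // Typical usage (see check_needs_gc_for_critical_native below): call once
  // with a fresh OopMap to spill the register arguments before a runtime
  // call, then call again with map == NULL to reload them afterwards.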
1668 int slot = arg_save_area;
1669 // Save down double word first
1670 for ( int i = 0; i < total_in_args; i++) {
1671 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1672 int offset = slot * VMRegImpl::stack_slot_size;
1673 slot += VMRegImpl::slots_per_word;
1674 assert(slot <= stack_slots, "overflow");
1675 if (map != NULL) {
1676 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1677 } else {
1678 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1679 }
1680 }
1681 if (in_regs[i].first()->is_Register() &&
1682 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1683 int offset = slot * VMRegImpl::stack_slot_size;
1684 if (map != NULL) {
1685 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1686 if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
1688 }
1689 } else {
1690 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1691 }
1692 slot += VMRegImpl::slots_per_word;
1693 }
1694 }
1695 // Save or restore single word registers
1696 for ( int i = 0; i < total_in_args; i++) {
1697 if (in_regs[i].first()->is_Register()) {
1698 int offset = slot * VMRegImpl::stack_slot_size;
1699 slot++;
1700 assert(slot <= stack_slots, "overflow");
1701
      // Value is in an input register; we must flush it to the stack.
1703 const Register reg = in_regs[i].first()->as_Register();
1704 switch (in_sig_bt[i]) {
1705 case T_BOOLEAN:
1706 case T_CHAR:
1707 case T_BYTE:
1708 case T_SHORT:
1709 case T_INT:
1710 if (map != NULL) {
1711 __ movl(Address(rsp, offset), reg);
1712 } else {
1713 __ movl(reg, Address(rsp, offset));
1714 }
1715 break;
1716 case T_ARRAY:
1717 case T_LONG:
1718 // handled above
1719 break;
1720 case T_OBJECT:
1721 default: ShouldNotReachHere();
1722 }
1723 } else if (in_regs[i].first()->is_XMMRegister()) {
1724 if (in_sig_bt[i] == T_FLOAT) {
1725 int offset = slot * VMRegImpl::stack_slot_size;
1726 slot++;
1727 assert(slot <= stack_slots, "overflow");
1728 if (map != NULL) {
1729 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1730 } else {
1731 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1732 }
1733 }
1734 } else if (in_regs[i].first()->is_stack()) {
1735 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1736 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1737 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1738 }
1739 }
1740 }
1741 }
1742
1743 // Pin object, return pinned object or null in rax
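// (Only used for critical natives when the heap supports object pinning; see
// the Universe::heap()->supports_object_pinning() checks in the wrapper below.)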
1744 static void gen_pin_object(MacroAssembler* masm,
1745 VMRegPair reg) {
1746 __ block_comment("gen_pin_object {");
1747
1748 // rax always contains oop, either incoming or
1749 // pinned.
1750 Register tmp_reg = rax;
1751
1752 Label is_null;
1753 VMRegPair tmp;
1754 VMRegPair in_reg = reg;
1755
1756 tmp.set_ptr(tmp_reg->as_VMReg());
1757 if (reg.first()->is_stack()) {
1758 // Load the arg up from the stack
1759 move_ptr(masm, reg, tmp);
1760 reg = tmp;
1761 } else {
1762 __ movptr(rax, reg.first()->as_Register());
1763 }
1764 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1765 __ jccb(Assembler::equal, is_null);
1766
1767 if (reg.first()->as_Register() != c_rarg1) {
1768 __ movptr(c_rarg1, reg.first()->as_Register());
1769 }
1770
1771 __ call_VM_leaf(
1772 CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1773 r15_thread, c_rarg1);
1774
1775 __ bind(is_null);
1776 __ block_comment("} gen_pin_object");
1777 }
1778
1779 // Unpin object
1780 static void gen_unpin_object(MacroAssembler* masm,
1781 VMRegPair reg) {
1782 __ block_comment("gen_unpin_object {");
1783 Label is_null;
1784
1785 if (reg.first()->is_stack()) {
1786 __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1787 } else if (reg.first()->as_Register() != c_rarg1) {
1788 __ movptr(c_rarg1, reg.first()->as_Register());
1789 }
1790
1791 __ testptr(c_rarg1, c_rarg1);
1792 __ jccb(Assembler::equal, is_null);
1793
1794 __ call_VM_leaf(
1795 CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1796 r15_thread, c_rarg1);
1797
1798 __ bind(is_null);
1799 __ block_comment("} gen_unpin_object");
1800 }
1801
1802 // Check GCLocker::needs_gc and enter the runtime if it's true. This
1803 // keeps a new JNI critical region from starting until a GC has been
1804 // forced. Save down any oops in registers and describe them in an
1805 // OopMap.
1806 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1807 int stack_slots,
1808 int total_c_args,
1809 int total_in_args,
1810 int arg_save_area,
1811 OopMapSet* oop_maps,
1812 VMRegPair* in_regs,
1813 BasicType* in_sig_bt) {
1814 __ block_comment("check GCLocker::needs_gc");
1815 Label cont;
1816 __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1817 __ jcc(Assembler::equal, cont);
1818
1819 // Save down any incoming oops and call into the runtime to halt for a GC
1820
1821 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1822 save_or_restore_arguments(masm, stack_slots, total_in_args,
1823 arg_save_area, map, in_regs, in_sig_bt);
1824
1825 address the_pc = __ pc();
1826 oop_maps->add_gc_map( __ offset(), map);
1827 __ set_last_Java_frame(rsp, noreg, the_pc);
1828
1829 __ block_comment("block_for_jni_critical");
1830 __ movptr(c_rarg0, r15_thread);
1831 __ mov(r12, rsp); // remember sp
1832 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1833 __ andptr(rsp, -16); // align stack as required by ABI
1834 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1835 __ mov(rsp, r12); // restore sp
1836 __ reinit_heapbase();
1837
1838 __ reset_last_Java_frame(false);
1839
1840 save_or_restore_arguments(masm, stack_slots, total_in_args,
1841 arg_save_area, NULL, in_regs, in_sig_bt);
1842 __ bind(cont);
1843 #ifdef ASSERT
1844 if (StressCriticalJNINatives) {
1845 // Stress register saving
1846 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1847 save_or_restore_arguments(masm, stack_slots, total_in_args,
1848 arg_save_area, map, in_regs, in_sig_bt);
1849 // Destroy argument registers
1850 for (int i = 0; i < total_in_args - 1; i++) {
1851 if (in_regs[i].first()->is_Register()) {
1852 const Register reg = in_regs[i].first()->as_Register();
1853 __ xorptr(reg, reg);
1854 } else if (in_regs[i].first()->is_XMMRegister()) {
1855 __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1856 } else if (in_regs[i].first()->is_FloatRegister()) {
1857 ShouldNotReachHere();
1858 } else if (in_regs[i].first()->is_stack()) {
1859 // Nothing to do
1860 } else {
1861 ShouldNotReachHere();
1862 }
1863 if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1864 i++;
1865 }
1866 }
1867
1868 save_or_restore_arguments(masm, stack_slots, total_in_args,
1869 arg_save_area, NULL, in_regs, in_sig_bt);
1870 }
1871 #endif
1872 }
1873
1874 // Unpack an array argument into a pointer to the body and the length
1875 // if the array is non-null, otherwise pass 0 for both.
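// For example, a critical native that declares a jint[] parameter receives a
// (jint length, jint* body) pair in its place; a NULL array is passed as
// (0, NULL).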
1876 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1877 Register tmp_reg = rax;
1878 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1879 "possible collision");
1880 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1881 "possible collision");
1882
1883 __ block_comment("unpack_array_argument {");
1884
1885 // Pass the length, ptr pair
1886 Label is_null, done;
1887 VMRegPair tmp;
1888 tmp.set_ptr(tmp_reg->as_VMReg());
1889 if (reg.first()->is_stack()) {
1890 // Load the arg up from the stack
1891 move_ptr(masm, reg, tmp);
1892 reg = tmp;
1893 }
1894 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1895 __ jccb(Assembler::equal, is_null);
1896 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1897 move_ptr(masm, tmp, body_arg);
1898 // load the length relative to the body.
1899 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1900 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1901 move32_64(masm, tmp, length_arg);
1902 __ jmpb(done);
1903 __ bind(is_null);
1904 // Pass zeros
1905 __ xorptr(tmp_reg, tmp_reg);
1906 move_ptr(masm, tmp, body_arg);
1907 move32_64(masm, tmp, length_arg);
1908 __ bind(done);
1909
1910 __ block_comment("} unpack_array_argument");
1911 }
1912
1913
1914 // Different signatures may require very different orders for the move
1915 // to avoid clobbering other arguments. There's no simple way to
1916 // order them safely. Compute a safe order for issuing stores and
1917 // break any cycles in those stores. This code is fairly general but
1918 // it's not necessary on the other platforms so we keep it in the
1919 // platform dependent code instead of moving it into a shared file.
1920 // (See bugs 7013347 & 7145024.)
1921 // Note that this code is specific to LP64.
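// A minimal sketch of the cycle case this handles: if the required moves are
//   A -> B  and  B -> A
// they form a cycle, so get_store_order()/break_cycle() rewrite them to go
// through the temp register (rbx in the wrapper below):
//   B -> tmp,  A -> B,  tmp -> A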
1922 class ComputeMoveOrder: public StackObj {
1923 class MoveOperation: public ResourceObj {
1924 friend class ComputeMoveOrder;
1925 private:
1926 VMRegPair _src;
1927 VMRegPair _dst;
1928 int _src_index;
1929 int _dst_index;
1930 bool _processed;
1931 MoveOperation* _next;
1932 MoveOperation* _prev;
1933
1934 static int get_id(VMRegPair r) {
1935 return r.first()->value();
1936 }
1937
1938 public:
1939 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1940 _src(src)
1941 , _dst(dst)
1942 , _src_index(src_index)
1943 , _dst_index(dst_index)
1944 , _processed(false)
1945 , _next(NULL)
1946 , _prev(NULL) {
1947 }
1948
1949 VMRegPair src() const { return _src; }
1950 int src_id() const { return get_id(src()); }
1951 int src_index() const { return _src_index; }
1952 VMRegPair dst() const { return _dst; }
1953 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1954 int dst_index() const { return _dst_index; }
1955 int dst_id() const { return get_id(dst()); }
1956 MoveOperation* next() const { return _next; }
1957 MoveOperation* prev() const { return _prev; }
1958 void set_processed() { _processed = true; }
1959 bool is_processed() const { return _processed; }
1960
1961 // insert
1962 void break_cycle(VMRegPair temp_register) {
1963 // create a new store following the last store
1964 // to move from the temp_register to the original
1965 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1966
1967 // break the cycle of links and insert new_store at the end
1968 // break the reverse link.
1969 MoveOperation* p = prev();
1970 assert(p->next() == this, "must be");
1971 _prev = NULL;
1972 p->_next = new_store;
1973 new_store->_prev = p;
1974
      // change the original store to save its value in the temp.
1976 set_dst(-1, temp_register);
1977 }
1978
1979 void link(GrowableArray<MoveOperation*>& killer) {
      // link this store in front of the store that it depends on
1981 MoveOperation* n = killer.at_grow(src_id(), NULL);
1982 if (n != NULL) {
1983 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1984 _next = n;
1985 n->_prev = this;
1986 }
1987 }
1988 };
1989
1990 private:
1991 GrowableArray<MoveOperation*> edges;
1992
1993 public:
1994 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1995 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1996 // Move operations where the dest is the stack can all be
1997 // scheduled first since they can't interfere with the other moves.
1998 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1999 if (in_sig_bt[i] == T_ARRAY) {
2000 c_arg--;
2001 if (out_regs[c_arg].first()->is_stack() &&
2002 out_regs[c_arg + 1].first()->is_stack()) {
2003 arg_order.push(i);
2004 arg_order.push(c_arg);
2005 } else {
2006 if (out_regs[c_arg].first()->is_stack() ||
2007 in_regs[i].first() == out_regs[c_arg].first()) {
2008 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
2009 } else {
2010 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
2011 }
2012 }
2013 } else if (in_sig_bt[i] == T_VOID) {
2014 arg_order.push(i);
2015 arg_order.push(c_arg);
2016 } else {
2017 if (out_regs[c_arg].first()->is_stack() ||
2018 in_regs[i].first() == out_regs[c_arg].first()) {
2019 arg_order.push(i);
2020 arg_order.push(c_arg);
2021 } else {
2022 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
2023 }
2024 }
2025 }
    // Break any cycles in the register moves and emit them in the
    // proper order.
2028 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
2029 for (int i = 0; i < stores->length(); i++) {
2030 arg_order.push(stores->at(i)->src_index());
2031 arg_order.push(stores->at(i)->dst_index());
2032 }
2033 }
2034
  // Collect all the move operations
2036 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
2037 if (src.first() == dst.first()) return;
2038 edges.append(new MoveOperation(src_index, src, dst_index, dst));
2039 }
2040
2041 // Walk the edges breaking cycles between moves. The result list
2042 // can be walked in order to produce the proper set of loads
2043 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
2044 // Record which moves kill which values
2045 GrowableArray<MoveOperation*> killer;
2046 for (int i = 0; i < edges.length(); i++) {
2047 MoveOperation* s = edges.at(i);
2048 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
2049 killer.at_put_grow(s->dst_id(), s, NULL);
2050 }
2051 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
2052 "make sure temp isn't in the registers that are killed");
2053
2054 // create links between loads and stores
2055 for (int i = 0; i < edges.length(); i++) {
2056 edges.at(i)->link(killer);
2057 }
2058
2059 // at this point, all the move operations are chained together
2060 // in a doubly linked list. Processing it backwards finds
2061 // the beginning of the chain, forwards finds the end. If there's
2062 // a cycle it can be broken at any point, so pick an edge and walk
2063 // backward until the list ends or we end where we started.
2064 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
2065 for (int e = 0; e < edges.length(); e++) {
2066 MoveOperation* s = edges.at(e);
2067 if (!s->is_processed()) {
2068 MoveOperation* start = s;
2069 // search for the beginning of the chain or cycle
2070 while (start->prev() != NULL && start->prev() != s) {
2071 start = start->prev();
2072 }
2073 if (start->prev() == s) {
2074 start->break_cycle(temp_register);
2075 }
2076 // walk the chain forward inserting to store list
2077 while (start != NULL) {
2078 stores->append(start);
2079 start->set_processed();
2080 start = start->next();
2081 }
2082 }
2083 }
2084 return stores;
2085 }
2086 };
2087
2088 static void verify_oop_args(MacroAssembler* masm,
2089 const methodHandle& method,
2090 const BasicType* sig_bt,
2091 const VMRegPair* regs) {
2092 Register temp_reg = rbx; // not part of any compiled calling seq
2093 if (VerifyOops) {
2094 for (int i = 0; i < method->size_of_parameters(); i++) {
2095 if (sig_bt[i] == T_OBJECT ||
2096 sig_bt[i] == T_ARRAY) {
2097 VMReg r = regs[i].first();
2098 assert(r->is_valid(), "bad oop arg");
2099 if (r->is_stack()) {
2100 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2101 __ verify_oop(temp_reg);
2102 } else {
2103 __ verify_oop(r->as_Register());
2104 }
2105 }
2106 }
2107 }
2108 }
2109
2110 static void gen_special_dispatch(MacroAssembler* masm,
2111 const methodHandle& method,
2112 const BasicType* sig_bt,
2113 const VMRegPair* regs) {
2114 verify_oop_args(masm, method, sig_bt, regs);
2115 vmIntrinsics::ID iid = method->intrinsic_id();
2116
2117 // Now write the args into the outgoing interpreter space
2118 bool has_receiver = false;
2119 Register receiver_reg = noreg;
2120 int member_arg_pos = -1;
2121 Register member_reg = noreg;
2122 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
2123 if (ref_kind != 0) {
2124 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
2125 member_reg = rbx; // known to be free at this point
2126 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
2127 } else if (iid == vmIntrinsics::_invokeBasic) {
2128 has_receiver = true;
2129 } else {
2130 fatal("unexpected intrinsic id %d", iid);
2131 }
2132
2133 if (member_reg != noreg) {
2134 // Load the member_arg into register, if necessary.
2135 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
2136 VMReg r = regs[member_arg_pos].first();
2137 if (r->is_stack()) {
2138 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2139 } else {
2140 // no data motion is needed
2141 member_reg = r->as_Register();
2142 }
2143 }
2144
2145 if (has_receiver) {
2146 // Make sure the receiver is loaded into a register.
2147 assert(method->size_of_parameters() > 0, "oob");
2148 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
2149 VMReg r = regs[0].first();
2150 assert(r->is_valid(), "bad receiver arg");
2151 if (r->is_stack()) {
2152 // Porting note: This assumes that compiled calling conventions always
2153 // pass the receiver oop in a register. If this is not true on some
2154 // platform, pick a temp and load the receiver from stack.
2155 fatal("receiver always in a register");
2156 receiver_reg = j_rarg0; // known to be free at this point
2157 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2158 } else {
2159 // no data motion is needed
2160 receiver_reg = r->as_Register();
2161 }
2162 }
2163
2164 // Figure out which address we are really jumping to:
2165 MethodHandles::generate_method_handle_dispatch(masm, iid,
2166 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
2167 }
2168
2169 // ---------------------------------------------------------------------------
2170 // Generate a native wrapper for a given method. The method takes arguments
2171 // in the Java compiled code convention, marshals them to the native
2172 // convention (handlizes oops, etc), transitions to native, makes the call,
2173 // returns to java state (possibly blocking), unhandlizes any result and
2174 // returns.
2175 //
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
2185 //
2186 // They are roughly structured like this:
2187 // if (GCLocker::needs_gc())
2188 // SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
2191 // check for safepoint in progress
2192 // check if any thread suspend flags are set
//   call into the JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
2195 // transition back to thread_in_Java
2196 // return to caller
2197 //
2198 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
2199 const methodHandle& method,
2200 int compile_id,
2201 BasicType* in_sig_bt,
2202 VMRegPair* in_regs,
2203 BasicType ret_type) {
2204 if (method->is_method_handle_intrinsic()) {
2205 vmIntrinsics::ID iid = method->intrinsic_id();
2206 intptr_t start = (intptr_t)__ pc();
2207 int vep_offset = ((intptr_t)__ pc()) - start;
2208 gen_special_dispatch(masm,
2209 method,
2210 in_sig_bt,
2211 in_regs);
2212 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
2213 __ flush();
2214 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
2215 return nmethod::new_native_nmethod(method,
2216 compile_id,
2217 masm->code(),
2218 vep_offset,
2219 frame_complete,
2220 stack_slots / VMRegImpl::slots_per_word,
2221 in_ByteSize(-1),
2222 in_ByteSize(-1),
2223 (OopMapSet*)NULL);
2224 }
2225 bool is_critical_native = true;
2226 address native_func = method->critical_native_function();
2227 if (native_func == NULL) {
2228 native_func = method->native_function();
2229 is_critical_native = false;
2230 }
2231 assert(native_func != NULL, "must have function");
2232
2233 // An OopMap for lock (and class if static)
2234 OopMapSet *oop_maps = new OopMapSet();
2235 intptr_t start = (intptr_t)__ pc();
2236
  // We have received a description of where all the java args are located
2238 // on entry to the wrapper. We need to convert these args to where
2239 // the jni function will expect them. To figure out where they go
2240 // we convert the java signature to a C signature by inserting
2241 // the hidden arguments as arg[0] and possibly arg[1] (static method)
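  // For a non-static native the only hidden argument is the JNIEnv*, so the
  // C signature is (JNIEnv*, receiver, ...remaining java args...); for a
  // static native both the JNIEnv* and a handle to the class mirror are
  // prepended. That is why total_c_args below is one or two larger than
  // total_in_args in the non-critical case.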
2242
2243 const int total_in_args = method->size_of_parameters();
2244 int total_c_args = total_in_args;
2245 if (!is_critical_native) {
2246 total_c_args += 1;
2247 if (method->is_static()) {
2248 total_c_args++;
2249 }
2250 } else {
2251 for (int i = 0; i < total_in_args; i++) {
2252 if (in_sig_bt[i] == T_ARRAY) {
2253 total_c_args++;
2254 }
2255 }
2256 }
2257
2258 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2259 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2260 BasicType* in_elem_bt = NULL;
2261
2262 int argc = 0;
2263 if (!is_critical_native) {
2264 out_sig_bt[argc++] = T_ADDRESS;
2265 if (method->is_static()) {
2266 out_sig_bt[argc++] = T_OBJECT;
2267 }
2268
2269 for (int i = 0; i < total_in_args ; i++ ) {
2270 out_sig_bt[argc++] = in_sig_bt[i];
2271 }
2272 } else {
2273 Thread* THREAD = Thread::current();
2274 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2275 SignatureStream ss(method->signature());
2276 for (int i = 0; i < total_in_args ; i++ ) {
2277 if (in_sig_bt[i] == T_ARRAY) {
2278 // Arrays are passed as int, elem* pair
2279 out_sig_bt[argc++] = T_INT;
2280 out_sig_bt[argc++] = T_ADDRESS;
2281 Symbol* atype = ss.as_symbol(CHECK_NULL);
2282 const char* at = atype->as_C_string();
2283 if (strlen(at) == 2) {
2284 assert(at[0] == '[', "must be");
2285 switch (at[1]) {
2286 case 'B': in_elem_bt[i] = T_BYTE; break;
2287 case 'C': in_elem_bt[i] = T_CHAR; break;
2288 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2289 case 'F': in_elem_bt[i] = T_FLOAT; break;
2290 case 'I': in_elem_bt[i] = T_INT; break;
2291 case 'J': in_elem_bt[i] = T_LONG; break;
2292 case 'S': in_elem_bt[i] = T_SHORT; break;
2293 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2294 default: ShouldNotReachHere();
2295 }
2296 }
2297 } else {
2298 out_sig_bt[argc++] = in_sig_bt[i];
2299 in_elem_bt[i] = T_VOID;
2300 }
2301 if (in_sig_bt[i] != T_VOID) {
2302 assert(in_sig_bt[i] == ss.type(), "must match");
2303 ss.next();
2304 }
2305 }
2306 }
2307
2308 // Now figure out where the args must be stored and how much stack space
2309 // they require.
2310 int out_arg_slots;
2311 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2312
2313 // Compute framesize for the wrapper. We need to handlize all oops in
2314 // incoming registers
2315
2316 // Calculate the total number of stack slots we will need.
2317
2318 // First count the abi requirement plus all of the outgoing args
2319 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2320
2321 // Now the space for the inbound oop handle area
2322 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
2323 if (is_critical_native) {
2324 // Critical natives may have to call out so they need a save area
2325 // for register arguments.
2326 int double_slots = 0;
2327 int single_slots = 0;
2328 for ( int i = 0; i < total_in_args; i++) {
2329 if (in_regs[i].first()->is_Register()) {
2330 const Register reg = in_regs[i].first()->as_Register();
2331 switch (in_sig_bt[i]) {
2332 case T_BOOLEAN:
2333 case T_BYTE:
2334 case T_SHORT:
2335 case T_CHAR:
2336 case T_INT: single_slots++; break;
2337 case T_ARRAY: // specific to LP64 (7145024)
2338 case T_LONG: double_slots++; break;
2339 default: ShouldNotReachHere();
2340 }
2341 } else if (in_regs[i].first()->is_XMMRegister()) {
2342 switch (in_sig_bt[i]) {
2343 case T_FLOAT: single_slots++; break;
2344 case T_DOUBLE: double_slots++; break;
2345 default: ShouldNotReachHere();
2346 }
2347 } else if (in_regs[i].first()->is_FloatRegister()) {
2348 ShouldNotReachHere();
2349 }
2350 }
2351 total_save_slots = double_slots * 2 + single_slots;
2352 // align the save area
2353 if (double_slots != 0) {
2354 stack_slots = align_up(stack_slots, 2);
2355 }
2356 }
2357
2358 int oop_handle_offset = stack_slots;
2359 stack_slots += total_save_slots;
2360
2361 // Now any space we need for handlizing a klass if static method
2362
2363 int klass_slot_offset = 0;
2364 int klass_offset = -1;
2365 int lock_slot_offset = 0;
2366 bool is_static = false;
2367
2368 if (method->is_static()) {
2369 klass_slot_offset = stack_slots;
2370 stack_slots += VMRegImpl::slots_per_word;
2371 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2372 is_static = true;
2373 }
2374
2375 // Plus a lock if needed
2376
2377 if (method->is_synchronized()) {
2378 lock_slot_offset = stack_slots;
2379 stack_slots += VMRegImpl::slots_per_word;
2380 }
2381
2382 // Now a place (+2) to save return values or temp during shuffling
2383 // + 4 for return address (which we own) and saved rbp
2384 stack_slots += 6;
2385
2386 // Ok The space we have allocated will look like:
2387 //
2388 //
2389 // FP-> | |
2390 // |---------------------|
2391 // | 2 slots for moves |
2392 // |---------------------|
2393 // | lock box (if sync) |
2394 // |---------------------| <- lock_slot_offset
2395 // | klass (if static) |
2396 // |---------------------| <- klass_slot_offset
2397 // | oopHandle area |
2398 // |---------------------| <- oop_handle_offset (6 java arg registers)
2399 // | outbound memory |
2400 // | based arguments |
2401 // | |
2402 // |---------------------|
2403 // | |
2404 // SP-> | out_preserved_slots |
2405 //
2406 //
2407
2408
  // Now compute the actual number of stack words we need, rounding to make
  // the stack properly aligned.
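  // (StackAlignmentInSlots reflects the 16-byte stack alignment the x86-64
  // ABIs require at call sites.)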
2411 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
2412
2413 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2414
2415 // First thing make an ic check to see if we should even be here
2416
2417 // We are free to use all registers as temps without saving them and
2418 // restoring them except rbp. rbp is the only callee save register
2419 // as far as the interpreter and the compiler(s) are concerned.
2420
2421
2422 const Register ic_reg = rax;
2423 const Register receiver = j_rarg0;
2424
2425 Label hit;
2426 Label exception_pending;
2427
2428 assert_different_registers(ic_reg, receiver, rscratch1);
2429 __ verify_oop(receiver);
2430 __ load_klass(rscratch1, receiver);
2431 __ cmpq(ic_reg, rscratch1);
2432 __ jcc(Assembler::equal, hit);
2433
2434 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2435
2436 // Verified entry point must be aligned
2437 __ align(8);
2438
2439 __ bind(hit);
2440
2441 int vep_offset = ((intptr_t)__ pc()) - start;
2442
2443 #ifdef COMPILER1
2444 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2445 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2446 inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2447 }
2448 #endif // COMPILER1
2449
2450 // The instruction at the verified entry point must be 5 bytes or longer
2451 // because it can be patched on the fly by make_non_entrant. The stack bang
2452 // instruction fits that requirement.
2453
2454 // Generate stack overflow check
2455
2456 if (UseStackBanging) {
2457 __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2458 } else {
2459 // need a 5 byte instruction to allow MT safe patching to non-entrant
2460 __ fat_nop();
2461 }
2462
2463 // Generate a new frame for the wrapper.
2464 __ enter();
2465 // -2 because return address is already present and so is saved rbp
2466 __ subptr(rsp, stack_size - 2*wordSize);
2467
2468 // Frame is now completed as far as size and linkage.
2469 int frame_complete = ((intptr_t)__ pc()) - start;
2470
2471 if (UseRTMLocking) {
2472 // Abort RTM transaction before calling JNI
2473 // because critical section will be large and will be
2474 // aborted anyway. Also nmethod could be deoptimized.
2475 __ xabort(0);
2476 }
2477
2478 #ifdef ASSERT
2479 {
2480 Label L;
2481 __ mov(rax, rsp);
2482 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2483 __ cmpptr(rax, rsp);
2484 __ jcc(Assembler::equal, L);
2485 __ stop("improperly aligned stack");
2486 __ bind(L);
2487 }
2488 #endif /* ASSERT */
2489
2490
2491 // We use r14 as the oop handle for the receiver/klass
2492 // It is callee save so it survives the call to native
2493
2494 const Register oop_handle_reg = r14;
2495
2496 if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2497 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2498 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2499 }
2500
2501 //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.
2506
2507 // -----------------
2508 // The Grand Shuffle
2509
  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
2513 // Therefore if we move the args from java -> c backwards then we will never have
2514 // a register->register conflict and we don't have to build a dependency graph
2515 // and figure out how to break any cycles.
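  // Concretely, java arg i becomes C arg i+1 (i+2 for a static method), so by
  // walking from the last argument toward the first each source location is
  // read before anything is stored into it.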
2516 //
2517
2518 // Record esp-based slot for receiver on stack for non-static methods
2519 int receiver_offset = -1;
2520
2521 // This is a trick. We double the stack slots so we can claim
2522 // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
2524 // sure we can capture all the incoming oop args from the
2525 // caller.
2526 //
2527 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2528
2529 // Mark location of rbp (someday)
2530 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2531
2532 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2533 // All inbound args are referenced based on rbp and all outbound args via rsp.
2534
2535
2536 #ifdef ASSERT
2537 bool reg_destroyed[RegisterImpl::number_of_registers];
2538 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2539 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2540 reg_destroyed[r] = false;
2541 }
2542 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2543 freg_destroyed[f] = false;
2544 }
2545
2546 #endif /* ASSERT */
2547
2548 // This may iterate in two different directions depending on the
2549 // kind of native it is. The reason is that for regular JNI natives
2550 // the incoming and outgoing registers are offset upwards and for
2551 // critical natives they are offset down.
2552 GrowableArray<int> arg_order(2 * total_in_args);
2553 // Inbound arguments that need to be pinned for critical natives
2554 GrowableArray<int> pinned_args(total_in_args);
2555 // Current stack slot for storing register based array argument
2556 int pinned_slot = oop_handle_offset;
2557
2558 VMRegPair tmp_vmreg;
2559 tmp_vmreg.set2(rbx->as_VMReg());
2560
2561 if (!is_critical_native) {
2562 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2563 arg_order.push(i);
2564 arg_order.push(c_arg);
2565 }
2566 } else {
2567 // Compute a valid move order, using tmp_vmreg to break any cycles
2568 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2569 }
2570
2571 int temploc = -1;
2572 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2573 int i = arg_order.at(ai);
2574 int c_arg = arg_order.at(ai + 1);
2575 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2576 if (c_arg == -1) {
2577 assert(is_critical_native, "should only be required for critical natives");
2578 // This arg needs to be moved to a temporary
2579 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2580 in_regs[i] = tmp_vmreg;
2581 temploc = i;
2582 continue;
2583 } else if (i == -1) {
2584 assert(is_critical_native, "should only be required for critical natives");
2585 // Read from the temporary location
2586 assert(temploc != -1, "must be valid");
2587 i = temploc;
2588 temploc = -1;
2589 }
2590 #ifdef ASSERT
2591 if (in_regs[i].first()->is_Register()) {
2592 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2593 } else if (in_regs[i].first()->is_XMMRegister()) {
2594 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2595 }
2596 if (out_regs[c_arg].first()->is_Register()) {
2597 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2598 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2599 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2600 }
2601 #endif /* ASSERT */
2602 switch (in_sig_bt[i]) {
2603 case T_ARRAY:
2604 if (is_critical_native) {
2605 // pin before unpack
2606 if (Universe::heap()->supports_object_pinning()) {
2607 save_args(masm, total_c_args, 0, out_regs);
2608 gen_pin_object(masm, in_regs[i]);
2609 pinned_args.append(i);
2610 restore_args(masm, total_c_args, 0, out_regs);
2611
2612 // rax has pinned array
2613 VMRegPair result_reg;
2614 result_reg.set_ptr(rax->as_VMReg());
2615 move_ptr(masm, result_reg, in_regs[i]);
2616 if (!in_regs[i].first()->is_stack()) {
2617 assert(pinned_slot <= stack_slots, "overflow");
2618 move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2619 pinned_slot += VMRegImpl::slots_per_word;
2620 }
2621 }
2622 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2623 c_arg++;
2624 #ifdef ASSERT
2625 if (out_regs[c_arg].first()->is_Register()) {
2626 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2627 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2628 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2629 }
2630 #endif
2631 break;
2632 }
2633 case T_OBJECT:
2634 assert(!is_critical_native, "no oop arguments");
2635 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2636 ((i == 0) && (!is_static)),
2637 &receiver_offset);
2638 break;
2639 case T_VOID:
2640 break;
2641
2642 case T_FLOAT:
2643 float_move(masm, in_regs[i], out_regs[c_arg]);
2644 break;
2645
2646 case T_DOUBLE:
2647 assert( i + 1 < total_in_args &&
2648 in_sig_bt[i + 1] == T_VOID &&
2649 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2650 double_move(masm, in_regs[i], out_regs[c_arg]);
2651 break;
2652
2653 case T_LONG :
2654 long_move(masm, in_regs[i], out_regs[c_arg]);
2655 break;
2656
2657 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2658
2659 default:
2660 move32_64(masm, in_regs[i], out_regs[c_arg]);
2661 }
2662 }
2663
2664 int c_arg;
2665
2666 // Pre-load a static method's oop into r14. Used both by locking code and
2667 // the normal JNI call code.
2668 if (!is_critical_native) {
2669 // point c_arg at the first arg that is already loaded in case we
2670 // need to spill before we call out
2671 c_arg = total_c_args - total_in_args;
2672
2673 if (method->is_static()) {
2674
2675 // load oop into a register
2676 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2677
      // Now handlize the static class mirror; it's known not-null.
2679 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2680 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2681
2682 // Now get the handle
2683 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2684 // store the klass handle as second argument
2685 __ movptr(c_rarg1, oop_handle_reg);
2686 // and protect the arg if we must spill
2687 c_arg--;
2688 }
2689 } else {
2690 // For JNI critical methods we need to save all registers in save_args.
2691 c_arg = 0;
2692 }
2693
2694 // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2696 // points into the right code segment. It does not have to be the correct return pc.
2697 // We use the same pc/oopMap repeatedly when we call out
2698
2699 intptr_t the_pc = (intptr_t) __ pc();
2700 oop_maps->add_gc_map(the_pc - start, map);
2701
2702 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2703
2704
  // We have all of the arguments set up at this point. We must not touch any register
  // argument registers at this point (what if we save/restore them and there are no oops?).
2707
2708 {
2709 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2710 // protect the args we've loaded
2711 save_args(masm, total_c_args, c_arg, out_regs);
2712 __ mov_metadata(c_rarg1, method());
2713 __ call_VM_leaf(
2714 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2715 r15_thread, c_rarg1);
2716 restore_args(masm, total_c_args, c_arg, out_regs);
2717 }
2718
2719 // RedefineClasses() tracing support for obsolete method entry
2720 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2721 // protect the args we've loaded
2722 save_args(masm, total_c_args, c_arg, out_regs);
2723 __ mov_metadata(c_rarg1, method());
2724 __ call_VM_leaf(
2725 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2726 r15_thread, c_rarg1);
2727 restore_args(masm, total_c_args, c_arg, out_regs);
2728 }
2729
2730 // Lock a synchronized method
2731
2732 // Register definitions used by locking and unlocking
2733
2734 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2735 const Register obj_reg = rbx; // Will contain the oop
2736 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2737 const Register old_hdr = r13; // value of old header at unlock time
2738
2739 Label slow_path_lock;
2740 Label lock_done;
2741
2742 if (method->is_synchronized()) {
2743 assert(!is_critical_native, "unhandled");
2744
2745
2746 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2747
2748 // Get the handle (the 2nd argument)
2749 __ mov(oop_handle_reg, c_rarg1);
2750
2751 // Get address of the box
2752
2753 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2754
2755 // Load the oop from the handle
2756 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2757
2758 __ resolve(IS_NOT_NULL, obj_reg);
2759 if (UseBiasedLocking) {
2760 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2761 }
2762
2763 // Load immediate 1 into swap_reg %rax
2764 __ movl(swap_reg, 1);
2765
2766 // Load (object->mark() | 1) into swap_reg %rax
2767 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2768 if (EnableValhalla && !UseBiasedLocking) {
      // For the slow path, is_always_locked uses the biased pattern, which never occurs naturally when !UseBiasedLocking
2770 __ andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place);
2771 }
2772
2773 // Save (object->mark() | 1) into BasicLock's displaced header
2774 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2775
2776 if (os::is_MP()) {
2777 __ lock();
2778 }
2779
2780 // src -> dest iff dest == rax else rax <- dest
2781 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2782 __ jcc(Assembler::equal, lock_done);
2783
2784 // Hmm should this move to the slow path code area???
2785
2786 // Test if the oopMark is an obvious stack pointer, i.e.,
2787 // 1) (mark & 3) == 0, and
2788 // 2) rsp <= mark < mark + os::pagesize()
2789 // These 3 tests can be done by evaluating the following
2790 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2791 // assuming both stack pointer and pagesize have their
2792 // least significant 2 bits clear.
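    // (For example, with a 4K page 3 - os::vm_page_size() is ...fffff003, so
    //  the andptr below yields zero exactly when mark - rsp is 4-byte aligned
    //  and, viewed unsigned, smaller than one page -- i.e. the mark points
    //  into this thread's stack at or just above rsp.)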
2793 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2794
2795 __ subptr(swap_reg, rsp);
2796 __ andptr(swap_reg, 3 - os::vm_page_size());
2797
2798 // Save the test result, for recursive case, the result is zero
2799 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2800 __ jcc(Assembler::notEqual, slow_path_lock);
2801
2802 // Slow path will re-enter here
2803
2804 __ bind(lock_done);
2805 }
2806
2807
2808 // Finally just about ready to make the JNI call
2809
2810
2811 // get JNIEnv* which is first argument to native
2812 if (!is_critical_native) {
2813 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2814 }
2815
2816 // Now set thread in native
2817 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2818
2819 __ call(RuntimeAddress(native_func));
2820
2821 // Verify or restore cpu control state after JNI call
2822 __ restore_cpu_control_state_after_jni();
2823
2824 // Unpack native results.
2825 switch (ret_type) {
2826 case T_BOOLEAN: __ c2bool(rax); break;
2827 case T_CHAR : __ movzwl(rax, rax); break;
2828 case T_BYTE : __ sign_extend_byte (rax); break;
2829 case T_SHORT : __ sign_extend_short(rax); break;
2830 case T_INT : /* nothing to do */ break;
2831 case T_DOUBLE :
2832 case T_FLOAT :
2833 // Result is in xmm0 we'll save as needed
2834 break;
2835 case T_ARRAY: // Really a handle
2836 case T_OBJECT: // Really a handle
2837 break; // can't de-handlize until after safepoint check
2838 case T_VOID: break;
2839 case T_LONG: break;
2840 default : ShouldNotReachHere();
2841 }
2842
2843 // unpin pinned arguments
2844 pinned_slot = oop_handle_offset;
2845 if (pinned_args.length() > 0) {
2846 // save return value that may be overwritten otherwise.
2847 save_native_result(masm, ret_type, stack_slots);
2848 for (int index = 0; index < pinned_args.length(); index ++) {
2849 int i = pinned_args.at(index);
2850 assert(pinned_slot <= stack_slots, "overflow");
2851 if (!in_regs[i].first()->is_stack()) {
2852 int offset = pinned_slot * VMRegImpl::stack_slot_size;
2853 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2854 pinned_slot += VMRegImpl::slots_per_word;
2855 }
2856 gen_unpin_object(masm, in_regs[i]);
2857 }
2858 restore_native_result(masm, ret_type, stack_slots);
2859 }
2860
2861 // Switch thread to "native transition" state before reading the synchronization state.
2862 // This additional state is necessary because reading and testing the synchronization
2863 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2864 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2865 // VM thread changes sync state to synchronizing and suspends threads for GC.
2866 // Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
2868 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2869
2870 if(os::is_MP()) {
2871 if (UseMembar) {
2872 // Force this write out before the read below
2873 __ membar(Assembler::Membar_mask_bits(
2874 Assembler::LoadLoad | Assembler::LoadStore |
2875 Assembler::StoreLoad | Assembler::StoreStore));
2876 } else {
2877 // Write serialization page so VM thread can do a pseudo remote membar.
2878 // We use the current thread pointer to calculate a thread specific
2879 // offset to write to within the page. This minimizes bus traffic
2880 // due to cache line collision.
2881 __ serialize_memory(r15_thread, rcx);
2882 }
2883 }
2884
2885 Label after_transition;
2886
2887 // check for safepoint operation in progress and/or pending suspend requests
2888 {
2889 Label Continue;
2890 Label slow_path;
2891
2892 __ safepoint_poll(slow_path, r15_thread, rscratch1);
2893
2894 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2895 __ jcc(Assembler::equal, Continue);
2896 __ bind(slow_path);
2897
    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf, as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
2903 //
2904 __ vzeroupper();
2905 save_native_result(masm, ret_type, stack_slots);
2906 __ mov(c_rarg0, r15_thread);
2907 __ mov(r12, rsp); // remember sp
2908 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2909 __ andptr(rsp, -16); // align stack as required by ABI
2910 if (!is_critical_native) {
2911 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2912 } else {
2913 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2914 }
2915 __ mov(rsp, r12); // restore sp
2916 __ reinit_heapbase();
2917 // Restore any method result value
2918 restore_native_result(masm, ret_type, stack_slots);
2919
2920 if (is_critical_native) {
2921 // The call above performed the transition to thread_in_Java so
2922 // skip the transition logic below.
2923 __ jmpb(after_transition);
2924 }
2925
2926 __ bind(Continue);
2927 }
2928
2929 // change thread state
2930 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2931 __ bind(after_transition);
2932
2933 Label reguard;
2934 Label reguard_done;
2935 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2936 __ jcc(Assembler::equal, reguard);
2937 __ bind(reguard_done);
2938
  // The native result, if any, is live now
2940
2941 // Unlock
2942 Label unlock_done;
2943 Label slow_path_unlock;
2944 if (method->is_synchronized()) {
2945
2946 // Get locked oop from the handle we passed to jni
2947 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2948 __ resolve(IS_NOT_NULL, obj_reg);
2949
2950 Label done;
2951
2952 if (UseBiasedLocking) {
2953 __ biased_locking_exit(obj_reg, old_hdr, done);
2954 }
2955
2956 // Simple recursive lock?
2957
2958 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2959 __ jcc(Assembler::equal, done);
2960
    // Must save rax if it is live now because cmpxchg must use it
2962 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2963 save_native_result(masm, ret_type, stack_slots);
2964 }
2965
2966
2967 // get address of the stack lock
2968 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2969 // get old displaced header
2970 __ movptr(old_hdr, Address(rax, 0));
2971
2972 // Atomic swap old header if oop still contains the stack lock
2973 if (os::is_MP()) {
2974 __ lock();
2975 }
2976 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2977 __ jcc(Assembler::notEqual, slow_path_unlock);
2978
2979 // slow path re-enters here
2980 __ bind(unlock_done);
2981 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2982 restore_native_result(masm, ret_type, stack_slots);
2983 }
2984
2985 __ bind(done);
2986
2987 }
2988 {
2989 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2990 save_native_result(masm, ret_type, stack_slots);
2991 __ mov_metadata(c_rarg1, method());
2992 __ call_VM_leaf(
2993 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2994 r15_thread, c_rarg1);
2995 restore_native_result(masm, ret_type, stack_slots);
2996 }
2997
2998 __ reset_last_Java_frame(false);
2999
3000 // Unbox oop result, e.g. JNIHandles::resolve value.
3001 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
3002 __ resolve_jobject(rax /* value */,
3003 r15_thread /* thread */,
3004 rcx /* tmp */);
3005 }
3006
3007 if (CheckJNICalls) {
3008 // clear_pending_jni_exception_check
3009 __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
3010 }
3011
3012 if (!is_critical_native) {
3013 // reset handle block
3014 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
3015 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
3016 }
3017
3018 // pop our frame
3019
3020 __ leave();
3021
3022 if (!is_critical_native) {
3023 // Any exception pending?
3024 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3025 __ jcc(Assembler::notEqual, exception_pending);
3026 }
3027
3028 // Return
3029
3030 __ ret(0);
3031
3032 // Unexpected paths are out of line and go here
3033
3034 if (!is_critical_native) {
3035 // forward the exception
3036 __ bind(exception_pending);
3037
3038 // and forward the exception
3039 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3040 }
3041
3042 // Slow path locking & unlocking
3043 if (method->is_synchronized()) {
3044
3045 // BEGIN Slow path lock
3046 __ bind(slow_path_lock);
3047
3048 // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
3049 // args are (oop obj, BasicLock* lock, JavaThread* thread)
3050
3051 // protect the args we've loaded
3052 save_args(masm, total_c_args, c_arg, out_regs);
3053
3054 __ mov(c_rarg0, obj_reg);
3055 __ mov(c_rarg1, lock_reg);
3056 __ mov(c_rarg2, r15_thread);
3057
3058 // Not a leaf, but we have last_Java_frame set up as we want
3059 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
3060 restore_args(masm, total_c_args, c_arg, out_regs);
3061
3062 #ifdef ASSERT
3063 { Label L;
3064 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3065 __ jcc(Assembler::equal, L);
3066 __ stop("no pending exception allowed on exit from monitorenter");
3067 __ bind(L);
3068 }
3069 #endif
3070 __ jmp(lock_done);
3071
3072 // END Slow path lock
3073
3074 // BEGIN Slow path unlock
3075 __ bind(slow_path_unlock);
3076
3077 // If we haven't already saved the native result we must save it now as xmm registers
3078 // are still exposed.
3079 __ vzeroupper();
3080 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
3081 save_native_result(masm, ret_type, stack_slots);
3082 }
3083
3084 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
3085
3086 __ mov(c_rarg0, obj_reg);
3087 __ mov(c_rarg2, r15_thread);
3088 __ mov(r12, rsp); // remember sp
3089 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3090 __ andptr(rsp, -16); // align stack as required by ABI
3091
3092 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
3093 // NOTE that obj_reg == rbx currently
3094 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
3095 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3096
3097 // args are (oop obj, BasicLock* lock, JavaThread* thread)
3098 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
3099 __ mov(rsp, r12); // restore sp
3100 __ reinit_heapbase();
3101 #ifdef ASSERT
3102 {
3103 Label L;
3104 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
3105 __ jcc(Assembler::equal, L);
3106 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
3107 __ bind(L);
3108 }
3109 #endif /* ASSERT */
3110
3111 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
3112
3113 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
3114 restore_native_result(masm, ret_type, stack_slots);
3115 }
3116 __ jmp(unlock_done);
3117
3118 // END Slow path unlock
3119
3120 } // synchronized
3121
3122 // SLOW PATH Reguard the stack if needed
3123
3124 __ bind(reguard);
3125 __ vzeroupper();
3126 save_native_result(masm, ret_type, stack_slots);
3127 __ mov(r12, rsp); // remember sp
3128 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3129 __ andptr(rsp, -16); // align stack as required by ABI
3130 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
3131 __ mov(rsp, r12); // restore sp
3132 __ reinit_heapbase();
3133 restore_native_result(masm, ret_type, stack_slots);
3134 // and continue
3135 __ jmp(reguard_done);
3136
3137
3138
3139 __ flush();
3140
3141 nmethod *nm = nmethod::new_native_nmethod(method,
3142 compile_id,
3143 masm->code(),
3144 vep_offset,
3145 frame_complete,
3146 stack_slots / VMRegImpl::slots_per_word,
3147 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
3148 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
3149 oop_maps);
3150
3151 if (is_critical_native) {
3152 nm->set_lazy_critical_native(true);
3153 }
3154
3155 return nm;
3156
3157 }
3158
3159 // This function returns the adjustment size (in number of words) to a c2i adapter
3160 // activation for use during deoptimization.
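// A small worked example (hypothetical numbers, and assuming Interpreter::stackElementWords
// is 1, i.e. one word per stack element as on x86_64): with callee_parameters == 2 and
// callee_locals == 5 the adjustment is (5 - 2) * 1 == 3 words of extra stack the interpreter
// activation needs beyond what the compiled caller provided.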
3161 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3162 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3163 }
3164
3165
3166 uint SharedRuntime::out_preserve_stack_slots() {
3167 return 0;
3168 }
3169
3170 //------------------------------generate_deopt_blob----------------------------
3171 void SharedRuntime::generate_deopt_blob() {
3172 // Allocate space for the code
3173 ResourceMark rm;
3174 // Setup code generation tools
3175 int pad = 0;
3176 #if INCLUDE_JVMCI
3177 if (EnableJVMCI || UseAOT) {
3178 pad += 512; // Increase the buffer size when compiling for JVMCI
3179 }
3180 #endif
3181 CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
3182 MacroAssembler* masm = new MacroAssembler(&buffer);
3183 int frame_size_in_words;
3184 OopMap* map = NULL;
3185 OopMapSet *oop_maps = new OopMapSet();
3186
3187 // -------------
3188 // This code enters when returning to a de-optimized nmethod. A return
3189 // address has been pushed on the stack, and return values are in
3190 // registers.
3191 // If we are doing a normal deopt then we were called from the patched
3192 // nmethod from the point we returned to the nmethod. So the return
3193 // address on the stack is wrong by NativeCall::instruction_size
3194 // We will adjust the value so it looks like we have the original return
3195 // address on the stack (like when we eagerly deoptimized).
3196 // In the case of an exception pending when deoptimizing, we enter
3197 // with a return address on the stack that points after the call we patched
3198 // into the exception handler. We have the following register state from,
3199 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3200 // rax: exception oop
3201 // rbx: exception handler
3202 // rdx: throwing pc
3203 // So in this case we simply jam rdx into the useless return address and
3204 // the stack looks just like we want.
3205 //
3206 // At this point we need to de-opt. We save the argument return
3207 // registers. We call the first C routine, fetch_unroll_info(). This
3208 // routine captures the return values and returns a structure which
3209 // describes the current frame size and the sizes of all replacement frames.
3210 // The current frame is compiled code and may contain many inlined
3211 // functions, each with their own JVM state. We pop the current frame, then
3212 // push all the new frames. Then we call the C routine unpack_frames() to
3213 // populate these frames. Finally unpack_frames() returns us the new target
3214 // address. Notice that callee-save registers are BLOWN here; they have
3215 // already been captured in the vframeArray at the time the return PC was
3216 // patched.
3217 address start = __ pc();
3218 Label cont;
3219
3220 // Prolog for non exception case!
3221
3222 // Save everything in sight.
3223 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3224
3225 // Normal deoptimization. Save exec mode for unpack_frames.
3226 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3227 __ jmp(cont);
3228
3229 int reexecute_offset = __ pc() - start;
3230 #if INCLUDE_JVMCI && !defined(COMPILER1)
3231 if (EnableJVMCI && UseJVMCICompiler) {
3232 // JVMCI does not use this kind of deoptimization
3233 __ should_not_reach_here();
3234 }
3235 #endif
3236
3237 // Reexecute case
3238 // The return address is the pc that describes what bci to re-execute at.
3239
3240 // No need to update map as each call to save_live_registers will produce identical oopmap
3241 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3242
3243 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3244 __ jmp(cont);
3245
3246 #if INCLUDE_JVMCI
3247 Label after_fetch_unroll_info_call;
3248 int implicit_exception_uncommon_trap_offset = 0;
3249 int uncommon_trap_offset = 0;
3250
3251 if (EnableJVMCI || UseAOT) {
3252 implicit_exception_uncommon_trap_offset = __ pc() - start;
3253
3254 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
3255 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
3256
3257 uncommon_trap_offset = __ pc() - start;
3258
3259 // Save everything in sight.
3260 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3261 // fetch_unroll_info needs to call last_java_frame()
3262 __ set_last_Java_frame(noreg, noreg, NULL);
3263
3264 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
3265 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
3266
3267 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
3268 __ mov(c_rarg0, r15_thread);
3269 __ movl(c_rarg2, r14); // exec mode
3270 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3271 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
3272
3273 __ reset_last_Java_frame(false);
3274
3275 __ jmp(after_fetch_unroll_info_call);
3276 } // EnableJVMCI
3277 #endif // INCLUDE_JVMCI
3278
3279 int exception_offset = __ pc() - start;
3280
3281 // Prolog for exception case
3282
3283 // all registers are dead at this entry point, except for rax, and
3284 // rdx which contain the exception oop and exception pc
3285 // respectively. Set them in TLS and fall thru to the
3286 // unpack_with_exception_in_tls entry point.
3287
3288 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3289 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3290
3291 int exception_in_tls_offset = __ pc() - start;
3292
3293 // new implementation because exception oop is now passed in JavaThread
3294
3295 // Prolog for exception case
3296 // All registers must be preserved because they might be used by LinearScan
3297 // Exception oop and throwing PC are passed in JavaThread
3298 // tos: stack at point of call to method that threw the exception (i.e. only
3299 // args are on the stack, no return address)
3300
3301 // make room on stack for the return address
3302 // It will be patched later with the throwing pc. The correct value is not
3303 // available now because loading it from memory would destroy registers.
3304 __ push(0);
3305
3306 // Save everything in sight.
3307 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3308
3309 // Now it is safe to overwrite any register
3310
3311 // Deopt during an exception. Save exec mode for unpack_frames.
3312 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3313
3314 // load throwing pc from JavaThread and patch it as the return address
3315 // of the current frame. Then clear the field in JavaThread
3316
3317 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3318 __ movptr(Address(rbp, wordSize), rdx);
3319 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3320
3321 #ifdef ASSERT
3322 // verify that there is really an exception oop in JavaThread
3323 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3324 __ verify_oop(rax);
3325
3326 // verify that there is no pending exception
3327 Label no_pending_exception;
3328 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3329 __ testptr(rax, rax);
3330 __ jcc(Assembler::zero, no_pending_exception);
3331 __ stop("must not have pending exception here");
3332 __ bind(no_pending_exception);
3333 #endif
3334
3335 __ bind(cont);
3336
3337 // Call C code. Need thread and this frame, but NOT official VM entry
3338 // crud. We cannot block on this call, no GC can happen.
3339 //
3340 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3341
3342 // fetch_unroll_info needs to call last_java_frame().
3343
3344 __ set_last_Java_frame(noreg, noreg, NULL);
3345 #ifdef ASSERT
3346 { Label L;
3347 __ cmpptr(Address(r15_thread,
3348 JavaThread::last_Java_fp_offset()),
3349 (int32_t)0);
3350 __ jcc(Assembler::equal, L);
3351 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3352 __ bind(L);
3353 }
3354 #endif // ASSERT
3355 __ mov(c_rarg0, r15_thread);
3356 __ movl(c_rarg1, r14); // exec_mode
3357 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3358
3359 // Need to have an oopmap that tells fetch_unroll_info where to
3360 // find any register it might need.
3361 oop_maps->add_gc_map(__ pc() - start, map);
3362
3363 __ reset_last_Java_frame(false);
3364
3365 #if INCLUDE_JVMCI
3366 if (EnableJVMCI || UseAOT) {
3367 __ bind(after_fetch_unroll_info_call);
3368 }
3369 #endif
3370
3371 // Load UnrollBlock* into rdi
3372 __ mov(rdi, rax);
3373
3374 __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
3375 Label noException;
3376 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3377 __ jcc(Assembler::notEqual, noException);
3378 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3379 // QQQ this is useless it was NULL above
3380 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3381 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3382 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3383
3384 __ verify_oop(rax);
3385
3386 // Overwrite the result registers with the exception results.
3387 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3388 // I think this is useless
3389 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3390
3391 __ bind(noException);
3392
3393 // Only register save data is on the stack.
3394 // Now restore the result registers. Everything else is either dead
3395 // or captured in the vframeArray.
3396 RegisterSaver::restore_result_registers(masm);
3397
3398 // All of the register save area has been popped off the stack. Only the
3399 // return address remains.
3400
3401 // Pop all the frames we must move/replace.
3402 //
3403 // Frame picture (youngest to oldest)
3404 // 1: self-frame (no frame link)
3405 // 2: deopting frame (no frame link)
3406 // 3: caller of deopting frame (could be compiled/interpreted).
3407 //
3408 // Note: by leaving the return address of self-frame on the stack
3409 // and using the size of frame 2 to adjust the stack
3410 // when we are done the return to frame 3 will still be on the stack.
3411
3412 // Pop deoptimized frame
3413 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3414 __ addptr(rsp, rcx);
3415
3416 // rsp should be pointing at the return address to the caller (3)
3417
3418 // Pick up the initial fp we should save
3419 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3420 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3421
3422 #ifdef ASSERT
3423 // Compilers generate code that bangs the stack by as much as the
3424 // interpreter would need, so this stack banging should never
3425 // trigger a fault. Verify that it does not on non-product builds.
3426 if (UseStackBanging) {
3427 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3428 __ bang_stack_size(rbx, rcx);
3429 }
3430 #endif
3431
3432 // Load address of array of frame pcs into rcx
3433 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3434
3435 // Trash the old pc
3436 __ addptr(rsp, wordSize);
3437
3438 // Load address of array of frame sizes into rsi
3439 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3440
3441 // Load counter into rdx
3442 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3443
3444 // Now adjust the caller's stack to make up for the extra locals, but record
3445 // the original sp first so that we can save it in the skeletal interpreter
3446 // frame; the stack walking of interpreter_sender will then get the unextended
3447 // sp value and not the "real" sp value.
3448
3449 const Register sender_sp = r8;
3450
3451 __ mov(sender_sp, rsp);
3452 __ movl(rbx, Address(rdi,
3453 Deoptimization::UnrollBlock::
3454 caller_adjustment_offset_in_bytes()));
3455 __ subptr(rsp, rbx);
3456
3457 // Push interpreter frames in a loop
3458 Label loop;
3459 __ bind(loop);
3460 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3461 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3462 __ pushptr(Address(rcx, 0)); // Save return address
3463 __ enter(); // Save old & set new ebp
3464 __ subptr(rsp, rbx); // Prolog
3465 // This value is corrected by layout_activation_impl
3466 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3467 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3468 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3469 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3470 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3471 __ decrementl(rdx); // Decrement counter
3472 __ jcc(Assembler::notZero, loop);
3473 __ pushptr(Address(rcx, 0)); // Save final return address
3474
3475 // Re-push self-frame
3476 __ enter(); // Save old & set new ebp
3477
3478 // Allocate a full sized register save area.
3479 // Return address and rbp are in place, so we allocate two fewer words.
3480 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3481
3482 // Restore frame locals after moving the frame
3483 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3484 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3485
3486 // Call C code. Need thread but NOT official VM entry
3487 // crud. We cannot block on this call, no GC can happen. Call should
3488 // restore return values to their stack-slots with the new SP.
3489 //
3490 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3491
3492 // Use rbp because the frames look interpreted now
3493 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3494 // Don't need the precise return PC here, just precise enough to point into this code blob.
3495 address the_pc = __ pc();
3496 __ set_last_Java_frame(noreg, rbp, the_pc);
3497
3498 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3499 __ mov(c_rarg0, r15_thread);
3500 __ movl(c_rarg1, r14); // second arg: exec_mode
3501 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3502 // Revert SP alignment after call since we're going to do some SP relative addressing below
3503 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3504
3505 // Set an oopmap for the call site
3506 // Use the same PC we used for the last java frame
3507 oop_maps->add_gc_map(the_pc - start,
3508 new OopMap( frame_size_in_words, 0 ));
3509
3510 // Clear fp AND pc
3511 __ reset_last_Java_frame(true);
3512
3513 // Collect return values
3514 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3515 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3516 // I think this is useless (throwing pc?)
3517 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3518
3519 // Pop self-frame.
3520 __ leave(); // Epilog
3521
3522 // Jump to interpreter
3523 __ ret(0);
3524
3525 // Make sure all code is generated
3526 masm->flush();
3527
3528 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3529 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3530 #if INCLUDE_JVMCI
3531 if (EnableJVMCI || UseAOT) {
3532 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3533 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3534 }
3535 #endif
3536 }
3537
3538 #ifdef COMPILER2
3539 //------------------------------generate_uncommon_trap_blob--------------------
3540 void SharedRuntime::generate_uncommon_trap_blob() {
3541 // Allocate space for the code
3542 ResourceMark rm;
3543 // Setup code generation tools
3544 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3545 MacroAssembler* masm = new MacroAssembler(&buffer);
3546
3547 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3548
3549 address start = __ pc();
3550
3551 if (UseRTMLocking) {
3552 // Abort RTM transaction before possible nmethod deoptimization.
3553 __ xabort(0);
3554 }
3555
3556 // Push self-frame. We get here with a return address on the
3557 // stack, so rsp is 8-byte aligned until we allocate our frame.
3558 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3559
3560 // No callee saved registers. rbp is assumed implicitly saved
3561 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3562
3563 // The compiler left unloaded_class_index in j_rarg0; move it to where the
3564 // runtime expects it.
3565 __ movl(c_rarg1, j_rarg0);
3566
3567 __ set_last_Java_frame(noreg, noreg, NULL);
3568
3569 // Call C code. Need thread but NOT official VM entry
3570 // crud. We cannot block on this call, no GC can happen. Call should
3571 // capture callee-saved registers as well as return values.
3572 // Thread is in rdi already.
3573 //
3574 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3575
3576 __ mov(c_rarg0, r15_thread);
3577 __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3578 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3579
3580 // Set an oopmap for the call site
3581 OopMapSet* oop_maps = new OopMapSet();
3582 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3583
3584 // location of rbp is known implicitly by the frame sender code
3585
3586 oop_maps->add_gc_map(__ pc() - start, map);
3587
3588 __ reset_last_Java_frame(false);
3589
3590 // Load UnrollBlock* into rdi
3591 __ mov(rdi, rax);
3592
3593 #ifdef ASSERT
3594 { Label L;
3595 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3596 (int32_t)Deoptimization::Unpack_uncommon_trap);
3597 __ jcc(Assembler::equal, L);
3598 __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3599 __ bind(L);
3600 }
3601 #endif
3602
3603 // Pop all the frames we must move/replace.
3604 //
3605 // Frame picture (youngest to oldest)
3606 // 1: self-frame (no frame link)
3607 // 2: deopting frame (no frame link)
3608 // 3: caller of deopting frame (could be compiled/interpreted).
3609
3610 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3611 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3612
3613 // Pop deoptimized frame (int)
3614 __ movl(rcx, Address(rdi,
3615 Deoptimization::UnrollBlock::
3616 size_of_deoptimized_frame_offset_in_bytes()));
3617 __ addptr(rsp, rcx);
3618
3619 // rsp should be pointing at the return address to the caller (3)
3620
3621 // Pick up the initial fp we should save
3622 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3623 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3624
3625 #ifdef ASSERT
3626 // Compilers generate code that bangs the stack by as much as the
3627 // interpreter would need, so this stack banging should never
3628 // trigger a fault. Verify that it does not on non-product builds.
3629 if (UseStackBanging) {
3630 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3631 __ bang_stack_size(rbx, rcx);
3632 }
3633 #endif
3634
3635 // Load address of array of frame pcs into rcx (address*)
3636 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3637
3638 // Trash the return pc
3639 __ addptr(rsp, wordSize);
3640
3641 // Load address of array of frame sizes into rsi (intptr_t*)
3642 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset_in_bytes()));
3643
3644 // Counter
3645 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int)
3646
3647 // Now adjust the caller's stack to make up for the extra locals, but
3648 // record the original sp first so that we can save it in the skeletal
3649 // interpreter frame; the stack walking of interpreter_sender will then
3650 // get the unextended sp value and not the "real" sp value.
3651
3652 const Register sender_sp = r8;
3653
3654 __ mov(sender_sp, rsp);
3655 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int)
3656 __ subptr(rsp, rbx);
3657
3658 // Push interpreter frames in a loop
3659 Label loop;
3660 __ bind(loop);
3661 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3662 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3663 __ pushptr(Address(rcx, 0)); // Save return address
3664 __ enter(); // Save old & set new rbp
3665 __ subptr(rsp, rbx); // Prolog
3666 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3667 sender_sp); // Make it walkable
3668 // This value is corrected by layout_activation_impl
3669 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3670 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3671 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3672 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3673 __ decrementl(rdx); // Decrement counter
3674 __ jcc(Assembler::notZero, loop);
3675 __ pushptr(Address(rcx, 0)); // Save final return address
3676
3677 // Re-push self-frame
3678 __ enter(); // Save old & set new rbp
3679 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3680 // Prolog
3681
3682 // Use rbp because the frames look interpreted now
3683 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3684 // Don't need the precise return PC here, just precise enough to point into this code blob.
3685 address the_pc = __ pc();
3686 __ set_last_Java_frame(noreg, rbp, the_pc);
3687
3688 // Call C code. Need thread but NOT official VM entry
3689 // crud. We cannot block on this call, no GC can happen. Call should
3690 // restore return values to their stack-slots with the new SP.
3691 // Thread is in rdi already.
3692 //
3693 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3694
3695 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3696 __ mov(c_rarg0, r15_thread);
3697 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3698 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3699
3700 // Set an oopmap for the call site
3701 // Use the same PC we used for the last java frame
3702 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3703
3704 // Clear fp AND pc
3705 __ reset_last_Java_frame(true);
3706
3707 // Pop self-frame.
3708 __ leave(); // Epilog
3709
3710 // Jump to interpreter
3711 __ ret(0);
3712
3713 // Make sure all code is generated
3714 masm->flush();
3715
3716 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3717 SimpleRuntimeFrame::framesize >> 1);
3718 }
3719 #endif // COMPILER2
3720
3721
3722 //------------------------------generate_handler_blob------
3723 //
3724 // Generate a special Compile2Runtime blob that saves all registers,
3725 // and sets up the oopmap.
3726 //
3727 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3728 assert(StubRoutines::forward_exception_entry() != NULL,
3729 "must be generated before");
3730
3731 ResourceMark rm;
3732 OopMapSet *oop_maps = new OopMapSet();
3733 OopMap* map;
3734
3735 // Allocate space for the code. Setup code generation tools.
3736 CodeBuffer buffer("handler_blob", 2048, 1024);
3737 MacroAssembler* masm = new MacroAssembler(&buffer);
3738
3739 address start = __ pc();
3740 address call_pc = NULL;
3741 int frame_size_in_words;
3742 bool cause_return = (poll_type == POLL_AT_RETURN);
3743 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3744
3745 if (UseRTMLocking) {
3746 // Abort RTM transaction before calling runtime
3747 // because critical section will be large and will be
3748 // aborted anyway. Also nmethod could be deoptimized.
3749 __ xabort(0);
3750 }
3751
3752 // Make room for return address (or push it again)
3753 if (!cause_return) {
3754 __ push(rbx);
3755 }
3756
3757 // Save registers, fpu state, and flags
3758 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3759
3760 // The following is basically a call_VM. However, we need the precise
3761 // address of the call in order to generate an oopmap. Hence, we do all the
3762 // work ourselves.
3763
3764 __ set_last_Java_frame(noreg, noreg, NULL);
3765
3766 // The return address must always be correct so that the frame constructor never
3767 // sees an invalid pc.
3768
3769 if (!cause_return) {
3770 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3771 // Additionally, rbx is a callee saved register and we can look at it later to determine
3772 // if someone changed the return address for us!
3773 __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3774 __ movptr(Address(rbp, wordSize), rbx);
3775 }
3776
3777 // Do the call
3778 __ mov(c_rarg0, r15_thread);
3779 __ call(RuntimeAddress(call_ptr));
3780
3781 // Set an oopmap for the call site. This oopmap will map all
3782 // oop-registers and debug-info registers as callee-saved. This
3783 // will allow deoptimization at this safepoint to find all possible
3784 // debug-info recordings, as well as let GC find all oops.
3785
3786 oop_maps->add_gc_map( __ pc() - start, map);
3787
3788 Label noException;
3789
3790 __ reset_last_Java_frame(false);
3791
3792 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3793 __ jcc(Assembler::equal, noException);
3794
3795 // Exception pending
3796
3797 RegisterSaver::restore_live_registers(masm, save_vectors);
3798
3799 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3800
3801 // No exception case
3802 __ bind(noException);
3803
3804 Label no_adjust, bail, no_prefix, not_special;
3805 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3806 // If our stashed return pc was modified by the runtime we avoid touching it
3807 __ cmpptr(rbx, Address(rbp, wordSize));
3808 __ jccb(Assembler::notEqual, no_adjust);
3809
3810 // Skip over the poll instruction.
3811 // See NativeInstruction::is_safepoint_poll()
3812 // Possible encodings:
3813 // 85 00 test %eax,(%rax)
3814 // 85 01 test %eax,(%rcx)
3815 // 85 02 test %eax,(%rdx)
3816 // 85 03 test %eax,(%rbx)
3817 // 85 06 test %eax,(%rsi)
3818 // 85 07 test %eax,(%rdi)
3819 //
3820 // 41 85 00 test %eax,(%r8)
3821 // 41 85 01 test %eax,(%r9)
3822 // 41 85 02 test %eax,(%r10)
3823 // 41 85 03 test %eax,(%r11)
3824 // 41 85 06 test %eax,(%r14)
3825 // 41 85 07 test %eax,(%r15)
3826 //
3827 // 85 04 24 test %eax,(%rsp)
3828 // 41 85 04 24 test %eax,(%r12)
3829 // 85 45 00 test %eax,0x0(%rbp)
3830 // 41 85 45 00 test %eax,0x0(%r13)
3831
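// Summary of the adjustment generated below: the stashed return pc is advanced by the
// length of the poll instruction, i.e. 2 bytes for the opcode+modrm pair, plus 1 byte
// if a REX.B prefix (0x41) is present, plus 1 byte if the base register is rsp/r12
// (which need a SIB byte) or rbp/r13 (which need a disp8). A sketch of the intent in
// C-like form (descriptive only, not the generated code):
//   len = 2 + (pc[0] == 0x41 ? 1 : 0) + (base is rsp/rbp/r12/r13 ? 1 : 0);
//   return_pc += len;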
3832 __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3833 __ jcc(Assembler::notEqual, no_prefix);
3834 __ addptr(rbx, 1);
3835 __ bind(no_prefix);
3836 #ifdef ASSERT
3837 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3838 #endif
3839 // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3840 // r12/rsp 0x04
3841 // r13/rbp 0x05
3842 __ movzbq(rcx, Address(rbx, 1));
3843 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3844 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
3845 __ cmpptr(rcx, 1);
3846 __ jcc(Assembler::above, not_special);
3847 __ addptr(rbx, 1);
3848 __ bind(not_special);
3849 #ifdef ASSERT
3850 // Verify the correct encoding of the poll we're about to skip.
3851 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3852 __ jcc(Assembler::notEqual, bail);
3853 // Mask out the modrm bits
3854 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3855 // rax encodes to 0, so if the bits are nonzero it's incorrect
3856 __ jcc(Assembler::notZero, bail);
3857 #endif
3858 // Adjust return pc forward to step over the safepoint poll instruction
3859 __ addptr(rbx, 2);
3860 __ movptr(Address(rbp, wordSize), rbx);
3861 }
3862
3863 __ bind(no_adjust);
3864 // Normal exit, restore registers and exit.
3865 RegisterSaver::restore_live_registers(masm, save_vectors);
3866 __ ret(0);
3867
3868 #ifdef ASSERT
3869 __ bind(bail);
3870 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3871 #endif
3872
3873 // Make sure all code is generated
3874 masm->flush();
3875
3876 // Fill-out other meta info
3877 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3878 }
3879
3880 //
3881 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3882 //
3883 // Generate a stub that calls into vm to find out the proper destination
3884 // of a java call. All the argument registers are live at this point
3885 // but since this is generic code we don't know what they are and the caller
3886 // must do any gc of the args.
3887 //
3888 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3889 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3890
3891 // allocate space for the code
3892 ResourceMark rm;
3893
3894 CodeBuffer buffer(name, 1000, 512);
3895 MacroAssembler* masm = new MacroAssembler(&buffer);
3896
3897 int frame_size_in_words;
3898
3899 OopMapSet *oop_maps = new OopMapSet();
3900 OopMap* map = NULL;
3901
3902 int start = __ offset();
3903
3904 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3905
3906 int frame_complete = __ offset();
3907
3908 __ set_last_Java_frame(noreg, noreg, NULL);
3909
3910 __ mov(c_rarg0, r15_thread);
3911
3912 __ call(RuntimeAddress(destination));
3913
3914
3915 // Set an oopmap for the call site.
3916 // We need this not only for callee-saved registers, but also for volatile
3917 // registers that the compiler might be keeping live across a safepoint.
3918
3919 oop_maps->add_gc_map( __ offset() - start, map);
3920
3921 // rax contains the address we are going to jump to assuming no exception got installed
3922
3923 // clear last_Java_sp
3924 __ reset_last_Java_frame(false);
3925 // check for pending exceptions
3926 Label pending;
3927 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3928 __ jcc(Assembler::notEqual, pending);
3929
3930 // get the returned Method*
3931 __ get_vm_result_2(rbx, r15_thread);
3932 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3933
3934 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3935
3936 RegisterSaver::restore_live_registers(masm);
3937
3938 // We are back to the original state on entry and ready to go.
3939
3940 __ jmp(rax);
3941
3942 // Pending exception after the safepoint
3943
3944 __ bind(pending);
3945
3946 RegisterSaver::restore_live_registers(masm);
3947
3948 // exception pending => remove activation and forward to exception handler
3949
3950 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3951
3952 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3953 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3954
3955 // -------------
3956 // make sure all code is generated
3957 masm->flush();
3958
3959 // return the blob
3960 // frame_size_words or bytes??
3961 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3962 }
3963
3964
3965 //------------------------------Montgomery multiplication------------------------
3966 //
3967
3968 #ifndef _WINDOWS
3969
3970 #define ASM_SUBTRACT
3971
3972 #ifdef ASM_SUBTRACT
3973 // Subtract 0:b from carry:a. Return carry.
3974 static unsigned long
3975 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3976 long i = 0, cnt = len;
3977 unsigned long tmp;
3978 asm volatile("clc; "
3979 "0: ; "
3980 "mov (%[b], %[i], 8), %[tmp]; "
3981 "sbb %[tmp], (%[a], %[i], 8); "
3982 "inc %[i]; dec %[cnt]; "
3983 "jne 0b; "
3984 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3985 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3986 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3987 : "memory");
3988 return tmp;
3989 }
3990 #else // ASM_SUBTRACT
3991 typedef int __attribute__((mode(TI))) int128;
3992
3993 // Subtract 0:b from carry:a. Return carry.
3994 static unsigned long
3995 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
3996 int128 tmp = 0;
3997 int i;
3998 for (i = 0; i < len; i++) {
3999 tmp += a[i];
4000 tmp -= b[i];
4001 a[i] = tmp;
4002 tmp >>= 64;
4003 assert(-1 <= tmp && tmp <= 0, "invariant");
4004 }
4005 return tmp + carry;
4006 }
4007 #endif // ! ASM_SUBTRACT
4008
4009 // Multiply (unsigned) Long A by Long B, accumulating the double-
4010 // length result into the accumulator formed of T0, T1, and T2.
4011 #define MACC(A, B, T0, T1, T2) \
4012 do { \
4013 unsigned long hi, lo; \
4014 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
4015 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
4016 : "r"(A), "a"(B) : "cc"); \
4017 } while(0)
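// Rough C equivalent of MACC, shown only as a sketch of what the inline asm computes
// (it assumes GCC's unsigned __int128 extension, which is reasonable here since this
// whole block already relies on GCC inline asm):
//
//   unsigned __int128 p = (unsigned __int128)A * B;        // 128-bit product
//   unsigned __int128 s = (unsigned __int128)T0 + (unsigned long)p;
//   T0 = (unsigned long)s;                                 // low half into T0
//   s = (s >> 64) + T1 + (unsigned long)(p >> 64);         // carry plus high half into T1
//   T1 = (unsigned long)s;
//   T2 += (unsigned long)(s >> 64);                        // final carry into T2
//
// MACC2 below does the same but accumulates the product twice.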
4018
4019 // As above, but add twice the double-length result into the
4020 // accumulator.
4021 #define MACC2(A, B, T0, T1, T2) \
4022 do { \
4023 unsigned long hi, lo; \
4024 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
4025 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
4026 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
4027 : "r"(A), "a"(B) : "cc"); \
4028 } while(0)
4029
4030 // Fast Montgomery multiplication. The derivation of the algorithm is
4031 // in A Cryptographic Library for the Motorola DSP56000,
4032 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
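// In math terms, with R = 2^(64*len) and inv chosen so that inv * n[0] == -1 (mod 2^64)
// (checked by the assert below), the routine computes m == a * b * R^-1 (mod n). Each
// outer iteration chooses the reduction digit m[i] = t0 * inv so that adding m[i] * n[0]
// cancels the lowest 64-bit digit of the accumulator; that is exactly what the
// "broken Montgomery multiply" assert verifies before the accumulator is shifted down
// one digit.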
4033
4034 static void __attribute__((noinline))
4035 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
4036 unsigned long m[], unsigned long inv, int len) {
4037 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4038 int i;
4039
4040 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4041
4042 for (i = 0; i < len; i++) {
4043 int j;
4044 for (j = 0; j < i; j++) {
4045 MACC(a[j], b[i-j], t0, t1, t2);
4046 MACC(m[j], n[i-j], t0, t1, t2);
4047 }
4048 MACC(a[i], b[0], t0, t1, t2);
4049 m[i] = t0 * inv;
4050 MACC(m[i], n[0], t0, t1, t2);
4051
4052 assert(t0 == 0, "broken Montgomery multiply");
4053
4054 t0 = t1; t1 = t2; t2 = 0;
4055 }
4056
4057 for (i = len; i < 2*len; i++) {
4058 int j;
4059 for (j = i-len+1; j < len; j++) {
4060 MACC(a[j], b[i-j], t0, t1, t2);
4061 MACC(m[j], n[i-j], t0, t1, t2);
4062 }
4063 m[i-len] = t0;
4064 t0 = t1; t1 = t2; t2 = 0;
4065 }
4066
4067 while (t0)
4068 t0 = sub(m, n, t0, len);
4069 }
4070
4071 // Fast Montgomery squaring. This uses asymptotically 25% fewer
4072 // multiplies so it should be up to 25% faster than Montgomery
4073 // multiplication. However, its loop control is more complex and it
4074 // may actually run slower on some machines.
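// Sketch of where the savings come from: when squaring, the cross products a[j]*a[i-j]
// and a[i-j]*a[j] are equal, so MACC2 folds both into a single hardware multiply and only
// the diagonal terms a[j]*a[j] need a separate MACC. The reduction multiplies m[j]*n[i-j]
// are unchanged, which is why the overall saving is roughly 25% rather than 50%.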
4075
4076 static void __attribute__((noinline))
4077 montgomery_square(unsigned long a[], unsigned long n[],
4078 unsigned long m[], unsigned long inv, int len) {
4079 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4080 int i;
4081
4082 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4083
4084 for (i = 0; i < len; i++) {
4085 int j;
4086 int end = (i+1)/2;
4087 for (j = 0; j < end; j++) {
4088 MACC2(a[j], a[i-j], t0, t1, t2);
4089 MACC(m[j], n[i-j], t0, t1, t2);
4090 }
4091 if ((i & 1) == 0) {
4092 MACC(a[j], a[j], t0, t1, t2);
4093 }
4094 for (; j < i; j++) {
4095 MACC(m[j], n[i-j], t0, t1, t2);
4096 }
4097 m[i] = t0 * inv;
4098 MACC(m[i], n[0], t0, t1, t2);
4099
4100 assert(t0 == 0, "broken Montgomery square");
4101
4102 t0 = t1; t1 = t2; t2 = 0;
4103 }
4104
4105 for (i = len; i < 2*len; i++) {
4106 int start = i-len+1;
4107 int end = start + (len - start)/2;
4108 int j;
4109 for (j = start; j < end; j++) {
4110 MACC2(a[j], a[i-j], t0, t1, t2);
4111 MACC(m[j], n[i-j], t0, t1, t2);
4112 }
4113 if ((i & 1) == 0) {
4114 MACC(a[j], a[j], t0, t1, t2);
4115 }
4116 for (; j < len; j++) {
4117 MACC(m[j], n[i-j], t0, t1, t2);
4118 }
4119 m[i-len] = t0;
4120 t0 = t1; t1 = t2; t2 = 0;
4121 }
4122
4123 while (t0)
4124 t0 = sub(m, n, t0, len);
4125 }
4126
4127 // Swap words in a longword.
4128 static unsigned long swap(unsigned long x) {
4129 return (x << 32) | (x >> 32);
4130 }
4131
4132 // Copy len longwords from s to d, word-swapping as we go. The
4133 // destination array is reversed.
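// For example, with len == 2 and s == { hi0:lo0, hi1:lo1 } (each element shown as its two
// 32-bit halves), the result is d == { lo1:hi1, lo0:hi0 }: the element order is reversed
// and the two halves of every 64-bit word are swapped. This is how the callers below turn
// the jint-based arrays they receive into the longword layout the Montgomery routines use.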
4134 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
4135 d += len;
4136 while(len-- > 0) {
4137 d--;
4138 *d = swap(*s);
4139 s++;
4140 }
4141 }
4142
4143 // The threshold at which squaring is advantageous was determined
4144 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
4145 #define MONTGOMERY_SQUARING_THRESHOLD 64
4146
4147 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
4148 jint len, jlong inv,
4149 jint *m_ints) {
4150 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
4151 int longwords = len/2;
4152
4153 // Make very sure we don't use so much space that the stack might
4154 // overflow. 512 jints corresponds to a 16384-bit integer and
4155 // will use here a total of 8k bytes of stack space.
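// (Arithmetic check: 512 jints = 256 longwords; 4 scratch arrays * 256 longwords * 8 bytes
// = 8192 bytes.)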
4156 int total_allocation = longwords * sizeof (unsigned long) * 4;
4157 guarantee(total_allocation <= 8192, "must be");
4158 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4159
4160 // Local scratch arrays
4161 unsigned long
4162 *a = scratch + 0 * longwords,
4163 *b = scratch + 1 * longwords,
4164 *n = scratch + 2 * longwords,
4165 *m = scratch + 3 * longwords;
4166
4167 reverse_words((unsigned long *)a_ints, a, longwords);
4168 reverse_words((unsigned long *)b_ints, b, longwords);
4169 reverse_words((unsigned long *)n_ints, n, longwords);
4170
4171 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
4172
4173 reverse_words(m, (unsigned long *)m_ints, longwords);
4174 }
4175
4176 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
4177 jint len, jlong inv,
4178 jint *m_ints) {
4179 assert(len % 2 == 0, "array length in montgomery_square must be even");
4180 int longwords = len/2;
4181
4182 // Make very sure we don't use so much space that the stack might
4183 // overflow. 512 jints corresponds to a 16384-bit integer and
4184 // will use here a total of 6k bytes of stack space.
4185 int total_allocation = longwords * sizeof (unsigned long) * 3;
4186 guarantee(total_allocation <= 8192, "must be");
4187 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4188
4189 // Local scratch arrays
4190 unsigned long
4191 *a = scratch + 0 * longwords,
4192 *n = scratch + 1 * longwords,
4193 *m = scratch + 2 * longwords;
4194
4195 reverse_words((unsigned long *)a_ints, a, longwords);
4196 reverse_words((unsigned long *)n_ints, n, longwords);
4197
4198 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
4199 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
4200 } else {
4201 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
4202 }
4203
4204 reverse_words(m, (unsigned long *)m_ints, longwords);
4205 }
4206
4207 #endif // !_WINDOWS
4208
4209 #ifdef COMPILER2
4210 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
4211 //
4212 //------------------------------generate_exception_blob---------------------------
4213 // Creates the exception blob at the end.
4214 // Using the exception blob, this code is jumped to from a compiled method.
4215 // (see emit_exception_handler in x86_64.ad file)
4216 //
4217 // Given an exception pc at a call, we call into the runtime for the
4218 // handler in this method. This handler might merely restore state
4219 // (i.e. callee save registers), unwind the frame, and jump to the
4220 // exception handler for the nmethod if there is no Java-level handler
4221 // for the nmethod.
4222 //
4223 // This code is entered with a jmp.
4224 //
4225 // Arguments:
4226 // rax: exception oop
4227 // rdx: exception pc
4228 //
4229 // Results:
4230 // rax: exception oop
4231 // rdx: exception pc in caller or ???
4232 // destination: exception handler of caller
4233 //
4234 // Note: the exception pc MUST be at a call (precise debug information)
4235 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
4236 //
4237
4238 void OptoRuntime::generate_exception_blob() {
4239 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
4240 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
4241 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
4242
4243 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
4244
4245 // Allocate space for the code
4246 ResourceMark rm;
4247 // Setup code generation tools
4248 CodeBuffer buffer("exception_blob", 2048, 1024);
4249 MacroAssembler* masm = new MacroAssembler(&buffer);
4250
4251
4252 address start = __ pc();
4253
4254 // Exception pc is 'return address' for stack walker
4255 __ push(rdx);
4256 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4257
4258 // Save callee-saved registers. See x86_64.ad.
4259
4260 // rbp is an implicitly saved callee saved register (i.e., the calling
4261 // convention will save/restore it in the prolog/epilog). Other than that
4262 // there are no callee save registers now that adapter frames are gone.
4263
4264 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4265
4266 // Store exception in Thread object. We cannot pass any arguments to the
4267 // handle_exception call, since we do not want to make any assumption
4268 // about the size of the frame where the exception happened in.
4269 // c_rarg0 is either rdi (Linux) or rcx (Windows).
4270 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4271 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4272
4273 // This call does all the hard work. It checks if an exception handler
4274 // exists in the method.
4275 // If so, it returns the handler address.
4276 // If not, it prepares for stack-unwinding, restoring the callee-save
4277 // registers of the frame being removed.
4278 //
4279 // address OptoRuntime::handle_exception_C(JavaThread* thread)
4280
4281 // At a method handle call, the stack may not be properly aligned
4282 // when returning with an exception.
4283 address the_pc = __ pc();
4284 __ set_last_Java_frame(noreg, noreg, the_pc);
4285 __ mov(c_rarg0, r15_thread);
4286 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4287 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4288
4289 // Set an oopmap for the call site. This oopmap will only be used if we
4290 // are unwinding the stack. Hence, all locations will be dead.
4291 // Callee-saved registers will be the same as the frame above (i.e.,
4292 // handle_exception_stub), since they were restored when we got the
4293 // exception.
4294
4295 OopMapSet* oop_maps = new OopMapSet();
4296
4297 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4298
4299 __ reset_last_Java_frame(false);
4300
4301 // Restore callee-saved registers
4302
4303 // rbp is an implicitly saved callee-saved register (i.e., the calling
4304 // convention will save/restore it in the prolog/epilog). Other than that
4305 // there are no callee save registers now that adapter frames are gone.
4306
4307 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4308
4309 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4310 __ pop(rdx); // No need for exception pc anymore
4311
4312 // rax: exception handler
4313
4314 // We have a handler in rax (could be deopt blob).
4315 __ mov(r8, rax);
4316
4317 // Get the exception oop
4318 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4319 // Get the exception pc in case we are deoptimized
4320 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4321 #ifdef ASSERT
4322 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4323 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4324 #endif
4325 // Clear the exception oop so GC no longer processes it as a root.
4326 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4327
4328 // rax: exception oop
4329 // r8: exception handler
4330 // rdx: exception pc
4331 // Jump to handler
4332
4333 __ jmp(r8);
4334
4335 // Make sure all code is generated
4336 masm->flush();
4337
4338 // Set exception blob
4339 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4340 }
4341 #endif // COMPILER2
4342
4343 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
4344 BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
4345 CodeBuffer buffer(buf);
4346 short buffer_locs[20];
4347 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4348 sizeof(buffer_locs)/sizeof(relocInfo));
4349
4350 MacroAssembler _masm(&buffer);
4351 MacroAssembler* masm = &_masm;
4352
4353 const Array<SigEntry>* sig_vk = vk->extended_sig();
4354 const Array<VMRegPair>* regs = vk->return_regs();
4355
4356 int pack_fields_off = __ offset();
4357
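// Note on the walk below (a descriptive sketch inferred from the loop itself, not an
// authoritative statement of the SigEntry convention): T_VALUETYPE entries are markers that
// carry no register of their own, and every T_LONG/T_DOUBLE entry is followed by a T_VOID
// entry for its second slot, which is why j is bumped an extra time in that case.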
4358 int j = 1;
4359 for (int i = 0; i < sig_vk->length(); i++) {
4360 BasicType bt = sig_vk->at(i)._bt;
4361 if (bt == T_VALUETYPE) {
4362 continue;
4363 }
4364 if (bt == T_VOID) {
4365 if (sig_vk->at(i-1)._bt == T_LONG ||
4366 sig_vk->at(i-1)._bt == T_DOUBLE) {
4367 j++;
4368 }
4369 continue;
4370 }
4371 int off = sig_vk->at(i)._offset;
4372 VMRegPair pair = regs->at(j);
4373 VMReg r_1 = pair.first();
4374 VMReg r_2 = pair.second();
4375 Address to(rax, off);
4376 if (bt == T_FLOAT) {
4377 __ movflt(to, r_1->as_XMMRegister());
4378 } else if (bt == T_DOUBLE) {
4379 __ movdbl(to, r_1->as_XMMRegister());
4380 } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4381 __ store_heap_oop(to, r_1->as_Register());
4382 } else {
4383 assert(is_java_primitive(bt), "unexpected basic type");
4384 size_t size_in_bytes = type2aelembytes(bt);
4385 __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
4386 }
4387 j++;
4388 }
4389 assert(j == regs->length(), "missed a field?");
4390
4391 __ ret(0);
4392
4393 int unpack_fields_off = __ offset();
4394
4395 j = 1;
4396 for (int i = 0; i < sig_vk->length(); i++) {
4397 BasicType bt = sig_vk->at(i)._bt;
4398 if (bt == T_VALUETYPE) {
4399 continue;
4400 }
4401 if (bt == T_VOID) {
4402 if (sig_vk->at(i-1)._bt == T_LONG ||
4403 sig_vk->at(i-1)._bt == T_DOUBLE) {
4404 j++;
4405 }
4406 continue;
4407 }
4408 int off = sig_vk->at(i)._offset;
4409 VMRegPair pair = regs->at(j);
4410 VMReg r_1 = pair.first();
4411 VMReg r_2 = pair.second();
4412 Address from(rax, off);
4413 if (bt == T_FLOAT) {
4414 __ movflt(r_1->as_XMMRegister(), from);
4415 } else if (bt == T_DOUBLE) {
4416 __ movdbl(r_1->as_XMMRegister(), from);
4417 } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4418 __ load_heap_oop(r_1->as_Register(), from);
4419 } else {
4420 assert(is_java_primitive(bt), "unexpected basic type");
4421 size_t size_in_bytes = type2aelembytes(bt);
4422 __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4423 }
4424 j++;
4425 }
4426 assert(j == regs->length(), "missed a field?");
4427
4428 if (StressValueTypeReturnedAsFields) {
4429 __ load_klass(rax, rax);
4430 __ orptr(rax, 1);
4431 }
4432
4433 __ ret(0);
4434
4435 __ flush();
4436
4437 return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
4438 }
--- EOF ---