--- old/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ new/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
1 1 /*
2 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "asm/assembler.hpp"
27 27 #include "assembler_sparc.inline.hpp"
28 28 #include "code/debugInfoRec.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "code/vtableStubs.hpp"
31 31 #include "interpreter/interpreter.hpp"
32 32 #include "oops/compiledICHolderOop.hpp"
33 33 #include "prims/jvmtiRedefineClassesTrace.hpp"
34 34 #include "runtime/sharedRuntime.hpp"
35 35 #include "runtime/vframeArray.hpp"
36 36 #include "vmreg_sparc.inline.hpp"
37 37 #ifdef COMPILER1
38 38 #include "c1/c1_Runtime1.hpp"
39 39 #endif
40 40 #ifdef COMPILER2
41 41 #include "opto/runtime.hpp"
42 42 #endif
43 43 #ifdef SHARK
44 44 #include "compiler/compileBroker.hpp"
45 45 #include "shark/sharkCompiler.hpp"
46 46 #endif
47 47
48 48 #define __ masm->
49 49
50 50 #ifdef COMPILER2
51 51 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
52 52 #endif // COMPILER2
53 53
54 54 DeoptimizationBlob* SharedRuntime::_deopt_blob;
55 55 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
56 56 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
57 57 RuntimeStub* SharedRuntime::_wrong_method_blob;
58 58 RuntimeStub* SharedRuntime::_ic_miss_blob;
59 59 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
60 60 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
61 61 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
62 62
63 63 class RegisterSaver {
64 64
65 65 // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
66 66 // The Oregs are problematic. In the 32-bit build the compiler can
67 67 // have O registers live with 64-bit quantities. A window save will
68 68 // cut the heads off of the registers. We have to do a very extensive
69 69 // stack dance to save and restore these properly.
70 70
71 71 // Note that the Oregs problem only exists if we block at either a polling
72 72 // page exception or a compiled code safepoint that was not originally a call,
73 73 // or deoptimize following one of these kinds of safepoints.
74 74
75 75 // Lots of registers to save. For all builds, a window save will preserve
76 76 // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
77 77 // builds a window-save will preserve the %o registers. In the LION build
78 78 // we need to save the 64-bit %o registers which requires we save them
79 79 // before the window-save (as then they become %i registers and get their
80 80 // heads chopped off on interrupt). We have to save some %g registers here
81 81 // as well.
82 82 enum {
83 83 // This frame's save area. Includes extra space for the native call:
84 84 // vararg's layout space and the like. Briefly holds the caller's
85 85 // register save area.
86 86 call_args_area = frame::register_save_words_sp_offset +
87 87 frame::memory_parameter_word_sp_offset*wordSize,
88 88 // Make sure save locations are always 8 byte aligned.
89 89 // can't use round_to because it doesn't produce a compile-time constant
90 90 start_of_extra_save_area = ((call_args_area + 7) & ~7),
91 91 g1_offset = start_of_extra_save_area, // g-regs needing saving
92 92 g3_offset = g1_offset+8,
93 93 g4_offset = g3_offset+8,
94 94 g5_offset = g4_offset+8,
95 95 o0_offset = g5_offset+8,
96 96 o1_offset = o0_offset+8,
97 97 o2_offset = o1_offset+8,
98 98 o3_offset = o2_offset+8,
99 99 o4_offset = o3_offset+8,
100 100 o5_offset = o4_offset+8,
101 101 start_of_flags_save_area = o5_offset+8,
102 102 ccr_offset = start_of_flags_save_area,
103 103 fsr_offset = ccr_offset + 8,
104 104 d00_offset = fsr_offset+8, // Start of float save area
105 105 register_save_size = d00_offset+8*32
106 106 };
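// Illustrative arithmetic for the enum above (the concrete value of
// call_args_area comes from the frame constants in frame_sparc.hpp, so
// the starting number here is hypothetical): if call_args_area were 92,
//   start_of_extra_save_area = (92 + 7) & ~7 = 96    // 8-byte aligned
//   g1_offset = 96, g3_offset = 104, g4_offset = 112, g5_offset = 120,
//   o0_offset = 128 ... o5_offset = 168,
//   ccr_offset = 176, fsr_offset = 184, d00_offset = 192,
//   register_save_size = 192 + 8*32 = 448 bytes.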
107 107
108 108
109 109 public:
110 110
111 111 static int Oexception_offset() { return o0_offset; };
112 112 static int G3_offset() { return g3_offset; };
113 113 static int G5_offset() { return g5_offset; };
114 114 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
115 115 static void restore_live_registers(MacroAssembler* masm);
116 116
117 117 // During deoptimization only the result registers need to be restored;
118 118 // all the other values have already been extracted.
119 119
120 120 static void restore_result_registers(MacroAssembler* masm);
121 121 };
122 122
123 123 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
124 124 // Record volatile registers as callee-save values in an OopMap so their save locations will be
125 125 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
126 126 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
127 127 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
128 128 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
129 129 int i;
130 130 // Always make the frame size 16 byte aligned.
131 131 int frame_size = round_to(additional_frame_words + register_save_size, 16);
132 132 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
133 133 int frame_size_in_slots = frame_size / sizeof(jint);
134 134 // CodeBlob frame size is in words.
135 135 *total_frame_words = frame_size / wordSize;
136 136 // OopMap* map = new OopMap(*total_frame_words, 0);
137 137 OopMap* map = new OopMap(frame_size_in_slots, 0);
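// A sketch of the three size units above, assuming the hypothetical
// register_save_size of 448 bytes, additional_frame_words == 0 and a
// 64-bit build (wordSize == 8):
//   frame_size          = round_to(448, 16) = 448 bytes
//   frame_size_in_slots = 448 / sizeof(jint) = 112   (4-byte VMReg slots)
//   *total_frame_words  = 448 / 8 = 56               (CodeBlob words)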
138 138
139 139 #if !defined(_LP64)
140 140
141 141 // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
142 142 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
143 143 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
144 144 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
145 145 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
146 146 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
147 147 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
148 148 #endif /* _LP64 */
149 149
150 150 __ save(SP, -frame_size, SP);
151 151
152 152 #ifndef _LP64
153 153 // Reload the 64-bit Oregs. Although they are now Iregs we load them
154 154 // to Oregs here to avoid interrupts cutting off their heads
155 155
156 156 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
157 157 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
158 158 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
159 159 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
160 160 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
161 161 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
162 162
163 163 __ stx(O0, SP, o0_offset+STACK_BIAS);
164 164 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
165 165
166 166 __ stx(O1, SP, o1_offset+STACK_BIAS);
167 167
168 168 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
169 169
170 170 __ stx(O2, SP, o2_offset+STACK_BIAS);
171 171 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
172 172
173 173 __ stx(O3, SP, o3_offset+STACK_BIAS);
174 174 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
175 175
176 176 __ stx(O4, SP, o4_offset+STACK_BIAS);
177 177 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
178 178
179 179 __ stx(O5, SP, o5_offset+STACK_BIAS);
180 180 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
181 181 #endif /* _LP64 */
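// Why (o0_offset + 4) >> 2 in the 32-bit mappings above: VMReg stack
// slots are 4-byte units and SPARC is big-endian, so after the 8-byte
// stx the register's low 32-bit word (the one a 32-bit VMReg names)
// sits at byte offset+4. With the hypothetical o0_offset of 128 from
// the enum sketch above, O0 would map to stack slot (128 + 4) / 4 = 33.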
182 182
183 183
184 184 #ifdef _LP64
185 185 int debug_offset = 0;
186 186 #else
187 187 int debug_offset = 4;
188 188 #endif
189 189 // Save the G's
190 190 __ stx(G1, SP, g1_offset+STACK_BIAS);
191 191 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
192 192
193 193 __ stx(G3, SP, g3_offset+STACK_BIAS);
194 194 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
195 195
196 196 __ stx(G4, SP, g4_offset+STACK_BIAS);
197 197 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
198 198
199 199 __ stx(G5, SP, g5_offset+STACK_BIAS);
200 200 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
201 201
202 202 // This is really a waste but we'll keep things as they were for now
203 203 if (true) {
204 204 #ifndef _LP64
205 205 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
206 206 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
207 207 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
208 208 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
209 209 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
210 210 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
211 211 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
212 212 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
213 213 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
214 214 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
215 215 #endif /* _LP64 */
216 216 }
217 217
218 218
219 219 // Save the flags
220 220 __ rdccr( G5 );
221 221 __ stx(G5, SP, ccr_offset+STACK_BIAS);
222 222 __ stxfsr(SP, fsr_offset+STACK_BIAS);
223 223
224 224 // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
225 225 int offset = d00_offset;
226 226 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
227 227 FloatRegister f = as_FloatRegister(i);
228 228 __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
229 229 // Record as callee saved both halves of double registers (2 float registers).
230 230 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
231 231 map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
232 232 offset += sizeof(double);
233 233 }
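// Worked example for the first iteration, assuming the float register
// file is enumerated in 32-bit halves with number_of_registers == 64 as
// on SPARC V9: i == 0 stores D0 (the F0:F1 pair) at d00_offset, records
// F0 at slot d00_offset>>2 and F1 at slot (d00_offset + 4)>>2, then
// advances offset by sizeof(double) == 8. Under that assumption the
// loop saves 32 doubles, matching the 8*32 bytes reserved in
// register_save_size.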
234 234
235 235 // And we're done.
236 236
237 237 return map;
238 238 }
239 239
240 240
241 241 // Pop the current frame and restore all the registers that we
242 242 // saved.
243 243 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
244 244
245 245 // Restore all the FP registers
246 246 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
247 247 __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
248 248 }
249 249
250 250 __ ldx(SP, ccr_offset+STACK_BIAS, G1);
251 251 __ wrccr (G1) ;
252 252
253 253 // Restore the G's
254 254 // Note that G2 (AKA GThread) must be saved and restored separately.
255 255 // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
256 256
257 257 __ ldx(SP, g1_offset+STACK_BIAS, G1);
258 258 __ ldx(SP, g3_offset+STACK_BIAS, G3);
259 259 __ ldx(SP, g4_offset+STACK_BIAS, G4);
260 260 __ ldx(SP, g5_offset+STACK_BIAS, G5);
261 261
262 262
263 263 #if !defined(_LP64)
264 264 // Restore the 64-bit O's.
265 265 __ ldx(SP, o0_offset+STACK_BIAS, O0);
266 266 __ ldx(SP, o1_offset+STACK_BIAS, O1);
267 267 __ ldx(SP, o2_offset+STACK_BIAS, O2);
268 268 __ ldx(SP, o3_offset+STACK_BIAS, O3);
269 269 __ ldx(SP, o4_offset+STACK_BIAS, O4);
270 270 __ ldx(SP, o5_offset+STACK_BIAS, O5);
271 271
272 272 // And temporarily place them in TLS
273 273
274 274 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
275 275 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
276 276 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
277 277 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
278 278 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
279 279 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
280 280 #endif /* _LP64 */
281 281
282 282 // Restore flags
283 283
284 284 __ ldxfsr(SP, fsr_offset+STACK_BIAS);
285 285
286 286 __ restore();
287 287
288 288 #if !defined(_LP64)
289 289 // Now reload the 64-bit Oregs after we've restored the window.
290 290 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
291 291 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
292 292 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
293 293 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
294 294 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
295 295 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
296 296 #endif /* _LP64 */
297 297
298 298 }
299 299
300 300 // Pop the current frame and restore the registers that might be holding
301 301 // a result.
302 302 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
303 303
304 304 #if !defined(_LP64)
305 305 // The 32-bit build returns longs in G1
306 306 __ ldx(SP, g1_offset+STACK_BIAS, G1);
307 307
308 308 // Retrieve the 64-bit O's.
309 309 __ ldx(SP, o0_offset+STACK_BIAS, O0);
310 310 __ ldx(SP, o1_offset+STACK_BIAS, O1);
311 311 // and save to TLS
312 312 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
313 313 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
314 314 #endif /* _LP64 */
315 315
316 316 __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
317 317
318 318 __ restore();
319 319
320 320 #if !defined(_LP64)
321 321 // Now reload the 64-bit Oregs after we've restored the window.
322 322 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
323 323 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
324 324 #endif /* _LP64 */
325 325
326 326 }
327 327
328 328 // The java_calling_convention describes stack locations as ideal slots on
329 329 // a frame with no abi restrictions. Since we must observe abi restrictions
330 330 // (like the placement of the register window) the slots must be biased by
331 331 // the following value.
332 332 static int reg2offset(VMReg r) {
333 333 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
334 334 }
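// Hypothetical evaluation of reg2offset: for a VMReg with
// reg2stack() == 3 and out_preserve_stack_slots() == 16 (the real value
// is target-defined), the result is (3 + 16) * 4 = 76 bytes above the
// window area.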
335 335
336 336 // ---------------------------------------------------------------------------
337 337 // Read the array of BasicTypes from a signature, and compute where the
338 338 // arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
339 339 // quantities. Values less than VMRegImpl::stack0 are registers, those above
340 340 // refer to 4-byte stack slots. All stack slots are based off of the window
341 341 // top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
342 342 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
343 343 // values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
344 344 // integer registers. Values 64-95 are the (32-bit only) float registers.
345 345 // Each 32-bit quantity is given its own number, so the integer registers
346 346 // (in either 32- or 64-bit builds) use 2 numbers. For example, there is
347 347 // an O0-low and an O0-high. Essentially, all int register numbers are doubled.
348 348
349 349 // Register results are passed in O0-O5, for outgoing call arguments. To
350 350 // convert to incoming arguments, convert all O's to I's. The regs array
351 351 // refers to the low and hi 32-bit words of 64-bit registers or stack slots.
352 352 // If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
353 353 // 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
354 354 // passed (used as a placeholder for the other half of longs and doubles in
355 355 // the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
356 356 // regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
357 357 // Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
358 358 // == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
359 359 // same VMRegPair.
360 360
361 361 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
362 362 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
363 363 // units regardless of build.
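// A few concrete encodings implied by the rules above (registers chosen
// for illustration only):
//   32-bit int in O1:         first() == O1, second() == VMRegImpl::Bad()
//   64-bit long in O0 (LP64): first() == O0, second() == O0->next()
//   long/double on the stack: first() == slot k, second() == slot k+1
//   the T_VOID half slot:     first() == second() == VMRegImpl::Bad()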
364 364
365 365
366 366 // ---------------------------------------------------------------------------
367 367 // The compiled Java calling convention. The Java convention always passes
368 368 // 64-bit values in adjacent aligned locations (either registers or stack),
369 369 // floats in float registers and doubles in aligned float pairs. Values are
370 370 // packed in the registers. There is no backing varargs store for values in
371 371 // registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
372 372 // passed in I's, because longs in I's get their heads chopped off at
373 373 // interrupt).
374 374 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
375 375 VMRegPair *regs,
376 376 int total_args_passed,
377 377 int is_outgoing) {
378 378 assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
379 379
380 380 // Convention is to pack the first 6 int/oop args into the first 6 registers
381 381 // (I0-I5), extras spill to the stack. Then pack the first 8 float args
382 382 // into F0-F7, extras spill to the stack. Then pad all register sets to
383 383 // align. Then put longs and doubles into the same registers as they fit,
384 384 // else spill to the stack.
385 385 const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
386 386 const int flt_reg_max = 8;
387 387 //
388 388 // Where 32-bit 1-reg longs start being passed
389 389 // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
390 390 // So make it look like we've filled all the G regs that c2 wants to use.
391 391 Register g_reg = TieredCompilation ? noreg : G1;
392 392
393 393 // Count int/oop and float args. See how many stack slots we'll need and
394 394 // where the longs & doubles will go.
395 395 int int_reg_cnt = 0;
396 396 int flt_reg_cnt = 0;
397 397 // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
398 398 // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
399 399 int stk_reg_pairs = 0;
400 400 for (int i = 0; i < total_args_passed; i++) {
401 401 switch (sig_bt[i]) {
402 402 case T_LONG: // LP64, longs compete with int args
403 403 assert(sig_bt[i+1] == T_VOID, "");
404 404 #ifdef _LP64
405 405 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
406 406 #endif
407 407 break;
408 408 case T_OBJECT:
409 409 case T_ARRAY:
410 410 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
411 411 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
412 412 #ifndef _LP64
413 413 else stk_reg_pairs++;
414 414 #endif
415 415 break;
416 416 case T_INT:
417 417 case T_SHORT:
418 418 case T_CHAR:
419 419 case T_BYTE:
420 420 case T_BOOLEAN:
421 421 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
422 422 else stk_reg_pairs++;
423 423 break;
424 424 case T_FLOAT:
425 425 if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
426 426 else stk_reg_pairs++;
427 427 break;
428 428 case T_DOUBLE:
429 429 assert(sig_bt[i+1] == T_VOID, "");
430 430 break;
431 431 case T_VOID:
432 432 break;
433 433 default:
434 434 ShouldNotReachHere();
435 435 }
436 436 }
437 437
438 438 // This is where the longs/doubles start on the stack.
439 439 stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round
440 440
441 441 int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
442 442 int flt_reg_pairs = (flt_reg_cnt+1) & ~1;
443 443
444 444 // int stk_reg = frame::register_save_words*(wordSize>>2);
445 445 // int stk_reg = SharedRuntime::out_preserve_stack_slots();
446 446 int stk_reg = 0;
447 447 int int_reg = 0;
448 448 int flt_reg = 0;
449 449
450 450 // Now do the signature layout
451 451 for (int i = 0; i < total_args_passed; i++) {
452 452 switch (sig_bt[i]) {
453 453 case T_INT:
454 454 case T_SHORT:
455 455 case T_CHAR:
456 456 case T_BYTE:
457 457 case T_BOOLEAN:
458 458 #ifndef _LP64
459 459 case T_OBJECT:
460 460 case T_ARRAY:
461 461 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
462 462 #endif // _LP64
463 463 if (int_reg < int_reg_max) {
464 464 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
465 465 regs[i].set1(r->as_VMReg());
466 466 } else {
467 467 regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
468 468 }
469 469 break;
470 470
471 471 #ifdef _LP64
472 472 case T_OBJECT:
473 473 case T_ARRAY:
474 474 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
475 475 if (int_reg < int_reg_max) {
476 476 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
477 477 regs[i].set2(r->as_VMReg());
478 478 } else {
479 479 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
480 480 stk_reg_pairs += 2;
481 481 }
482 482 break;
483 483 #endif // _LP64
484 484
485 485 case T_LONG:
486 486 assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
487 487 #ifdef _LP64
488 488 if (int_reg < int_reg_max) {
489 489 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
490 490 regs[i].set2(r->as_VMReg());
491 491 } else {
492 492 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
493 493 stk_reg_pairs += 2;
494 494 }
495 495 #else
496 496 #ifdef COMPILER2
497 497 // For 32-bit build, can't pass longs in O-regs because they become
498 498 // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
499 499 // spare and available. This convention isn't used by the Sparc ABI or
500 500 // anywhere else. If we're tiered then we don't use G-regs because c1
501 501 // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
502 502 // G0: zero
503 503 // G1: 1st Long arg
504 504 // G2: global allocated to TLS
505 505 // G3: used in inline cache check
506 506 // G4: 2nd Long arg
507 507 // G5: used in inline cache check
508 508 // G6: used by OS
509 509 // G7: used by OS
510 510
511 511 if (g_reg == G1) {
512 512 regs[i].set2(G1->as_VMReg()); // This long arg in G1
513 513 g_reg = G4; // Where the next arg goes
514 514 } else if (g_reg == G4) {
515 515 regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
516 516 g_reg = noreg; // No more longs in registers
517 517 } else {
518 518 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
519 519 stk_reg_pairs += 2;
520 520 }
521 521 #else // COMPILER2
522 522 if (int_reg_pairs + 1 < int_reg_max) {
523 523 if (is_outgoing) {
524 524 regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
525 525 } else {
526 526 regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
527 527 }
528 528 int_reg_pairs += 2;
529 529 } else {
530 530 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
531 531 stk_reg_pairs += 2;
532 532 }
533 533 #endif // COMPILER2
534 534 #endif // _LP64
535 535 break;
536 536
537 537 case T_FLOAT:
538 538 if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
539 539 else regs[i].set1( VMRegImpl::stack2reg(stk_reg++));
540 540 break;
541 541 case T_DOUBLE:
542 542 assert(sig_bt[i+1] == T_VOID, "expecting half");
543 543 if (flt_reg_pairs + 1 < flt_reg_max) {
544 544 regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
545 545 flt_reg_pairs += 2;
546 546 } else {
547 547 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
548 548 stk_reg_pairs += 2;
549 549 }
550 550 break;
551 551 case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
552 552 default:
553 553 ShouldNotReachHere();
554 554 }
555 555 }
556 556
557 557 // return the amount of stack space these arguments will need.
558 558 return stk_reg_pairs;
559 559
560 560 }
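// A sketch of the two passes above for a hypothetical 64-bit outgoing
// signature (int, long, float, double), i.e.
//   sig_bt = { T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID }:
// pass 1 counts int_reg_cnt == 2 (the int plus the long, which competes
// for int registers on LP64) and flt_reg_cnt == 1; pass 2 then assigns
//   T_INT    -> O0       (set1)
//   T_LONG   -> O1       (set2, both halves)
//   T_FLOAT  -> F0       (set1)
//   T_DOUBLE -> F2:F3    (set2; flt_reg_pairs rounded up to 2)
// and returns stk_reg_pairs == 0, since nothing spilled to the stack.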
561 561
562 562 // Helper class mostly to avoid passing masm everywhere, and handle
563 563 // store displacement overflow logic.
564 564 class AdapterGenerator {
565 565 MacroAssembler *masm;
566 566 Register Rdisp;
567 567 void set_Rdisp(Register r) { Rdisp = r; }
568 568
569 569 void patch_callers_callsite();
570 570
571 571 // base+st_off points to top of argument
572 572 int arg_offset(const int st_off) { return st_off; }
573 573 int next_arg_offset(const int st_off) {
574 574 return st_off - Interpreter::stackElementSize;
575 575 }
576 576
577 577 // Argument slot values may be loaded first into a register because
578 578 // they might not fit into displacement.
579 579 RegisterOrConstant arg_slot(const int st_off);
580 580 RegisterOrConstant next_arg_slot(const int st_off);
581 581
582 582 // Stores long into offset pointed to by base
583 583 void store_c2i_long(Register r, Register base,
584 584 const int st_off, bool is_stack);
585 585 void store_c2i_object(Register r, Register base,
586 586 const int st_off);
587 587 void store_c2i_int(Register r, Register base,
588 588 const int st_off);
589 589 void store_c2i_double(VMReg r_2,
590 590 VMReg r_1, Register base, const int st_off);
591 591 void store_c2i_float(FloatRegister f, Register base,
592 592 const int st_off);
593 593
594 594 public:
595 595 void gen_c2i_adapter(int total_args_passed,
596 596 // VMReg max_arg,
597 597 int comp_args_on_stack, // VMRegStackSlots
598 598 const BasicType *sig_bt,
599 599 const VMRegPair *regs,
600 600 Label& skip_fixup);
601 601 void gen_i2c_adapter(int total_args_passed,
602 602 // VMReg max_arg,
603 603 int comp_args_on_stack, // VMRegStackSlots
604 604 const BasicType *sig_bt,
605 605 const VMRegPair *regs);
606 606
607 607 AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
608 608 };
609 609
610 610
611 611 // Patch the caller's callsite with entry to compiled code if it exists.
612 612 void AdapterGenerator::patch_callers_callsite() {
613 613 Label L;
614 614 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
615 615 __ br_null(G3_scratch, false, __ pt, L);
616 616 // Schedule the branch target address early.
617 617 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
618 618 // Call into the VM to patch the caller, then jump to compiled callee
619 619 __ save_frame(4); // Args in compiled layout; do not blow them
620 620
621 621 // Must save all the live Gregs; the list is:
622 622 // G1: 1st Long arg (32bit build)
623 623 // G2: global allocated to TLS
624 624 // G3: used in inline cache check (scratch)
625 625 // G4: 2nd Long arg (32bit build);
626 626 // G5: used in inline cache check (methodOop)
627 627
628 628 // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.
629 629
630 630 #ifdef _LP64
631 631 // mov(s,d)
632 632 __ mov(G1, L1);
633 633 __ mov(G4, L4);
634 634 __ mov(G5_method, L5);
635 635 __ mov(G5_method, O0); // VM needs target method
636 636 __ mov(I7, O1); // VM needs caller's callsite
637 637 // Must be a leaf call...
638 638 // can be very far once the blob has been relocated
639 639 AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
640 640 __ relocate(relocInfo::runtime_call_type);
641 641 __ jumpl_to(dest, O7, O7);
642 642 __ delayed()->mov(G2_thread, L7_thread_cache);
643 643 __ mov(L7_thread_cache, G2_thread);
644 644 __ mov(L1, G1);
645 645 __ mov(L4, G4);
646 646 __ mov(L5, G5_method);
647 647 #else
648 648 __ stx(G1, FP, -8 + STACK_BIAS);
649 649 __ stx(G4, FP, -16 + STACK_BIAS);
650 650 __ mov(G5_method, L5);
651 651 __ mov(G5_method, O0); // VM needs target method
652 652 __ mov(I7, O1); // VM needs caller's callsite
653 653 // Must be a leaf call...
654 654 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
655 655 __ delayed()->mov(G2_thread, L7_thread_cache);
656 656 __ mov(L7_thread_cache, G2_thread);
657 657 __ ldx(FP, -8 + STACK_BIAS, G1);
658 658 __ ldx(FP, -16 + STACK_BIAS, G4);
659 659 __ mov(L5, G5_method);
660 660 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
661 661 #endif /* _LP64 */
662 662
663 663 __ restore(); // Restore args
664 664 __ bind(L);
665 665 }
666 666
667 667
668 668 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
669 669 RegisterOrConstant roc(arg_offset(st_off));
670 670 return __ ensure_simm13_or_reg(roc, Rdisp);
671 671 }
672 672
673 673 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
674 674 RegisterOrConstant roc(next_arg_offset(st_off));
675 675 return __ ensure_simm13_or_reg(roc, Rdisp);
676 676 }
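// The register-or-constant dance above exists because a SPARC
// load/store immediate displacement is a signed 13-bit field
// (-4096..4095). For example, a hypothetical offset of 5000 does not
// fit, so ensure_simm13_or_reg materializes it into Rdisp and the
// memory access uses the register-displacement form instead.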
677 677
678 678
679 679 // Stores long into offset pointed to by base
680 680 void AdapterGenerator::store_c2i_long(Register r, Register base,
681 681 const int st_off, bool is_stack) {
682 682 #ifdef _LP64
683 683 // In V9, longs are given 2 64-bit slots in the interpreter, but the
684 684 // data is passed in only 1 slot.
685 685 __ stx(r, base, next_arg_slot(st_off));
686 686 #else
687 687 #ifdef COMPILER2
688 688 // Misaligned store of 64-bit data
689 689 __ stw(r, base, arg_slot(st_off)); // lo bits
690 690 __ srlx(r, 32, r);
691 691 __ stw(r, base, next_arg_slot(st_off)); // hi bits
692 692 #else
693 693 if (is_stack) {
694 694 // Misaligned store of 64-bit data
695 695 __ stw(r, base, arg_slot(st_off)); // lo bits
696 696 __ srlx(r, 32, r);
697 697 __ stw(r, base, next_arg_slot(st_off)); // hi bits
698 698 } else {
699 699 __ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
700 700 __ stw(r , base, next_arg_slot(st_off)); // hi bits
701 701 }
702 702 #endif // COMPILER2
703 703 #endif // _LP64
704 704 }
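// Worked example of the misaligned 32-bit-build store above: with a
// hypothetical 64-bit value 0xAABBCCDD11223344 in r, the first stw
// writes the low word 0x11223344 at arg_slot, the srlx shifts the high
// word down, and the second stw writes 0xAABBCCDD at next_arg_slot;
// together the two 32-bit interpreter stack elements hold the full long.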
705 705
706 706 void AdapterGenerator::store_c2i_object(Register r, Register base,
707 707 const int st_off) {
708 708 __ st_ptr (r, base, arg_slot(st_off));
709 709 }
710 710
711 711 void AdapterGenerator::store_c2i_int(Register r, Register base,
712 712 const int st_off) {
713 713 __ st (r, base, arg_slot(st_off));
714 714 }
715 715
716 716 // Stores into offset pointed to by base
717 717 void AdapterGenerator::store_c2i_double(VMReg r_2,
718 718 VMReg r_1, Register base, const int st_off) {
719 719 #ifdef _LP64
720 720 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
721 721 // data is passed in only 1 slot.
722 722 __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
723 723 #else
724 724 // Need to marshal 64-bit value from misaligned Lesp loads
725 725 __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
726 726 __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
727 727 #endif
728 728 }
729 729
730 730 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
731 731 const int st_off) {
732 732 __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
733 733 }
734 734
735 735 void AdapterGenerator::gen_c2i_adapter(
736 736 int total_args_passed,
737 737 // VMReg max_arg,
738 738 int comp_args_on_stack, // VMRegStackSlots
739 739 const BasicType *sig_bt,
740 740 const VMRegPair *regs,
741 741 Label& skip_fixup) {
742 742
743 743 // Before we get into the guts of the C2I adapter, see if we should be here
744 744 // at all. We've come from compiled code and are attempting to jump to the
745 745 // interpreter, which means the caller made a static call to get here
746 746 // (vcalls always get a compiled target if there is one). Check for a
747 747 // compiled target. If there is one, we need to patch the caller's call.
748 748 // However we will run interpreted if we come thru here. The next pass
749 749 // thru the call site will run compiled. If we ran compiled here then
750 750 // we can (theoretically) do endless i2c->c2i->i2c transitions during
751 751 // deopt/uncommon trap cycles. If we always go interpreted here then
752 752 // we can have at most one and don't need to play any tricks to keep
753 753 // from endlessly growing the stack.
754 754 //
755 755 // Actually if we detected that we had an i2c->c2i transition here we
756 756 // ought to be able to reset the world back to the state of the interpreted
757 757 // call and not bother building another interpreter arg area. We don't
758 758 // do that at this point.
759 759
760 760 patch_callers_callsite();
761 761
762 762 __ bind(skip_fixup);
763 763
764 764 // Since all args are passed on the stack, total_args_passed*wordSize is the
765 765 // space we need. Add in varargs area needed by the interpreter. Round up
766 766 // to stack alignment.
767 767 const int arg_size = total_args_passed * Interpreter::stackElementSize;
768 768 const int varargs_area =
769 769 (frame::varargs_offset - frame::register_save_words)*wordSize;
770 770 const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
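// Hypothetical sizing of the space carved out above, assuming a 64-bit
// build (stackElementSize == 8, wordSize == 8) and
// (frame::varargs_offset - frame::register_save_words) == 7 (the real
// values come from the frame constants): for total_args_passed == 5,
//   arg_size     = 5 * 8 = 40
//   varargs_area = 7 * 8 = 56
//   extraspace   = round_to(96, 16) = 96 bytes of new stack.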
771 771
772 772 int bias = STACK_BIAS;
773 773 const int interp_arg_offset = frame::varargs_offset*wordSize +
774 774 (total_args_passed-1)*Interpreter::stackElementSize;
775 775
776 776 Register base = SP;
777 777
778 778 #ifdef _LP64
779 779 // In the 64-bit build, because of wider slots and STACK_BIAS, we can run
780 780 // out of bits in the displacement to do loads and stores. Use g3 as
781 781 // temporary displacement.
782 782 if (! __ is_simm13(extraspace)) {
783 783 __ set(extraspace, G3_scratch);
784 784 __ sub(SP, G3_scratch, SP);
785 785 } else {
786 786 __ sub(SP, extraspace, SP);
787 787 }
788 788 set_Rdisp(G3_scratch);
789 789 #else
790 790 __ sub(SP, extraspace, SP);
791 791 #endif // _LP64
792 792
793 793 // First write G1 (if used) to wherever it must go
794 794 for (int i=0; i<total_args_passed; i++) {
795 795 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
796 796 VMReg r_1 = regs[i].first();
797 797 VMReg r_2 = regs[i].second();
798 798 if (r_1 == G1_scratch->as_VMReg()) {
799 799 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
800 800 store_c2i_object(G1_scratch, base, st_off);
801 801 } else if (sig_bt[i] == T_LONG) {
802 802 assert(!TieredCompilation, "should not use register args for longs");
803 803 store_c2i_long(G1_scratch, base, st_off, false);
804 804 } else {
805 805 store_c2i_int(G1_scratch, base, st_off);
806 806 }
807 807 }
808 808 }
809 809
810 810 // Now write the args into the outgoing interpreter space
811 811 for (int i=0; i<total_args_passed; i++) {
812 812 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
813 813 VMReg r_1 = regs[i].first();
814 814 VMReg r_2 = regs[i].second();
815 815 if (!r_1->is_valid()) {
816 816 assert(!r_2->is_valid(), "");
817 817 continue;
818 818 }
819 819 // Skip G1 if found as we did it first in order to free it up
820 820 if (r_1 == G1_scratch->as_VMReg()) {
821 821 continue;
822 822 }
823 823 #ifdef ASSERT
824 824 bool G1_forced = false;
825 825 #endif // ASSERT
826 826 if (r_1->is_stack()) { // Pretend stack targets are loaded into G1
827 827 #ifdef _LP64
828 828 Register ld_off = Rdisp;
829 829 __ set(reg2offset(r_1) + extraspace + bias, ld_off);
830 830 #else
831 831 int ld_off = reg2offset(r_1) + extraspace + bias;
832 832 #endif // _LP64
833 833 #ifdef ASSERT
834 834 G1_forced = true;
835 835 #endif // ASSERT
836 836 r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
837 837 if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
838 838 else __ ldx(base, ld_off, G1_scratch);
839 839 }
840 840
841 841 if (r_1->is_Register()) {
842 842 Register r = r_1->as_Register()->after_restore();
843 843 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
844 844 store_c2i_object(r, base, st_off);
845 845 } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
846 846 #ifndef _LP64
847 847 if (TieredCompilation) {
848 848 assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
849 849 }
850 850 #endif // _LP64
851 851 store_c2i_long(r, base, st_off, r_2->is_stack());
852 852 } else {
853 853 store_c2i_int(r, base, st_off);
854 854 }
855 855 } else {
856 856 assert(r_1->is_FloatRegister(), "");
857 857 if (sig_bt[i] == T_FLOAT) {
858 858 store_c2i_float(r_1->as_FloatRegister(), base, st_off);
859 859 } else {
860 860 assert(sig_bt[i] == T_DOUBLE, "wrong type");
861 861 store_c2i_double(r_2, r_1, base, st_off);
862 862 }
863 863 }
864 864 }
865 865
866 866 #ifdef _LP64
867 867 // Need to reload G3_scratch, used for temporary displacements.
868 868 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
869 869
870 870 // Pass O5_savedSP as an argument to the interpreter.
871 871 // The interpreter will restore SP to this value before returning.
872 872 __ set(extraspace, G1);
873 873 __ add(SP, G1, O5_savedSP);
874 874 #else
875 875 // Pass O5_savedSP as an argument to the interpreter.
876 876 // The interpreter will restore SP to this value before returning.
877 877 __ add(SP, extraspace, O5_savedSP);
878 878 #endif // _LP64
879 879
880 880 __ mov((frame::varargs_offset)*wordSize -
881 881 1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
882 882 // Jump to the interpreter just as if interpreter was doing it.
883 883 __ jmpl(G3_scratch, 0, G0);
884 884 // Set up Lesp for the call. Cannot actually set Lesp as the current Lesp
885 885 // (really L0) is in use by the compiled frame as a generic temp. However,
886 886 // the interpreter does not know where its args are without some kind of
887 887 // arg pointer being passed in. Pass it in Gargs.
888 888 __ delayed()->add(SP, G1, Gargs);
889 889 }
890 890
891 891 void AdapterGenerator::gen_i2c_adapter(
892 892 int total_args_passed,
893 893 // VMReg max_arg,
894 894 int comp_args_on_stack, // VMRegStackSlots
895 895 const BasicType *sig_bt,
896 896 const VMRegPair *regs) {
897 897
898 898 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
899 899 // layout. Lesp was saved by the calling I-frame and will be restored on
900 900 // return. Meanwhile, outgoing arg space is all owned by the callee
901 901 // C-frame, so we can mangle it at will. After adjusting the frame size,
902 902 // hoist register arguments and repack other args according to the compiled
903 903 // code convention. Finally, end in a jump to the compiled code. The entry
904 904 // point address is the start of the buffer.
905 905
906 906 // We will only enter here from an interpreted frame and never from after
907 907 // passing thru a c2i. Azul allowed this but we do not. If we lose the
908 908 // race and use a c2i we will remain interpreted for the race loser(s).
909 909 // This removes all sorts of headaches on the x86 side and also eliminates
910 910 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
911 911
912 912 // As you can see from the list of inputs & outputs there are not a lot
913 913 // of temp registers to work with: mostly G1, G3 & G4.
914 914
915 915 // Inputs:
916 916 // G2_thread - TLS
917 917 // G5_method - Method oop
918 918 // G4 (Gargs) - Pointer to interpreter's args
919 919 // O0..O4 - free for scratch
920 920 // O5_savedSP - Caller's saved SP, to be restored if needed
921 921 // O6 - Current SP!
922 922 // O7 - Valid return address
923 923 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
924 924
925 925 // Outputs:
926 926 // G2_thread - TLS
927 927 // G1, G4 - Outgoing long args in 32-bit build
928 928 // O0-O5 - Outgoing args in compiled layout
929 929 // O6 - Adjusted or restored SP
930 930 // O7 - Valid return address
931 931 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
932 932 // F0-F7 - more outgoing args
933 933
934 934
935 935 // Gargs is the incoming argument base, and also an outgoing argument.
936 936 __ sub(Gargs, BytesPerWord, Gargs);
937 937
938 938 // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
939 939 // WITH O7 HOLDING A VALID RETURN PC
940 940 //
941 941 // | |
942 942 // : java stack :
943 943 // | |
944 944 // +--------------+ <--- start of outgoing args
945 945 // | receiver | |
946 946 // : rest of args : |---size is java-arg-words
947 947 // | | |
948 948 // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
949 949 // | | |
950 950 // : unused : |---Space for max Java stack, plus stack alignment
951 951 // | | |
952 952 // +--------------+ <--- SP + 16*wordsize
953 953 // | |
954 954 // : window :
955 955 // | |
956 956 // +--------------+ <--- SP
957 957
958 958 // WE REPACK THE STACK. We use the common calling convention layout as
959 959 // discovered by calling SharedRuntime::calling_convention. We assume it
960 960 // causes an arbitrary shuffle of memory, which may require some register
961 961 // temps to do the shuffle. We hope for (and optimize for) the case where
962 962 // temps are not needed. We may have to resize the stack slightly, in case
963 963 // we need alignment padding (32-bit interpreter can pass longs & doubles
964 964 // misaligned, but the compilers expect them aligned).
965 965 //
966 966 // | |
967 967 // : java stack :
968 968 // | |
969 969 // +--------------+ <--- start of outgoing args
970 970 // | pad, align | |
971 971 // +--------------+ |
972 972 // | ints, floats | |---Outgoing stack args, packed low.
973 973 // +--------------+ | First few args in registers.
974 974 // : doubles : |
975 975 // | longs | |
976 976 // +--------------+ <--- SP' + 16*wordsize
977 977 // | |
978 978 // : window :
979 979 // | |
980 980 // +--------------+ <--- SP'
981 981
982 982 // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
983 983 // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
984 984 // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
985 985
986 986 // Cut-out for having no stack args. Since up to 6 args are passed
987 987 // in registers, we will commonly have no stack args.
988 988 if (comp_args_on_stack > 0) {
989 989
990 990 // Convert VMReg stack slots to words.
991 991 int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
992 992 // Round up to minimum stack alignment, in wordSize
993 993 comp_words_on_stack = round_to(comp_words_on_stack, 2);
994 994 // Now compute the distance from Lesp to SP. This calculation does not
995 995 // include the space for total_args_passed because Lesp has not yet popped
996 996 // the arguments.
997 997 __ sub(SP, (comp_words_on_stack)*wordSize, SP);
998 998 }
999 999
1000 1000 // Will jump to the compiled code just as if compiled code was doing it.
1001 1001 // Pre-load the register-jump target early, to schedule it better.
1002 1002 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
1003 1003
1004 1004 // Now generate the shuffle code. Pick up all register args and move the
1005 1005 // rest through G1_scratch.
1006 1006 for (int i=0; i<total_args_passed; i++) {
1007 1007 if (sig_bt[i] == T_VOID) {
1008 1008 // Longs and doubles are passed in native word order, but misaligned
1009 1009 // in the 32-bit build.
1010 1010 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1011 1011 continue;
1012 1012 }
1013 1013
1014 1014 // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
1015 1015 // 32-bit build and aligned in the 64-bit build. Look for the obvious
1016 1016 // ldx/lddf optimizations.
1017 1017
1018 1018 // Load in argument order going down.
1019 1019 const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
1020 1020 set_Rdisp(G1_scratch);
1021 1021
1022 1022 VMReg r_1 = regs[i].first();
1023 1023 VMReg r_2 = regs[i].second();
1024 1024 if (!r_1->is_valid()) {
1025 1025 assert(!r_2->is_valid(), "");
1026 1026 continue;
1027 1027 }
1028 1028 if (r_1->is_stack()) { // Pretend stack targets are loaded into F8/F9
1029 1029 r_1 = F8->as_VMReg(); // as part of the load/store shuffle
1030 1030 if (r_2->is_valid()) r_2 = r_1->next();
1031 1031 }
1032 1032 if (r_1->is_Register()) { // Register argument
1033 1033 Register r = r_1->as_Register()->after_restore();
1034 1034 if (!r_2->is_valid()) {
1035 1035 __ ld(Gargs, arg_slot(ld_off), r);
1036 1036 } else {
1037 1037 #ifdef _LP64
1038 1038 // In V9, longs are given 2 64-bit slots in the interpreter, but the
1039 1039 // data is passed in only 1 slot.
1040 1040 RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
1041 1041 next_arg_slot(ld_off) : arg_slot(ld_off);
1042 1042 __ ldx(Gargs, slot, r);
1043 1043 #else
1044 1044 // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
1045 1045 // stack shuffle. Load the first 2 longs into G1/G4 later.
1046 1046 #endif
1047 1047 }
1048 1048 } else {
1049 1049 assert(r_1->is_FloatRegister(), "");
1050 1050 if (!r_2->is_valid()) {
1051 1051 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
1052 1052 } else {
1053 1053 #ifdef _LP64
1054 1054 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
1055 1055 // data is passed in only 1 slot. This code also handles longs that
1056 1056 // are passed on the stack, but need a stack-to-stack move through a
1057 1057 // spare float register.
1058 1058 RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
1059 1059 next_arg_slot(ld_off) : arg_slot(ld_off);
1060 1060 __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
1061 1061 #else
1062 1062 // Need to marshal 64-bit value from misaligned Lesp loads
1063 1063 __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
1064 1064 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
1065 1065 #endif
1066 1066 }
1067 1067 }
1068 1068 // Was the argument really intended to be on the stack, but was loaded
1069 1069 // into F8/F9?
1070 1070 if (regs[i].first()->is_stack()) {
1071 1071 assert(r_1->as_FloatRegister() == F8, "fix this code");
1072 1072 // Convert stack slot to an SP offset
1073 1073 int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
1074 1074 // Store down the shuffled stack word. Target address _is_ aligned.
1075 1075 RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
1076 1076 if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
1077 1077 else __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
1078 1078 }
1079 1079 }
1080 1080 bool made_space = false;
1081 1081 #ifndef _LP64
1082 1082 // May need to pick up a few long args in G1/G4
1083 1083 bool g4_crushed = false;
1084 1084 bool g3_crushed = false;
1085 1085 for (int i=0; i<total_args_passed; i++) {
1086 1086 if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
1087 1087 // Load in argument order going down
1088 1088 int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
1089 1089 // Need to marshal 64-bit value from misaligned Lesp loads
1090 1090 Register r = regs[i].first()->as_Register()->after_restore();
1091 1091 if (r == G1 || r == G4) {
1092 1092 assert(!g4_crushed, "ordering problem");
1093 1093 if (r == G4){
1094 1094 g4_crushed = true;
1095 1095 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
1096 1096 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1097 1097 } else {
1098 1098 // better schedule this way
1099 1099 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1100 1100 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
1101 1101 }
1102 1102 g3_crushed = true;
1103 1103 __ sllx(r, 32, r);
1104 1104 __ or3(G3_scratch, r, r);
1105 1105 } else {
1106 1106 assert(r->is_out(), "longs passed in two O registers");
1107 1107 __ ld (Gargs, arg_slot(ld_off) , r->successor()); // Load lo bits
1108 1108 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1109 1109 }
1110 1110 }
1111 1111 }
1112 1112 #endif
1113 1113
1114 1114 // Jump to the compiled code just as if compiled code was doing it.
1115 1115 //
1116 1116 #ifndef _LP64
1117 1117 if (g3_crushed) {
1118 1118 // Rats, the load was wasted; at least it is in cache...
1119 1119 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
1120 1120 }
1121 1121 #endif /* _LP64 */
1122 1122
1123 1123 // 6243940 We might end up in handle_wrong_method if
1124 1124 // the callee is deoptimized as we race thru here. If that
1125 1125 // happens we don't want to take a safepoint because the
1126 1126 // caller frame will look interpreted and arguments are now
1127 1127 // "compiled" so it is much better to make this transition
1128 1128 // invisible to the stack walking code. Unfortunately, if
1129 1129 // we try to find the callee by normal means, a safepoint
1130 1130 // is possible. So we stash the desired callee in the thread
1131 1131 // and the VM will find it there should this case occur.
1132 1132 Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
1133 1133 __ st_ptr(G5_method, callee_target_addr);
1134 1134
1135 1135 if (StressNonEntrant) {
1136 1136 // Open a big window for deopt failure
1137 1137 __ save_frame(0);
1138 1138 __ mov(G0, L0);
1139 1139 Label loop;
1140 1140 __ bind(loop);
1141 1141 __ sub(L0, 1, L0);
1142 1142 __ br_null(L0, false, Assembler::pt, loop);
1143 1143 __ delayed()->nop();
1144 1144
1145 1145 __ restore();
1146 1146 }
1147 1147
1148 1148
1149 1149 __ jmpl(G3, 0, G0);
1150 1150 __ delayed()->nop();
1151 1151 }
1152 1152
1153 1153 // ---------------------------------------------------------------
1154 1154 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1155 1155 int total_args_passed,
1156 1156 // VMReg max_arg,
1157 1157 int comp_args_on_stack, // VMRegStackSlots
1158 1158 const BasicType *sig_bt,
1159 1159 const VMRegPair *regs,
1160 1160 AdapterFingerPrint* fingerprint) {
1161 1161 address i2c_entry = __ pc();
1162 1162
1163 1163 AdapterGenerator agen(masm);
1164 1164
1165 1165 agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
1166 1166
1167 1167
1168 1168 // -------------------------------------------------------------------------
1169 1169 // Generate a C2I adapter. On entry we know G5 holds the methodOop. The
1170 1170 // args start out packed in the compiled layout. They need to be unpacked
1171 1171 // into the interpreter layout. This will almost always require some stack
1172 1172 // space. We grow the current (compiled) stack, then repack the args. We
1173 1173 // finally end in a jump to the generic interpreter entry point. On exit
1174 1174 // from the interpreter, the interpreter will restore our SP (lest the
1175 1175 // compiled code, which relies solely on SP and not FP, get sick).
1176 1176
1177 1177 address c2i_unverified_entry = __ pc();
1178 1178 Label skip_fixup;
1179 1179 {
1180 1180 #if !defined(_LP64) && defined(COMPILER2)
1181 1181 Register R_temp = L0; // another scratch register
1182 1182 #else
1183 1183 Register R_temp = G1; // another scratch register
1184 1184 #endif
1185 1185
1186 1186 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1187 1187
1188 1188 __ verify_oop(O0);
1189 1189 __ verify_oop(G5_method);
1190 1190 __ load_klass(O0, G3_scratch);
1191 1191 __ verify_oop(G3_scratch);
1192 1192
1193 1193 #if !defined(_LP64) && defined(COMPILER2)
1194 1194 __ save(SP, -frame::register_save_words*wordSize, SP);
1195 1195 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
1196 1196 __ verify_oop(R_temp);
1197 1197 __ cmp(G3_scratch, R_temp);
1198 1198 __ restore();
1199 1199 #else
1200 1200 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
1201 1201 __ verify_oop(R_temp);
1202 1202 __ cmp(G3_scratch, R_temp);
1203 1203 #endif
1204 1204
1205 1205 Label ok, ok2;
1206 1206 __ brx(Assembler::equal, false, Assembler::pt, ok);
1207 1207 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
1208 1208 __ jump_to(ic_miss, G3_scratch);
1209 1209 __ delayed()->nop();
1210 1210
1211 1211 __ bind(ok);
1212 1212 // Method might have been compiled since the call site was patched to
1213 1213 // interpreted; if that is the case, treat it as a miss so we can get
1214 1214 // the call site corrected.
1215 1215 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
1216 1216 __ bind(ok2);
1217 1217 __ br_null(G3_scratch, false, __ pt, skip_fixup);
1218 1218 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
1219 1219 __ jump_to(ic_miss, G3_scratch);
1220 1220 __ delayed()->nop();
1221 1221
1222 1222 }
1223 1223
1224 1224 address c2i_entry = __ pc();
1225 1225
1226 1226 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1227 1227
1228 1228 __ flush();
1229 1229 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1230 1230
1231 1231 }
1232 1232
1233 1233 // Helper function for native calling conventions
1234 1234 static VMReg int_stk_helper( int i ) {
1235 1235 // Bias any stack based VMReg we get by ignoring the window area
1236 1236 // but not the register parameter save area.
1237 1237 //
1238 1238 // This is strange for the following reasons. We'd normally expect
1239 1239 // the calling convention to return a VMReg for a stack slot
1240 1240 // completely ignoring any abi reserved area. C2 thinks of that
1241 1241 // abi area as only out_preserve_stack_slots. This does not include
1242 1242 // the area allocated by the C abi to store down integer arguments
1243 1243 // because the java calling convention does not use it. So,
1244 1244 // since c2 assumes that there are only out_preserve_stack_slots
1245 1245 // to bias the optoregs (which impacts VMRegs), the c calling
1246 1246 // convention must add in this bias amount when actually referencing
1247 1247 // any stack location, to make up for the fact that
1248 1248 // out_preserve_stack_slots is insufficient for C calls. What a mess.
1249 1249 // I sure hope those 6 stack words were worth it on every java call!
1250 1250
1251 1251 // Another way of cleaning this up would be for out_preserve_stack_slots
1252 1252 // to take a parameter to say whether it was C or java calling conventions.
1253 1253 // Then things might look a little better (but not much).
1254 1254
1255 1255 int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1256 1256 if( mem_parm_offset < 0 ) {
1257 1257 return as_oRegister(i)->as_VMReg();
1258 1258 } else {
1259 1259 int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1260 1260 // Now return a biased offset that will be correct when out_preserve_slots is added back in
1261 1261 return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1262 1262 }
1263 1263 }
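// Two hypothetical evaluations of int_stk_helper, assuming
// SPARC_ARGS_IN_REGS_NUM == 6 (frame constants below are illustrative):
//   int_stk_helper(2): 2 - 6 < 0, so the arg lands in register O2.
//   int_stk_helper(7): mem_parm_offset == 1; with
//     memory_parameter_word_sp_offset == 23 and slots_per_word == 2,
//     actual_offset = (1 + 23) * 2 = 48 slots, and the returned VMReg is
//     stack2reg(48 - out_preserve_stack_slots()).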
1264 1264
1265 1265
1266 1266 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1267 1267 VMRegPair *regs,
1268 1268 int total_args_passed) {
1269 1269
1270 1270 // Return the number of VMReg stack_slots needed for the args.
1271 1271 // This value does not include an abi space (like register window
1272 1272 // save area).
1273 1273
1274 1274 // The native convention is V8 if !LP64
1275 1275 // The LP64 convention is the V9 convention which is slightly more sane.
1276 1276
1277 1277 // We return the amount of VMReg stack slots we need to reserve for all
1278 1278 // the arguments NOT counting out_preserve_stack_slots. Since we always
1279 1279 // have space for storing at least 6 registers to memory we start with that.
1280 1280 // See int_stk_helper for a further discussion.
1281 1281 int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
1282 1282
1283 1283 #ifdef _LP64
1284 1284 // V9 convention: All things "as-if" on double-wide stack slots.
1285 1285 // Hoist any int/ptr/long's in the first 6 to int regs.
1286 1286 // Hoist any flt/dbl's in the first 16 dbl regs.
1287 1287 int j = 0; // Count of actual args, not HALVES
1288 1288 for( int i=0; i<total_args_passed; i++, j++ ) {
1289 1289 switch( sig_bt[i] ) {
1290 1290 case T_BOOLEAN:
1291 1291 case T_BYTE:
1292 1292 case T_CHAR:
1293 1293 case T_INT:
1294 1294 case T_SHORT:
1295 1295 regs[i].set1( int_stk_helper( j ) ); break;
1296 1296 case T_LONG:
1297 1297 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1298 1298 case T_ADDRESS: // raw pointers, like current thread, for VM calls
1299 1299 case T_ARRAY:
1300 1300 case T_OBJECT:
1301 1301 regs[i].set2( int_stk_helper( j ) );
1302 1302 break;
1303 1303 case T_FLOAT:
1304 1304 if ( j < 16 ) {
1305 1305 // V9ism: floats go in ODD registers
1306 1306 regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
1307 1307 } else {
1308 1308 // V9ism: floats go in ODD stack slot
1309 1309 regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
1310 1310 }
1311 1311 break;
1312 1312 case T_DOUBLE:
1313 1313 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1314 1314 if ( j < 16 ) {
1315 1315 // V9ism: doubles go in EVEN/ODD regs
1316 1316 regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
1317 1317 } else {
1318 1318 // V9ism: doubles go in EVEN/ODD stack slots
1319 1319 regs[i].set2(VMRegImpl::stack2reg(j<<1));
1320 1320 }
1321 1321 break;
1322 1322 case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
1323 1323 default:
1324 1324 ShouldNotReachHere();
1325 1325 }
1326 1326 if (regs[i].first()->is_stack()) {
1327 1327 int off = regs[i].first()->reg2stack();
1328 1328 if (off > max_stack_slots) max_stack_slots = off;
1329 1329 }
1330 1330 if (regs[i].second()->is_stack()) {
1331 1331 int off = regs[i].second()->reg2stack();
1332 1332 if (off > max_stack_slots) max_stack_slots = off;
1333 1333 }
1334 1334 }
1335 1335
1336 1336 #else // _LP64
1337 1337 // V8 convention: first 6 things in O-regs, rest on stack.
1338 1338 // Alignment is willy-nilly.
1339 1339 for( int i=0; i<total_args_passed; i++ ) {
1340 1340 switch( sig_bt[i] ) {
1341 1341 case T_ADDRESS: // raw pointers, like current thread, for VM calls
1342 1342 case T_ARRAY:
1343 1343 case T_BOOLEAN:
1344 1344 case T_BYTE:
1345 1345 case T_CHAR:
1346 1346 case T_FLOAT:
1347 1347 case T_INT:
1348 1348 case T_OBJECT:
1349 1349 case T_SHORT:
1350 1350 regs[i].set1( int_stk_helper( i ) );
1351 1351 break;
1352 1352 case T_DOUBLE:
1353 1353 case T_LONG:
1354 1354 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1355 1355 regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
1356 1356 break;
1357 1357 case T_VOID: regs[i].set_bad(); break;
1358 1358 default:
1359 1359 ShouldNotReachHere();
1360 1360 }
1361 1361 if (regs[i].first()->is_stack()) {
1362 1362 int off = regs[i].first()->reg2stack();
1363 1363 if (off > max_stack_slots) max_stack_slots = off;
1364 1364 }
1365 1365 if (regs[i].second()->is_stack()) {
1366 1366 int off = regs[i].second()->reg2stack();
1367 1367 if (off > max_stack_slots) max_stack_slots = off;
1368 1368 }
1369 1369 }
1370 1370 #endif // _LP64
1371 1371
1372 1372 return round_to(max_stack_slots + 1, 2);
1373 1373
1374 1374 }
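
The register-selection formulas in the V9 arm above are easy to sanity-check in isolation; this small sketch prints where the first few argument positions would land if they were floats or doubles (only the shift arithmetic from the switch is reproduced, nothing else):

    #include <cstdio>

    int main() {
      for (int j = 0; j < 4; j++) {
        printf("arg %d as float  -> %%f%d (odd single reg)\n", j, 1 + (j << 1));
        printf("arg %d as double -> %%f%d (even/odd pair)\n",  j, j << 1);
      }
      return 0;
    }
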
1375 1375
1376 1376
1377 1377 // ---------------------------------------------------------------------------
1378 1378 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1379 1379 switch (ret_type) {
1380 1380 case T_FLOAT:
1381 1381 __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1382 1382 break;
1383 1383 case T_DOUBLE:
1384 1384 __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1385 1385 break;
1386 1386 }
1387 1387 }
1388 1388
1389 1389 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1390 1390 switch (ret_type) {
1391 1391 case T_FLOAT:
1392 1392 __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1393 1393 break;
1394 1394 case T_DOUBLE:
1395 1395 __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1396 1396 break;
1397 1397 }
1398 1398 }
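
Both helpers address the two spare slots at the very top of the wrapper frame; a quick sketch of the offset arithmetic, assuming 4-byte stack slots, a hypothetical 64-slot frame, and the 64-bit SPARC STACK_BIAS of 2047:

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4, frame_slots = 64, bias = 2047; // frame_slots is illustrative
      printf("float  saved at [SP + %d]\n", frame_slots * stack_slot_size - 4 + bias);
      printf("double saved at [SP + %d]\n", frame_slots * stack_slot_size - 8 + bias);
      return 0;
    }
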
1399 1399
1400 1400 // Check and forward any pending exception. Thread is stored in
1401 1401 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
1402 1402 // is no exception handler. We merely pop this frame off and throw the
1403 1403 // exception in the caller's frame.
1404 1404 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1405 1405 Label L;
1406 1406 __ br_null(Rex_oop, false, Assembler::pt, L);
1407 1407 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1408 1408 // Since this is a native call, we *know* the proper exception handler
1409 1409 // without calling into the VM: it's the empty function. Just pop this
1410 1410 // frame and then jump to forward_exception_entry; O7 will contain the
1411 1411 // native caller's return PC.
1412 1412 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1413 1413 __ jump_to(exception_entry, G3_scratch);
1414 1414 __ delayed()->restore(); // Pop this frame off.
1415 1415 __ bind(L);
1416 1416 }
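
In pseudo-C++ the control flow reduces to the following (a sketch only; the function pointer stands in for the forward_exception stub, and the frame pop is a register-window restore with no C++ equivalent):

    #include <cstddef>

    typedef void (*entry_fn)();

    static void check_forward_sketch(void* pending_exception_oop, entry_fn forward_exception) {
      if (pending_exception_oop != NULL) {
        // real code: restore() pops the frame, then jumps with the
        // native caller's return pc still live in O7
        forward_exception();
      }
    }

    int main() { check_forward_sketch(NULL, NULL); return 0; }
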
1417 1417
1418 1418 // A simple move of an integer-like type
1419 1419 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1420 1420 if (src.first()->is_stack()) {
1421 1421 if (dst.first()->is_stack()) {
1422 1422 // stack to stack
1423 1423 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1424 1424 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1425 1425 } else {
1426 1426 // stack to reg
1427 1427 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1428 1428 }
1429 1429 } else if (dst.first()->is_stack()) {
1430 1430 // reg to stack
1431 1431 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1432 1432 } else {
1433 1433 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1434 1434 }
1435 1435 }
1436 1436
1437 1437 // On 64-bit we will store integer-like items to the stack as
1438 1438 // 64-bit items (sparc abi) even though java would only store
1439 1439 // 32 bits for a parameter. On 32-bit it will simply be 32 bits.
1440 1440 // So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
1441 1441 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1442 1442 if (src.first()->is_stack()) {
1443 1443 if (dst.first()->is_stack()) {
1444 1444 // stack to stack
1445 1445 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1446 1446 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1447 1447 } else {
1448 1448 // stack to reg
1449 1449 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1450 1450 }
1451 1451 } else if (dst.first()->is_stack()) {
1452 1452 // reg to stack
1453 1453 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1454 1454 } else {
1455 1455 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1456 1456 }
1457 1457 }
1458 1458
1459 1459
1460 1460 // An oop arg. Must pass a handle, not the oop itself
1461 1461 static void object_move(MacroAssembler* masm,
1462 1462 OopMap* map,
1463 1463 int oop_handle_offset,
1464 1464 int framesize_in_slots,
1465 1465 VMRegPair src,
1466 1466 VMRegPair dst,
1467 1467 bool is_receiver,
1468 1468 int* receiver_offset) {
1469 1469
1470 1470 // must pass a handle. First figure out the location we use as a handle
1471 1471
1472 1472 if (src.first()->is_stack()) {
1473 1473 // Oop is already on the stack
1474 1474 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1475 1475 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1476 1476 __ ld_ptr(rHandle, 0, L4);
1477 1477 #ifdef _LP64
1478 1478 __ movr( Assembler::rc_z, L4, G0, rHandle );
1479 1479 #else
1480 1480 __ tst( L4 );
1481 1481 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1482 1482 #endif
1483 1483 if (dst.first()->is_stack()) {
1484 1484 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1485 1485 }
1486 1486 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1487 1487 if (is_receiver) {
1488 1488 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1489 1489 }
1490 1490 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1491 1491 } else {
1492 1492 // Oop is in an input register; we must flush it to the stack
1493 1493 const Register rOop = src.first()->as_Register();
1494 1494 const Register rHandle = L5;
1495 1495 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1496 1496 int offset = oop_slot*VMRegImpl::stack_slot_size;
1497 1497 Label skip;
1498 1498 __ st_ptr(rOop, SP, offset + STACK_BIAS);
1499 1499 if (is_receiver) {
1500 1500 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1501 1501 }
1502 1502 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1503 1503 __ add(SP, offset + STACK_BIAS, rHandle);
1504 1504 #ifdef _LP64
1505 1505 __ movr( Assembler::rc_z, rOop, G0, rHandle );
1506 1506 #else
1507 1507 __ tst( rOop );
1508 1508 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1509 1509 #endif
1510 1510
1511 1511 if (dst.first()->is_stack()) {
1512 1512 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1513 1513 } else {
1514 1514 __ mov(rHandle, dst.first()->as_Register());
1515 1515 }
1516 1516 }
1517 1517 }
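
Stripped of register allocation, the handlization above is a two-line function: a handle is the address of the slot holding the oop, except that a NULL oop must become a NULL handle, which is what the branch-free movr/movcc implements. A sketch:

    #include <cstdio>

    typedef void* oop;

    static oop* handlize(oop* slot) {
      return (*slot == NULL) ? NULL : slot;   // NULL oop -> NULL handle
    }

    int main() {
      oop obj = (oop)0xbeef, none = NULL;
      printf("%p %p\n", (void*)handlize(&obj), (void*)handlize(&none));
      return 0;
    }
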
1518 1518
1519 1519 // A float arg may have to do a float reg to int reg conversion
1520 1520 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1521 1521 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1522 1522
1523 1523 if (src.first()->is_stack()) {
1524 1524 if (dst.first()->is_stack()) {
1525 1525 // stack to stack the easiest of the bunch
1526 1526 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1527 1527 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1528 1528 } else {
1529 1529 // stack to reg
1530 1530 if (dst.first()->is_Register()) {
1531 1531 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1532 1532 } else {
1533 1533 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1534 1534 }
1535 1535 }
1536 1536 } else if (dst.first()->is_stack()) {
1537 1537 // reg to stack
1538 1538 if (src.first()->is_Register()) {
1539 1539 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1540 1540 } else {
1541 1541 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1542 1542 }
1543 1543 } else {
1544 1544 // reg to reg
1545 1545 if (src.first()->is_Register()) {
1546 1546 if (dst.first()->is_Register()) {
1547 1547 // gpr -> gpr
1548 1548 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1549 1549 } else {
1550 1550 // gpr -> fpr
1551 1551 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1552 1552 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1553 1553 }
1554 1554 } else if (dst.first()->is_Register()) {
1555 1555 // fpr -> gpr
1556 1556 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1557 1557 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1558 1558 } else {
1559 1559 // fpr -> fpr
1560 1560 // In theory these overlap but the ordering is such that this is likely a nop
1561 1561 if ( src.first() != dst.first()) {
1562 1562 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1563 1563 }
1564 1564 }
1565 1565 }
1566 1566 }
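
The gpr<->fpr legs above bounce through a scratch stack word because this generation of SPARC has no direct move between the integer and floating-point register files; the C++ analogue of that bit-preserving round trip is a memcpy, sketched here:

    #include <cstdio>
    #include <cstring>
    #include <cstdint>

    int main() {
      uint32_t bits = 0x40490fdbu;   // bits sitting in an integer register
      float f;
      memcpy(&f, &bits, sizeof f);   // the st / ldf pair through [FP - 4]
      printf("%f\n", f);             // prints ~3.141593
      return 0;
    }
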
1567 1567
1568 1568 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1569 1569 VMRegPair src_lo(src.first());
1570 1570 VMRegPair src_hi(src.second());
1571 1571 VMRegPair dst_lo(dst.first());
1572 1572 VMRegPair dst_hi(dst.second());
1573 1573 simple_move32(masm, src_lo, dst_lo);
1574 1574 simple_move32(masm, src_hi, dst_hi);
1575 1575 }
1576 1576
1577 1577 // A long move
1578 1578 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1579 1579
1580 1580 // Do the simple ones here; else do two int moves
1581 1581 if (src.is_single_phys_reg() ) {
1582 1582 if (dst.is_single_phys_reg()) {
1583 1583 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1584 1584 } else {
1585 1585 // split src into two separate registers
1586 1586 // Remember hi means hi address or lsw on sparc
1587 1587 // Move msw to lsw
1588 1588 if (dst.second()->is_reg()) {
1589 1589 // MSW -> MSW
1590 1590 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1591 1591 // Now LSW -> LSW
1592 1592 // this will only move lo -> lo and ignore hi
1593 1593 VMRegPair split(dst.second());
1594 1594 simple_move32(masm, src, split);
1595 1595 } else {
1596 1596 VMRegPair split(src.first(), L4->as_VMReg());
1597 1597 // MSW -> MSW (lo ie. first word)
1598 1598 __ srax(src.first()->as_Register(), 32, L4);
1599 1599 split_long_move(masm, split, dst);
1600 1600 }
1601 1601 }
1602 1602 } else if (dst.is_single_phys_reg()) {
1603 1603 if (src.is_adjacent_aligned_on_stack(2)) {
1604 1604 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1605 1605 } else {
1606 1606 // dst is a single reg.
1607 1607 // Remember lo is low address not msb for stack slots
1608 1608 // and lo is the "real" register for registers
1609 1609 // src is split across two locations (reg and/or stack)
1610 1610
1611 1611 VMRegPair split;
1612 1612
1613 1613 if (src.first()->is_reg()) {
1614 1614 // src.lo (msw) is a reg, src.hi is stk/reg
1615 1615 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1616 1616 split.set_pair(dst.first(), src.first());
1617 1617 } else {
1618 1618 // msw is stack move to L5
1619 1619 // lsw is stack move to dst.lo (real reg)
1620 1620 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1621 1621 split.set_pair(dst.first(), L5->as_VMReg());
1622 1622 }
1623 1623
1624 1624 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1625 1625 // msw -> src.lo/L5, lsw -> dst.lo
1626 1626 split_long_move(masm, src, split);
1627 1627
1628 1628 // So dst now has the low order correct. Position the
1629 1629 // msw half.
1630 1630 __ sllx(split.first()->as_Register(), 32, L5);
1631 1631
1632 1632 const Register d = dst.first()->as_Register();
1633 1633 __ or3(L5, d, d);
1634 1634 }
1635 1635 } else {
1636 1636 // For LP64 we can probably do better.
1637 1637 split_long_move(masm, src, dst);
1638 1638 }
1639 1639 }
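
All of the srax/sllx/or3 shuffling above amounts to split-and-recombine of a 64-bit value; in portable form (a sketch, not the VM's code):

    #include <cstdio>
    #include <cstdint>

    int main() {
      uint64_t v   = 0x1122334455667788ull;
      uint32_t msw = (uint32_t)(v >> 32);         // srax src, 32, dst
      uint32_t lsw = (uint32_t)v;                 // the simple_move32 leg
      uint64_t r   = ((uint64_t)msw << 32) | lsw; // sllx + or3 going back
      printf("%s\n", r == v ? "round-trips" : "bug");
      return 0;
    }
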
1640 1640
1641 1641 // A double move
1642 1642 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1643 1643
1644 1644 // The painful thing here is that like long_move a VMRegPair might be
1645 1645 // 1: a single physical register
1646 1646 // 2: two physical registers (v8)
1647 1647 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1648 1648 // 4: two stack slots
1649 1649
1650 1650 // Since src always follows the java calling convention we know that the src pair
1651 1651 // is always either all registers or all stack (and aligned?)
1652 1652
1653 1653 // in a register [lo] and a stack slot [hi]
1654 1654 if (src.first()->is_stack()) {
1655 1655 if (dst.first()->is_stack()) {
1656 1656 // stack to stack the easiest of the bunch
1657 1657 // there ought to be a way to do this where, if alignment is ok, we use ldd/std when possible
1658 1658 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1659 1659 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1660 1660 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1661 1661 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1662 1662 } else {
1663 1663 // stack to reg
1664 1664 if (dst.second()->is_stack()) {
1665 1665 // stack -> reg, stack -> stack
1666 1666 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1667 1667 if (dst.first()->is_Register()) {
1668 1668 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1669 1669 } else {
1670 1670 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1671 1671 }
1672 1672 // This was missing. (very rare case)
1673 1673 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1674 1674 } else {
1675 1675 // stack -> reg
1676 1676 // Eventually optimize for alignment QQQ
1677 1677 if (dst.first()->is_Register()) {
1678 1678 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1679 1679 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1680 1680 } else {
1681 1681 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1682 1682 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1683 1683 }
1684 1684 }
1685 1685 }
1686 1686 } else if (dst.first()->is_stack()) {
1687 1687 // reg to stack
1688 1688 if (src.first()->is_Register()) {
1689 1689 // Eventually optimize for alignment QQQ
1690 1690 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1691 1691 if (src.second()->is_stack()) {
1692 1692 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1693 1693 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1694 1694 } else {
1695 1695 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1696 1696 }
1697 1697 } else {
1698 1698 // fpr to stack
1699 1699 if (src.second()->is_stack()) {
1700 1700 ShouldNotReachHere();
1701 1701 } else {
1702 1702 // Is the stack aligned?
1703 1703 if (reg2offset(dst.first()) & 0x7) {
1704 1704 // No; do as pairs
1705 1705 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1706 1706 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1707 1707 } else {
1708 1708 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1709 1709 }
1710 1710 }
1711 1711 }
1712 1712 } else {
1713 1713 // reg to reg
1714 1714 if (src.first()->is_Register()) {
1715 1715 if (dst.first()->is_Register()) {
1716 1716 // gpr -> gpr
1717 1717 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1718 1718 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1719 1719 } else {
1720 1720 // gpr -> fpr
1721 1721 // ought to be able to do a single store
1722 1722 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1723 1723 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1724 1724 // ought to be able to do a single load
1725 1725 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1726 1726 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1727 1727 }
1728 1728 } else if (dst.first()->is_Register()) {
1729 1729 // fpr -> gpr
1730 1730 // ought to be able to do a single store
1731 1731 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1732 1732 // ought to be able to do a single load
1733 1733 // REMEMBER first() is low address not LSB
1734 1734 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1735 1735 if (dst.second()->is_Register()) {
1736 1736 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1737 1737 } else {
1738 1738 __ ld(FP, -4 + STACK_BIAS, L4);
1739 1739 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1740 1740 }
1741 1741 } else {
1742 1742 // fpr -> fpr
1743 1743 // In theory these overlap but the ordering is such that this is likely a nop
1744 1744 if ( src.first() != dst.first()) {
1745 1745 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1746 1746 }
1747 1747 }
1748 1748 }
1749 1749 }
1750 1750
1751 1751 // Creates an inner frame if one hasn't already been created, and
1752 1752 // saves a copy of the thread in L7_thread_cache
1753 1753 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1754 1754 if (!*already_created) {
1755 1755 __ save_frame(0);
1756 1756 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1757 1757 // Don't use save_thread because it smashes G2 and we merely want to save a
1758 1758 // copy
1759 1759 __ mov(G2_thread, L7_thread_cache);
1760 1760 *already_created = true;
1761 1761 }
1762 1762 }
1763 1763
1764 1764 // ---------------------------------------------------------------------------
1765 1765 // Generate a native wrapper for a given method. The method takes arguments
1766 1766 // in the Java compiled code convention, marshals them to the native
1767 1767 // convention (handlizes oops, etc), transitions to native, makes the call,
1768 1768 // returns to java state (possibly blocking), unhandlizes any result and
1769 1769 // returns.
1770 1770 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1771 1771 methodHandle method,
1772 + int compile_id,
1772 1773 int total_in_args,
1773 1774 int comp_args_on_stack, // in VMRegStackSlots
1774 1775 BasicType *in_sig_bt,
1775 1776 VMRegPair *in_regs,
1776 1777 BasicType ret_type) {
1777 1778
1778 1779 // Native nmethod wrappers never take possession of the oop arguments.
1779 1780 // So the caller will gc the arguments. The only thing we need an
1780 1781 // oopMap for is if the call is static
1781 1782 //
1782 1783 // An OopMap for lock (and class if static), and one for the VM call itself
1783 1784 OopMapSet *oop_maps = new OopMapSet();
1784 1785 intptr_t start = (intptr_t)__ pc();
1785 1786
1786 1787 // First thing, make an ic check to see if we should even be here
1787 1788 {
1788 1789 Label L;
1789 1790 const Register temp_reg = G3_scratch;
1790 1791 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1791 1792 __ verify_oop(O0);
1792 1793 __ load_klass(O0, temp_reg);
1793 1794 __ cmp(temp_reg, G5_inline_cache_reg);
1794 1795 __ brx(Assembler::equal, true, Assembler::pt, L);
1795 1796 __ delayed()->nop();
1796 1797
1797 1798 __ jump_to(ic_miss, temp_reg);
1798 1799 __ delayed()->nop();
1799 1800 __ align(CodeEntryAlignment);
1800 1801 __ bind(L);
1801 1802 }
1802 1803
1803 1804 int vep_offset = ((intptr_t)__ pc()) - start;
1804 1805
1805 1806 #ifdef COMPILER1
1806 1807 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1807 1808 // Object.hashCode can pull the hashCode from the header word
1808 1809 // instead of doing a full VM transition once it's been computed.
1809 1810 // Since hashCode is usually polymorphic at call sites we can't do
1810 1811 // this optimization at the call site without a lot of work.
1811 1812 Label slowCase;
1812 1813 Register receiver = O0;
1813 1814 Register result = O0;
1814 1815 Register header = G3_scratch;
1815 1816 Register hash = G3_scratch; // overwrite header value with hash value
1816 1817 Register mask = G1; // to get hash field from header
1817 1818
1818 1819 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1819 1820 // We depend on hash_mask being at most 32 bits and avoid the use of
1820 1821 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1821 1822 // vm: see markOop.hpp.
1822 1823 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1823 1824 __ sethi(markOopDesc::hash_mask, mask);
1824 1825 __ btst(markOopDesc::unlocked_value, header);
1825 1826 __ br(Assembler::zero, false, Assembler::pn, slowCase);
1826 1827 if (UseBiasedLocking) {
1827 1828 // Check if biased and fall through to runtime if so
1828 1829 __ delayed()->nop();
1829 1830 __ btst(markOopDesc::biased_lock_bit_in_place, header);
1830 1831 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1831 1832 }
1832 1833 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1833 1834
1834 1835 // Check for a valid (non-zero) hash code and get its value.
1835 1836 #ifdef _LP64
1836 1837 __ srlx(header, markOopDesc::hash_shift, hash);
1837 1838 #else
1838 1839 __ srl(header, markOopDesc::hash_shift, hash);
1839 1840 #endif
1840 1841 __ andcc(hash, mask, hash);
1841 1842 __ br(Assembler::equal, false, Assembler::pn, slowCase);
1842 1843 __ delayed()->nop();
1843 1844
1844 1845 // leaf return.
1845 1846 __ retl();
1846 1847 __ delayed()->mov(hash, result);
1847 1848 __ bind(slowCase);
1848 1849 }
1849 1850 #endif // COMPILER1
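
In scalar terms the fast path reads the mark word, gives up if the object is not unlocked (or is biased), and extracts the hash field; a sketch with hypothetical stand-ins for the markOopDesc shift/mask constants:

    #include <cstdio>
    #include <cstdint>

    static const int      kHashShift     = 8;          // stand-in for markOopDesc::hash_shift
    static const uint64_t kHashMask      = 0x7fffffff; // stand-in for markOopDesc::hash_mask
    static const uint64_t kUnlockedValue = 0x1;        // stand-in for markOopDesc::unlocked_value

    // Returns the hash; 0 means "take the slow case".
    static uint64_t fast_hash(uint64_t mark) {
      if ((mark & kUnlockedValue) == 0) return 0;      // locked: slow case
      return (mark >> kHashShift) & kHashMask;         // 0 also forces slow case
    }

    int main() {
      printf("%llx\n", (unsigned long long)fast_hash((0xcafeull << kHashShift) | 1));
      return 0;
    }
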
1850 1851
1851 1852
1852 1853 // We have received a description of where all the java args are located
1853 1854 // on entry to the wrapper. We need to convert these args to where
1854 1855 // the jni function will expect them. To figure out where they go
1855 1856 // we convert the java signature to a C signature by inserting
1856 1857 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1857 1858
1858 1859 int total_c_args = total_in_args + 1;
1859 1860 if (method->is_static()) {
1860 1861 total_c_args++;
1861 1862 }
1862 1863
1863 1864 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1864 1865 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1865 1866
1866 1867 int argc = 0;
1867 1868 out_sig_bt[argc++] = T_ADDRESS;
1868 1869 if (method->is_static()) {
1869 1870 out_sig_bt[argc++] = T_OBJECT;
1870 1871 }
1871 1872
1872 1873 for (int i = 0; i < total_in_args ; i++ ) {
1873 1874 out_sig_bt[argc++] = in_sig_bt[i];
1874 1875 }
1875 1876
1876 1877 // Now figure out where the args must be stored and how much stack space
1877 1878 // they require (neglecting out_preserve_stack_slots but including space for
1878 1879 // storing the 1st six register arguments). It's weird; see int_stk_helper.
1879 1880 //
1880 1881 int out_arg_slots;
1881 1882 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1882 1883
1883 1884 // Compute framesize for the wrapper. We need to handlize all oops in
1884 1885 // registers. We must create space for them here that is disjoint from
1885 1886 // the windowed save area because we have no control over when we might
1886 1887 // flush the window again and overwrite values that gc has since modified.
1887 1888 // (The live window race)
1888 1889 //
1889 1890 // We always just allocate 6 words for storing down these objects. This allows
1890 1891 // us to simply record the base and use the Ireg number to decide which
1891 1892 // slot to use. (Note that the reg number is the inbound number not the
1892 1893 // outbound number).
1893 1894 // We must shuffle args to match the native convention, and include var-args space.
1894 1895
1895 1896 // Calculate the total number of stack slots we will need.
1896 1897
1897 1898 // First count the abi requirement plus all of the outgoing args
1898 1899 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1899 1900
1900 1901 // Now the space for the inbound oop handle area
1901 1902
1902 1903 int oop_handle_offset = stack_slots;
1903 1904 stack_slots += 6*VMRegImpl::slots_per_word;
1904 1905
1905 1906 // Now any space we need for handlizing a klass if static method
1906 1907
1907 1908 int oop_temp_slot_offset = 0;
1908 1909 int klass_slot_offset = 0;
1909 1910 int klass_offset = -1;
1910 1911 int lock_slot_offset = 0;
1911 1912 bool is_static = false;
1912 1913
1913 1914 if (method->is_static()) {
1914 1915 klass_slot_offset = stack_slots;
1915 1916 stack_slots += VMRegImpl::slots_per_word;
1916 1917 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1917 1918 is_static = true;
1918 1919 }
1919 1920
1920 1921 // Plus a lock if needed
1921 1922
1922 1923 if (method->is_synchronized()) {
1923 1924 lock_slot_offset = stack_slots;
1924 1925 stack_slots += VMRegImpl::slots_per_word;
1925 1926 }
1926 1927
1927 1928 // Now a place to save return value or as a temporary for any gpr -> fpr moves
1928 1929 stack_slots += 2;
1929 1930
1930 1931 // Ok, the space we have allocated will look like:
1931 1932 //
1932 1933 //
1933 1934 // FP-> | |
1934 1935 // |---------------------|
1935 1936 // | 2 slots for moves |
1936 1937 // |---------------------|
1937 1938 // | lock box (if sync) |
1938 1939 // |---------------------| <- lock_slot_offset
1939 1940 // | klass (if static) |
1940 1941 // |---------------------| <- klass_slot_offset
1941 1942 // | oopHandle area |
1942 1943 // |---------------------| <- oop_handle_offset
1943 1944 // | outbound memory |
1944 1945 // | based arguments |
1945 1946 // | |
1946 1947 // |---------------------|
1947 1948 // | vararg area |
1948 1949 // |---------------------|
1949 1950 // | |
1950 1951 // SP-> | out_preserved_slots |
1951 1952 //
1952 1953 //
1953 1954
1954 1955
1955 1956 // Now compute the actual number of stack words we need, rounding to keep
1956 1957 // the stack properly aligned.
1957 1958 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1958 1959
1959 1960 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
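
Plugging illustrative numbers into the accounting above shows how the pieces add up (out_preserve and out_args below are hypothetical; the rounding keeps SP 16-byte aligned on a 64-bit build):

    #include <cstdio>

    static int round_to(int x, int m) { return (x + m - 1) / m * m; }

    int main() {
      const int slots_per_word = 2, slot_size = 4;   // 64-bit build
      const int out_preserve = 16, out_args = 14;    // hypothetical values
      const bool is_static = true, is_sync = true;

      int slots = out_preserve + out_args;
      slots += 6 * slots_per_word;                   // oop handle area
      if (is_static) slots += slots_per_word;        // handlized klass
      if (is_sync)   slots += slots_per_word;        // lock box
      slots += 2;                                    // result/move temp
      slots  = round_to(slots, 2 * slots_per_word);
      printf("%d slots = %d bytes\n", slots, slots * slot_size);
      return 0;
    }
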
1960 1961
1961 1962 // Generate stack overflow check before creating frame
1962 1963 __ generate_stack_overflow_check(stack_size);
1963 1964
1964 1965 // Generate a new frame for the wrapper.
1965 1966 __ save(SP, -stack_size, SP);
1966 1967
1967 1968 int frame_complete = ((intptr_t)__ pc()) - start;
1968 1969
1969 1970 __ verify_thread();
1970 1971
1971 1972
1972 1973 //
1973 1974 // We immediately shuffle the arguments so that any vm call we have to
1974 1975 // make from here on out (sync slow path, jvmti, etc.) we will have
1975 1976 // captured the oops from our caller and have a valid oopMap for
1976 1977 // them.
1977 1978
1978 1979 // -----------------
1979 1980 // The Grand Shuffle
1980 1981 //
1981 1982 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1982 1983 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
1983 1984 // the class mirror instead of a receiver. This pretty much guarantees that
1984 1985 // register layout will not match. We ignore these extra arguments during
1985 1986 // the shuffle. The shuffle is described by the two calling convention
1986 1987 // vectors we have in our possession. We simply walk the java vector to
1987 1988 // get the source locations and the c vector to get the destinations.
1988 1989 // Because we have a new window and the argument registers are completely
1989 1990 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
1990 1991 // here.
1991 1992
1992 1993 // This is a trick. We double the stack slots so we can claim
1993 1994 // the oops in the caller's frame. Since we are sure to have
1994 1995 // more args than the caller, doubling is enough to make
1995 1996 // sure we can capture all the incoming oop args from the
1996 1997 // caller.
1997 1998 //
1998 1999 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1999 2000 int c_arg = total_c_args - 1;
2000 2001 // Record sp-based slot for receiver on stack for non-static methods
2001 2002 int receiver_offset = -1;
2002 2003
2003 2004 // We move the arguments backward because the floating point register
2004 2005 // destination will always be a register with a greater or equal register
2005 2006 // number, or the stack.
2006 2007
2007 2008 #ifdef ASSERT
2008 2009 bool reg_destroyed[RegisterImpl::number_of_registers];
2009 2010 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2010 2011 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2011 2012 reg_destroyed[r] = false;
2012 2013 }
2013 2014 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2014 2015 freg_destroyed[f] = false;
2015 2016 }
2016 2017
2017 2018 #endif /* ASSERT */
2018 2019
2019 2020 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2020 2021
2021 2022 #ifdef ASSERT
2022 2023 if (in_regs[i].first()->is_Register()) {
2023 2024 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2024 2025 } else if (in_regs[i].first()->is_FloatRegister()) {
2025 2026 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2026 2027 }
2027 2028 if (out_regs[c_arg].first()->is_Register()) {
2028 2029 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2029 2030 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2030 2031 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2031 2032 }
2032 2033 #endif /* ASSERT */
2033 2034
2034 2035 switch (in_sig_bt[i]) {
2035 2036 case T_ARRAY:
2036 2037 case T_OBJECT:
2037 2038 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2038 2039 ((i == 0) && (!is_static)),
2039 2040 &receiver_offset);
2040 2041 break;
2041 2042 case T_VOID:
2042 2043 break;
2043 2044
2044 2045 case T_FLOAT:
2045 2046 float_move(masm, in_regs[i], out_regs[c_arg]);
2046 2047 break;
2047 2048
2048 2049 case T_DOUBLE:
2049 2050 assert( i + 1 < total_in_args &&
2050 2051 in_sig_bt[i + 1] == T_VOID &&
2051 2052 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2052 2053 double_move(masm, in_regs[i], out_regs[c_arg]);
2053 2054 break;
2054 2055
2055 2056 case T_LONG :
2056 2057 long_move(masm, in_regs[i], out_regs[c_arg]);
2057 2058 break;
2058 2059
2059 2060 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2060 2061
2061 2062 default:
2062 2063 move32_64(masm, in_regs[i], out_regs[c_arg]);
2063 2064 }
2064 2065 }
2065 2066
2066 2067 // Pre-load a static method's oop into O1. Used both by locking code and
2067 2068 // the normal JNI call code.
2068 2069 if (method->is_static()) {
2069 2070 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2070 2071
2071 2072 // Now handlize the static class mirror in O1. It's known not-null.
2072 2073 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2073 2074 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2074 2075 __ add(SP, klass_offset + STACK_BIAS, O1);
2075 2076 }
2076 2077
2077 2078
2078 2079 const Register L6_handle = L6;
2079 2080
2080 2081 if (method->is_synchronized()) {
2081 2082 __ mov(O1, L6_handle);
2082 2083 }
2083 2084
2084 2085 // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2085 2086 // except O6/O7. So if we must call out we must push a new frame. We immediately
2086 2087 // push a new frame and flush the windows.
2087 2088
2088 2089 #ifdef _LP64
2089 2090 intptr_t thepc = (intptr_t) __ pc();
2090 2091 {
2091 2092 address here = __ pc();
2092 2093 // Call the next instruction
2093 2094 __ call(here + 8, relocInfo::none);
2094 2095 __ delayed()->nop();
2095 2096 }
2096 2097 #else
2097 2098 intptr_t thepc = __ load_pc_address(O7, 0);
2098 2099 #endif /* _LP64 */
2099 2100
2100 2101 // We use the same pc/oopMap repeatedly when we call out
2101 2102 oop_maps->add_gc_map(thepc - start, map);
2102 2103
2103 2104 // O7 now has the pc loaded that we will use when we finally call to native.
2104 2105
2105 2106 // Save thread in L7; it crosses a bunch of VM calls below
2106 2107 // Don't use save_thread because it smashes G2 and we merely
2107 2108 // want to save a copy
2108 2109 __ mov(G2_thread, L7_thread_cache);
2109 2110
2110 2111
2111 2112 // If we create an inner frame, once is plenty;
2112 2113 // when we create it we must also save G2_thread.
2113 2114 bool inner_frame_created = false;
2114 2115
2115 2116 // dtrace method entry support
2116 2117 {
2117 2118 SkipIfEqual skip_if(
2118 2119 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2119 2120 // create inner frame
2120 2121 __ save_frame(0);
2121 2122 __ mov(G2_thread, L7_thread_cache);
2122 2123 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2123 2124 __ call_VM_leaf(L7_thread_cache,
2124 2125 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2125 2126 G2_thread, O1);
2126 2127 __ restore();
2127 2128 }
2128 2129
2129 2130 // RedefineClasses() tracing support for obsolete method entry
2130 2131 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2131 2132 // create inner frame
2132 2133 __ save_frame(0);
2133 2134 __ mov(G2_thread, L7_thread_cache);
2134 2135 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2135 2136 __ call_VM_leaf(L7_thread_cache,
2136 2137 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2137 2138 G2_thread, O1);
2138 2139 __ restore();
2139 2140 }
2140 2141
2141 2142 // We are in the jni frame unless saved_frame is true, in which case
2142 2143 // we are one frame deeper (the "inner" frame). If we are in the
2143 2144 // "inner" frame the args are in the Iregs; if in the jni frame then
2144 2145 // they are in the Oregs.
2145 2146 // If we ever need to go to the VM (for locking, jvmti) then
2146 2147 // we will always be in the "inner" frame.
2147 2148
2148 2149 // Lock a synchronized method
2149 2150 int lock_offset = -1; // Set if locked
2150 2151 if (method->is_synchronized()) {
2151 2152 Register Roop = O1;
2152 2153 const Register L3_box = L3;
2153 2154
2154 2155 create_inner_frame(masm, &inner_frame_created);
2155 2156
2156 2157 __ ld_ptr(I1, 0, O1);
2157 2158 Label done;
2158 2159
2159 2160 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2160 2161 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2161 2162 #ifdef ASSERT
2162 2163 if (UseBiasedLocking) {
2163 2164 // making the box point to itself will make it clear it went unused
2164 2165 // but also be obviously invalid
2165 2166 __ st_ptr(L3_box, L3_box, 0);
2166 2167 }
2167 2168 #endif // ASSERT
2168 2169 //
2169 2170 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2170 2171 //
2171 2172 __ compiler_lock_object(Roop, L1, L3_box, L2);
2172 2173 __ br(Assembler::equal, false, Assembler::pt, done);
2173 2174 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2174 2175
2175 2176
2176 2177 // None of the above fast optimizations worked so we have to get into the
2177 2178 // slow case of monitor enter. Inline a special case of call_VM that
2178 2179 // disallows any pending_exception.
2179 2180 __ mov(Roop, O0); // Need oop in O0
2180 2181 __ mov(L3_box, O1);
2181 2182
2182 2183 // Record last_Java_sp, in case the VM code releases the JVM lock.
2183 2184
2184 2185 __ set_last_Java_frame(FP, I7);
2185 2186
2186 2187 // do the call
2187 2188 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2188 2189 __ delayed()->mov(L7_thread_cache, O2);
2189 2190
2190 2191 __ restore_thread(L7_thread_cache); // restore G2_thread
2191 2192 __ reset_last_Java_frame();
2192 2193
2193 2194 #ifdef ASSERT
2194 2195 { Label L;
2195 2196 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2196 2197 __ br_null(O0, false, Assembler::pt, L);
2197 2198 __ delayed()->nop();
2198 2199 __ stop("no pending exception allowed on exit from IR::monitorenter");
2199 2200 __ bind(L);
2200 2201 }
2201 2202 #endif
2202 2203 __ bind(done);
2203 2204 }
2204 2205
2205 2206
2206 2207 // Finally just about ready to make the JNI call
2207 2208
2208 2209 __ flush_windows();
2209 2210 if (inner_frame_created) {
2210 2211 __ restore();
2211 2212 } else {
2212 2213 // Store only what we need from this frame
2213 2214 // QQQ I think that on non-v9 (like we care) we don't need these saves
2214 2215 // either, as the flush traps and the current window goes too.
2215 2216 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2216 2217 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2217 2218 }
2218 2219
2219 2220 // get JNIEnv* which is first argument to native
2220 2221
2221 2222 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2222 2223
2223 2224 // Use that pc we placed in O7 a while back as the current frame anchor
2224 2225
2225 2226 __ set_last_Java_frame(SP, O7);
2226 2227
2227 2228 // Transition from _thread_in_Java to _thread_in_native.
2228 2229 __ set(_thread_in_native, G3_scratch);
2229 2230 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2230 2231
2231 2232 // We flushed the windows ages ago; now mark them as flushed
2232 2233 
2234 2235 __ set(JavaFrameAnchor::flushed, G3_scratch);
2235 2236
2236 2237 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2237 2238
2238 2239 #ifdef _LP64
2239 2240 AddressLiteral dest(method->native_function());
2240 2241 __ relocate(relocInfo::runtime_call_type);
2241 2242 __ jumpl_to(dest, O7, O7);
2242 2243 #else
2243 2244 __ call(method->native_function(), relocInfo::runtime_call_type);
2244 2245 #endif
2245 2246 __ delayed()->st(G3_scratch, flags);
2246 2247
2247 2248 __ restore_thread(L7_thread_cache); // restore G2_thread
2248 2249
2249 2250 // Unpack native results. For int-types, we do any needed sign-extension
2250 2251 // and move things into I0. The return value there will survive any VM
2251 2252 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2252 2253 // specially in the slow-path code.
2253 2254 switch (ret_type) {
2254 2255 case T_VOID: break; // Nothing to do!
2255 2256 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2256 2257 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2257 2258 // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
2258 2259 case T_LONG:
2259 2260 #ifndef _LP64
2260 2261 __ mov(O1, I1);
2261 2262 #endif
2262 2263 // Fall thru
2263 2264 case T_OBJECT: // Really a handle
2264 2265 case T_ARRAY:
2265 2266 case T_INT:
2266 2267 __ mov(O0, I0);
2267 2268 break;
2268 2269 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2269 2270 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2270 2271 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2271 2272 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2272 2273 break; // Cannot de-handlize until after reclaiming jvm_lock
2273 2274 default:
2274 2275 ShouldNotReachHere();
2275 2276 }
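
The T_BOOLEAN line is the classic carry trick: subcc computes G0 - O0, which sets the carry (borrow) flag exactly when O0 != 0, and addc then materializes that flag as 0 or 1. The same normalization in C++ (a sketch):

    #include <cstdio>

    static int normalize_bool(unsigned x) {
      unsigned carry = (x != 0) ? 1u : 0u;  // borrow out of 0 - x
      return (int)(0u + carry);             // addc(G0, 0, I0)
    }

    int main() {
      printf("%d %d %d\n", normalize_bool(0), normalize_bool(1), normalize_bool(42));
      return 0;
    }
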
2276 2277
2277 2278 // must we block?
2278 2279
2279 2280 // Block, if necessary, before resuming in _thread_in_Java state.
2280 2281 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2281 2282 { Label no_block;
2282 2283 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2283 2284
2284 2285 // Switch thread to "native transition" state before reading the synchronization state.
2285 2286 // This additional state is necessary because reading and testing the synchronization
2286 2287 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2287 2288 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2288 2289 // VM thread changes sync state to synchronizing and suspends threads for GC.
2289 2290 // Thread A is resumed to finish this native method, but doesn't block here since it
2290 2291 // didn't see any synchronization in progress, and escapes.
2291 2292 __ set(_thread_in_native_trans, G3_scratch);
2292 2293 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2293 2294 if(os::is_MP()) {
2294 2295 if (UseMembar) {
2295 2296 // Force this write out before the read below
2296 2297 __ membar(Assembler::StoreLoad);
2297 2298 } else {
2298 2299 // Write serialization page so VM thread can do a pseudo remote membar.
2299 2300 // We use the current thread pointer to calculate a thread specific
2300 2301 // offset to write to within the page. This minimizes bus traffic
2301 2302 // due to cache line collision.
2302 2303 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2303 2304 }
2304 2305 }
2305 2306 __ load_contents(sync_state, G3_scratch);
2306 2307 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2307 2308
2308 2309 Label L;
2309 2310 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2310 2311 __ br(Assembler::notEqual, false, Assembler::pn, L);
2311 2312 __ delayed()->ld(suspend_state, G3_scratch);
2312 2313 __ cmp(G3_scratch, 0);
2313 2314 __ br(Assembler::equal, false, Assembler::pt, no_block);
2314 2315 __ delayed()->nop();
2315 2316 __ bind(L);
2316 2317
2317 2318 // Block. Save any potential method result value before the operation and
2318 2319 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2319 2320 // lets us share the oopMap we used when we went native rather than create
2320 2321 // a distinct one for this pc
2321 2322 //
2322 2323 save_native_result(masm, ret_type, stack_slots);
2323 2324 __ call_VM_leaf(L7_thread_cache,
2324 2325 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2325 2326 G2_thread);
2326 2327
2327 2328 // Restore any method result value
2328 2329 restore_native_result(masm, ret_type, stack_slots);
2329 2330 __ bind(no_block);
2330 2331 }
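
The transition protocol above can be modeled with a pair of atomics: publish the trans state, force a StoreLoad ordering point, then decide whether to block. A pseudo-C++ sketch (the VM globals are replaced with plain atomics; this is a model, not the real synchronization code):

    #include <atomic>
    #include <cstdio>

    enum State { in_native, in_native_trans, in_java };
    static std::atomic<int>  thread_state(in_native);
    static std::atomic<bool> safepoint_pending(false);
    static std::atomic<int>  suspend_flags(0);

    static void return_from_native() {
      thread_state.store(in_native_trans);
      std::atomic_thread_fence(std::memory_order_seq_cst); // membar / serialization page
      if (safepoint_pending.load() || suspend_flags.load() != 0)
        printf("block: check_special_condition_for_native_trans\n");
      thread_state.store(in_java);
    }

    int main() { return_from_native(); return 0; }
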
2331 2332
2332 2333 // thread state is thread_in_native_trans. Any safepoint blocking has already
2333 2334 // happened, so we can now change state to _thread_in_Java.
2334 2335
2335 2336
2336 2337 __ set(_thread_in_Java, G3_scratch);
2337 2338 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2338 2339
2339 2340
2340 2341 Label no_reguard;
2341 2342 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2342 2343 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2343 2344 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2344 2345 __ delayed()->nop();
2345 2346
2346 2347 save_native_result(masm, ret_type, stack_slots);
2347 2348 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2348 2349 __ delayed()->nop();
2349 2350
2350 2351 __ restore_thread(L7_thread_cache); // restore G2_thread
2351 2352 restore_native_result(masm, ret_type, stack_slots);
2352 2353
2353 2354 __ bind(no_reguard);
2354 2355
2355 2356 // Handle possible exception (will unlock if necessary)
2356 2357
2357 2358 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2358 2359
2359 2360 // Unlock
2360 2361 if (method->is_synchronized()) {
2361 2362 Label done;
2362 2363 Register I2_ex_oop = I2;
2363 2364 const Register L3_box = L3;
2364 2365 // Get locked oop from the handle we passed to jni
2365 2366 __ ld_ptr(L6_handle, 0, L4);
2366 2367 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2367 2368 // Must save pending exception around the slow-path VM call. Since it's a
2368 2369 // leaf call, the pending exception (if any) can be kept in a register.
2369 2370 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2370 2371 // Now unlock
2371 2372 // (Roop, Rmark, Rbox, Rscratch)
2372 2373 __ compiler_unlock_object(L4, L1, L3_box, L2);
2373 2374 __ br(Assembler::equal, false, Assembler::pt, done);
2374 2375 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2375 2376
2376 2377 // save and restore any potential method result value around the unlocking
2377 2378 // operation. Will save in I0 (or stack for FP returns).
2378 2379 save_native_result(masm, ret_type, stack_slots);
2379 2380
2380 2381 // Must clear pending-exception before re-entering the VM. Since this is
2381 2382 // a leaf call, pending-exception-oop can be safely kept in a register.
2382 2383 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2383 2384
2384 2385 // slow case of monitor exit. Inline a special case of call_VM that
2385 2386 // disallows any pending_exception.
2386 2387 __ mov(L3_box, O1);
2387 2388
2388 2389 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2389 2390 __ delayed()->mov(L4, O0); // Need oop in O0
2390 2391
2391 2392 __ restore_thread(L7_thread_cache); // restore G2_thread
2392 2393
2393 2394 #ifdef ASSERT
2394 2395 { Label L;
2395 2396 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2396 2397 __ br_null(O0, false, Assembler::pt, L);
2397 2398 __ delayed()->nop();
2398 2399 __ stop("no pending exception allowed on exit from IR::monitorexit");
2399 2400 __ bind(L);
2400 2401 }
2401 2402 #endif
2402 2403 restore_native_result(masm, ret_type, stack_slots);
2403 2404 // check_forward_pending_exception jumps to forward_exception if any pending
2404 2405 // exception is set. The forward_exception routine expects to see the
2405 2406 // exception in pending_exception and not in a register. Kind of clumsy,
2406 2407 // since all folks who branch to forward_exception must have tested
2407 2408 // pending_exception first and hence have it in a register already.
2408 2409 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2409 2410 __ bind(done);
2410 2411 }
2411 2412
2412 2413 // Tell dtrace about this method exit
2413 2414 {
2414 2415 SkipIfEqual skip_if(
2415 2416 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2416 2417 save_native_result(masm, ret_type, stack_slots);
2417 2418 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2418 2419 __ call_VM_leaf(L7_thread_cache,
2419 2420 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2420 2421 G2_thread, O1);
2421 2422 restore_native_result(masm, ret_type, stack_slots);
2422 2423 }
2423 2424
2424 2425 // Clear "last Java frame" SP and PC.
2425 2426 __ verify_thread(); // G2_thread must be correct
2426 2427 __ reset_last_Java_frame();
2427 2428
2428 2429 // Unpack oop result
2429 2430 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2430 2431 Label L;
2431 2432 __ addcc(G0, I0, G0);
2432 2433 __ brx(Assembler::notZero, true, Assembler::pt, L);
2433 2434 __ delayed()->ld_ptr(I0, 0, I0);
2434 2435 __ mov(G0, I0);
2435 2436 __ bind(L);
2436 2437 __ verify_oop(I0);
2437 2438 }
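
This is the inverse of the handlize sketch earlier: a NULL handle stays NULL, anything else is dereferenced to recover the oop (the annulled delayed ld_ptr performs the load only on the taken branch). A sketch:

    #include <cstdio>

    typedef void* oop;

    static oop unhandlize(oop handle) {
      return (handle == NULL) ? NULL : *(oop*)handle;
    }

    int main() {
      oop obj = (oop)0xbeef;
      printf("%p %p\n", unhandlize(&obj), unhandlize(NULL));
      return 0;
    }
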
2438 2439
2439 2440 // reset handle block
2440 2441 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2441 2442 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2442 2443
2443 2444 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2444 2445 check_forward_pending_exception(masm, G3_scratch);
2445 2446
2446 2447
2447 2448 // Return
2448 2449
2449 2450 #ifndef _LP64
2450 2451 if (ret_type == T_LONG) {
2451 2452
2452 2453 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2453 2454 __ sllx(I0, 32, G1); // Shift bits into high G1
2454 2455 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2455 2456 __ or3 (I1, G1, G1); // OR 64 bits into G1
2456 2457 }
2457 2458 #endif
2458 2459
2459 2460 __ ret();
2460 2461 __ delayed()->restore();
2461 2462
2462 2463 __ flush();
2463 2464
2464 2465 nmethod *nm = nmethod::new_native_nmethod(method,
2466 + compile_id,
2465 2467 masm->code(),
2466 2468 vep_offset,
2467 2469 frame_complete,
2468 2470 stack_slots / VMRegImpl::slots_per_word,
2469 2471 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2470 2472 in_ByteSize(lock_offset),
2471 2473 oop_maps);
2472 2474 return nm;
2473 2475
2474 2476 }
2475 2477
2476 2478 #ifdef HAVE_DTRACE_H
2477 2479 // ---------------------------------------------------------------------------
2478 2480 // Generate a dtrace nmethod for a given signature. The method takes arguments
2479 2481 // in the Java compiled code convention, marshals them to the native
2480 2482 // abi and then leaves nops at the position you would expect to call a native
2481 2483 // function. When the probe is enabled the nops are replaced with a trap
2482 2484 // instruction that dtrace inserts and the trap will cause a notification
2483 2485 // to dtrace.
2484 2486 //
2485 2487 // The probes are only able to take primitive types and java/lang/String as
2486 2488 // arguments. No other java types are allowed. Strings are converted to utf8
2487 2489 // strings so that from dtrace's point of view java strings are converted to C
2488 2490 // strings. There is an arbitrary fixed limit on the total space that a method
2489 2491 // can use for converting the strings. (256 chars per string in the signature).
2490 2492 // So any java string larger than this is truncated.
2491 2493
2492 2494 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2493 2495 static bool offsets_initialized = false;
2494 2496
2495 2497 static VMRegPair reg64_to_VMRegPair(Register r) {
2496 2498 VMRegPair ret;
2497 2499 if (wordSize == 8) {
2498 2500 ret.set2(r->as_VMReg());
2499 2501 } else {
2500 2502 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2501 2503 }
2502 2504 return ret;
2503 2505 }
2504 2506
2505 2507
2506 2508 nmethod *SharedRuntime::generate_dtrace_nmethod(
2507 2509 MacroAssembler *masm, methodHandle method) {
2508 2510
2509 2511
2510 2512 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2511 2513 // be single threaded in this method.
2512 2514 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2513 2515
2514 2516 // Fill in the signature array, for the calling-convention call.
2515 2517 int total_args_passed = method->size_of_parameters();
2516 2518
2517 2519 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2518 2520 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2519 2521
2520 2522 // The signature we are going to use for the trap that dtrace will see:
2521 2523 // java/lang/String is converted, we drop "this", and any other object
2522 2524 // is converted to NULL. (A one-slot java/lang/Long object reference
2523 2525 // is converted to a two-slot long, which is why we double the allocation).
2524 2526 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2525 2527 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2526 2528
2527 2529 int i=0;
2528 2530 int total_strings = 0;
2529 2531 int first_arg_to_pass = 0;
2530 2532 int total_c_args = 0;
2531 2533
2532 2534 // Skip the receiver as dtrace doesn't want to see it
2533 2535 if( !method->is_static() ) {
2534 2536 in_sig_bt[i++] = T_OBJECT;
2535 2537 first_arg_to_pass = 1;
2536 2538 }
2537 2539
2538 2540 SignatureStream ss(method->signature());
2539 2541 for ( ; !ss.at_return_type(); ss.next()) {
2540 2542 BasicType bt = ss.type();
2541 2543 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2542 2544 out_sig_bt[total_c_args++] = bt;
2543 2545 if( bt == T_OBJECT) {
2544 2546 Symbol* s = ss.as_symbol_or_null();
2545 2547 if (s == vmSymbols::java_lang_String()) {
2546 2548 total_strings++;
2547 2549 out_sig_bt[total_c_args-1] = T_ADDRESS;
2548 2550 } else if (s == vmSymbols::java_lang_Boolean() ||
2549 2551 s == vmSymbols::java_lang_Byte()) {
2550 2552 out_sig_bt[total_c_args-1] = T_BYTE;
2551 2553 } else if (s == vmSymbols::java_lang_Character() ||
2552 2554 s == vmSymbols::java_lang_Short()) {
2553 2555 out_sig_bt[total_c_args-1] = T_SHORT;
2554 2556 } else if (s == vmSymbols::java_lang_Integer() ||
2555 2557 s == vmSymbols::java_lang_Float()) {
2556 2558 out_sig_bt[total_c_args-1] = T_INT;
2557 2559 } else if (s == vmSymbols::java_lang_Long() ||
2558 2560 s == vmSymbols::java_lang_Double()) {
2559 2561 out_sig_bt[total_c_args-1] = T_LONG;
2560 2562 out_sig_bt[total_c_args++] = T_VOID;
2561 2563 }
2562 2564 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2563 2565 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2564 2566 // We convert double to long
2565 2567 out_sig_bt[total_c_args-1] = T_LONG;
2566 2568 out_sig_bt[total_c_args++] = T_VOID;
2567 2569 } else if ( bt == T_FLOAT) {
2568 2570 // We convert float to int
2569 2571 out_sig_bt[total_c_args-1] = T_INT;
2570 2572 }
2571 2573 }
2572 2574
2573 2575 assert(i==total_args_passed, "validly parsed signature");
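
The rewrite loop above is, in effect, a small type-mapping table; the same mapping in standalone form (a sketch over symbol names, where the real code compares vmSymbols entries):

    #include <cstdio>
    #include <cstring>

    static const char* dtrace_type(const char* jtype) {
      if (!strcmp(jtype, "java/lang/String"))                                         return "T_ADDRESS";
      if (!strcmp(jtype, "java/lang/Boolean")  || !strcmp(jtype, "java/lang/Byte"))   return "T_BYTE";
      if (!strcmp(jtype, "java/lang/Character")|| !strcmp(jtype, "java/lang/Short"))  return "T_SHORT";
      if (!strcmp(jtype, "java/lang/Integer")  || !strcmp(jtype, "java/lang/Float"))  return "T_INT";
      if (!strcmp(jtype, "java/lang/Long")     || !strcmp(jtype, "java/lang/Double")) return "T_LONG (+ T_VOID half)";
      return "T_OBJECT, passed as NULL";
    }

    int main() {
      printf("java/lang/String -> %s\n", dtrace_type("java/lang/String"));
      return 0;
    }
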
2574 2576
2575 2577 // Now get the compiled-Java layout as input arguments
2576 2578 int comp_args_on_stack;
2577 2579 comp_args_on_stack = SharedRuntime::java_calling_convention(
2578 2580 in_sig_bt, in_regs, total_args_passed, false);
2579 2581
2580 2582 // We have received a description of where all the java args are located
2581 2583 // on entry to the wrapper. We need to convert these args to where
2582 2584 // a native (non-jni) function would expect them. To figure out
2583 2585 // where they go we convert the java signature to a C signature and remove
2584 2586 // T_VOID for any long/double we might have received.
2585 2587
2586 2588
2587 2589 // Now figure out where the args must be stored and how much stack space
2588 2590 // they require (neglecting out_preserve_stack_slots but including space
2589 2591 // for storing the first six register arguments). It's weird; see int_stk_helper.
2590 2592 //
2591 2593 int out_arg_slots;
2592 2594 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2593 2595
2594 2596 // Calculate the total number of stack slots we will need.
2595 2597
2596 2598 // First count the abi requirement plus all of the outgoing args
2597 2599 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2598 2600
2599 2601 // Plus a temp for possible conversion of float/double/long register args
2600 2602
2601 2603 int conversion_temp = stack_slots;
2602 2604 stack_slots += 2;
2603 2605
2604 2606
2605 2607 // Now space for the string(s) we must convert
2606 2608
2607 2609 int string_locs = stack_slots;
2608 2610 stack_slots += total_strings *
2609 2611 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2610 2612
2611 2613 // Ok, the space we have allocated will look like:
2612 2614 //
2613 2615 //
2614 2616 // FP-> | |
2615 2617 // |---------------------|
2616 2618 // | string[n] |
2617 2619 // |---------------------| <- string_locs[n]
2618 2620 // | string[n-1] |
2619 2621 // |---------------------| <- string_locs[n-1]
2620 2622 // | ... |
2621 2623 // | ... |
2622 2624 // |---------------------| <- string_locs[1]
2623 2625 // | string[0] |
2624 2626 // |---------------------| <- string_locs[0]
2625 2627 // | temp |
2626 2628 // |---------------------| <- conversion_temp
2627 2629 // | outbound memory |
2628 2630 // | based arguments |
2629 2631 // | |
2630 2632 // |---------------------|
2631 2633 // | |
2632 2634 // SP-> | out_preserved_slots |
2633 2635 //
2634 2636 //
2635 2637
2636 2638 // Now compute the actual number of stack words we need, rounding to keep
2637 2639 // the stack properly aligned.
2638 2640 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2639 2641
2640 2642 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
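// Illustrative arithmetic (comment only; the concrete values below are
// assumed for the example, not taken from this build): with
// out_preserve_stack_slots() == 16, out_arg_slots == 8, two strings, and
// max_dtrace_string_size == 256 (64 slots per string at 4 bytes per slot),
// we would get 16 + 8 + 2 + 2*64 = 154 slots, which round_to(154, 8)
// (assuming LP64, where 4 * VMRegImpl::slots_per_word == 8) aligns up to
// 160 slots, i.e. a 640-byte frame.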
2641 2643
2642 2644 intptr_t start = (intptr_t)__ pc();
2643 2645
2644 2646 // First thing, make an IC check to see if we should even be here
2645 2647
2646 2648 {
2647 2649 Label L;
2648 2650 const Register temp_reg = G3_scratch;
2649 2651 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2650 2652 __ verify_oop(O0);
2651 2653 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2652 2654 __ cmp(temp_reg, G5_inline_cache_reg);
2653 2655 __ brx(Assembler::equal, true, Assembler::pt, L);
2654 2656 __ delayed()->nop();
2655 2657
2656 2658 __ jump_to(ic_miss, temp_reg);
2657 2659 __ delayed()->nop();
2658 2660 __ align(CodeEntryAlignment);
2659 2661 __ bind(L);
2660 2662 }
2661 2663
2662 2664 int vep_offset = ((intptr_t)__ pc()) - start;
2663 2665
2664 2666
2665 2667 // The instruction at the verified entry point must be 5 bytes or longer
2666 2668 // because it can be patched on the fly by make_non_entrant. The stack bang
2667 2669 // instruction fits that requirement.
2668 2670
2669 2671 // Generate stack overflow check before creating frame
2670 2672 __ generate_stack_overflow_check(stack_size);
2671 2673
2672 2674 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2673 2675 "valid size for make_non_entrant");
2674 2676
2675 2677 // Generate a new frame for the wrapper.
2676 2678 __ save(SP, -stack_size, SP);
2677 2679
2678 2680 // Frame is now completed as far as size and linkage.
2679 2681
2680 2682 int frame_complete = ((intptr_t)__ pc()) - start;
2681 2683
2682 2684 #ifdef ASSERT
2683 2685 bool reg_destroyed[RegisterImpl::number_of_registers];
2684 2686 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2685 2687 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2686 2688 reg_destroyed[r] = false;
2687 2689 }
2688 2690 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2689 2691 freg_destroyed[f] = false;
2690 2692 }
2691 2693
2692 2694 #endif /* ASSERT */
2693 2695
2694 2696 VMRegPair zero;
2695 2697 const Register g0 = G0; // without this we get a compiler warning (why??)
2696 2698 zero.set2(g0->as_VMReg());
2697 2699
2698 2700 int c_arg, j_arg;
2699 2701
2700 2702 Register conversion_off = noreg;
2701 2703
2702 2704 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2703 2705 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2704 2706
2705 2707 VMRegPair src = in_regs[j_arg];
2706 2708 VMRegPair dst = out_regs[c_arg];
2707 2709
2708 2710 #ifdef ASSERT
2709 2711 if (src.first()->is_Register()) {
2710 2712 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2711 2713 } else if (src.first()->is_FloatRegister()) {
2712 2714 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2713 2715 FloatRegisterImpl::S)], "ack!");
2714 2716 }
2715 2717 if (dst.first()->is_Register()) {
2716 2718 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2717 2719 } else if (dst.first()->is_FloatRegister()) {
2718 2720 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2719 2721 FloatRegisterImpl::S)] = true;
2720 2722 }
2721 2723 #endif /* ASSERT */
2722 2724
2723 2725 switch (in_sig_bt[j_arg]) {
2724 2726 case T_ARRAY:
2725 2727 case T_OBJECT:
2726 2728 {
2727 2729 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
2728 2730 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2729 2731 // need to unbox a one-slot value
2730 2732 Register in_reg = L0;
2731 2733 Register tmp = L2;
2732 2734 if ( src.first()->is_reg() ) {
2733 2735 in_reg = src.first()->as_Register();
2734 2736 } else {
2735 2737 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2736 2738 "must be");
2737 2739 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2738 2740 }
2739 2741 // If the final destination is an acceptable register
2740 2742 if ( dst.first()->is_reg() ) {
2741 2743 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2742 2744 tmp = dst.first()->as_Register();
2743 2745 }
2744 2746 }
2745 2747
2746 2748 Label skipUnbox;
2747 2749 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2748 2750 __ mov(G0, tmp->successor());
2749 2751 }
2750 2752 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2751 2753 __ delayed()->mov(G0, tmp);
2752 2754
2753 2755 BasicType bt = out_sig_bt[c_arg];
2754 2756 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2755 2757 switch (bt) {
2756 2758 case T_BYTE:
2757 2759 __ ldub(in_reg, box_offset, tmp); break;
2758 2760 case T_SHORT:
2759 2761 __ lduh(in_reg, box_offset, tmp); break;
2760 2762 case T_INT:
2761 2763 __ ld(in_reg, box_offset, tmp); break;
2762 2764 case T_LONG:
2763 2765 __ ld_long(in_reg, box_offset, tmp); break;
2764 2766 default: ShouldNotReachHere();
2765 2767 }
2766 2768
2767 2769 __ bind(skipUnbox);
2768 2770 // If tmp wasn't the final destination, copy to the final destination
2769 2771 if (tmp == L2) {
2770 2772 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2771 2773 if (out_sig_bt[c_arg] == T_LONG) {
2772 2774 long_move(masm, tmp_as_VM, dst);
2773 2775 } else {
2774 2776 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2775 2777 }
2776 2778 }
2777 2779 if (out_sig_bt[c_arg] == T_LONG) {
2778 2780 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2779 2781 ++c_arg; // move over the T_VOID to keep the loop indices in sync
2780 2782 }
2781 2783 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2782 2784 Register s =
2783 2785 src.first()->is_reg() ? src.first()->as_Register() : L2;
2784 2786 Register d =
2785 2787 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2786 2788
2787 2789 // We store the oop now so that the conversion pass can reach it
2788 2790 // while in the inner frame. This will be the only store if
2789 2791 // the oop is NULL.
2790 2792 if (s != L2) {
2791 2793 // src is register
2792 2794 if (d != L2) {
2793 2795 // dst is register
2794 2796 __ mov(s, d);
2795 2797 } else {
2796 2798 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2797 2799 STACK_BIAS), "must be");
2798 2800 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2799 2801 }
2800 2802 } else {
2801 2803 // src not a register
2802 2804 assert(Assembler::is_simm13(reg2offset(src.first()) +
2803 2805 STACK_BIAS), "must be");
2804 2806 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2805 2807 if (d == L2) {
2806 2808 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2807 2809 STACK_BIAS), "must be");
2808 2810 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2809 2811 }
2810 2812 }
2811 2813 } else if (out_sig_bt[c_arg] != T_VOID) {
2812 2814 // Convert the arg to NULL
2813 2815 if (dst.first()->is_reg()) {
2814 2816 __ mov(G0, dst.first()->as_Register());
2815 2817 } else {
2816 2818 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2817 2819 STACK_BIAS), "must be");
2818 2820 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2819 2821 }
2820 2822 }
2821 2823 }
2822 2824 break;
2823 2825 case T_VOID:
2824 2826 break;
2825 2827
2826 2828 case T_FLOAT:
2827 2829 if (src.first()->is_stack()) {
2828 2830 // Stack to stack/reg is simple
2829 2831 move32_64(masm, src, dst);
2830 2832 } else {
2831 2833 if (dst.first()->is_reg()) {
2832 2834 // freg -> reg
2833 2835 int off =
2834 2836 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2835 2837 Register d = dst.first()->as_Register();
2836 2838 if (Assembler::is_simm13(off)) {
2837 2839 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2838 2840 SP, off);
2839 2841 __ ld(SP, off, d);
2840 2842 } else {
2841 2843 if (conversion_off == noreg) {
2842 2844 __ set(off, L6);
2843 2845 conversion_off = L6;
2844 2846 }
2845 2847 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2846 2848 SP, conversion_off);
2847 2849 __ ld(SP, conversion_off , d);
2848 2850 }
2849 2851 } else {
2850 2852 // freg -> mem
2851 2853 int off = STACK_BIAS + reg2offset(dst.first());
2852 2854 if (Assembler::is_simm13(off)) {
2853 2855 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2854 2856 SP, off);
2855 2857 } else {
2856 2858 if (conversion_off == noreg) {
2857 2859 __ set(off, L6);
2858 2860 conversion_off = L6;
2859 2861 }
2860 2862 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2861 2863 SP, conversion_off);
2862 2864 }
2863 2865 }
2864 2866 }
2865 2867 break;
2866 2868
2867 2869 case T_DOUBLE:
2868 2870 assert( j_arg + 1 < total_args_passed &&
2869 2871 in_sig_bt[j_arg + 1] == T_VOID &&
2870 2872 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2871 2873 if (src.first()->is_stack()) {
2872 2874 // Stack to stack/reg is simple
2873 2875 long_move(masm, src, dst);
2874 2876 } else {
2875 2877 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2876 2878
2877 2879 // Destination could be an odd reg on 32bit in which case
2878 2880 // we can't load directly to the destination.
2879 2881
2880 2882 if (!d->is_even() && wordSize == 4) {
2881 2883 d = L2;
2882 2884 }
2883 2885 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2884 2886 if (Assembler::is_simm13(off)) {
2885 2887 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2886 2888 SP, off);
2887 2889 __ ld_long(SP, off, d);
2888 2890 } else {
2889 2891 if (conversion_off == noreg) {
2890 2892 __ set(off, L6);
2891 2893 conversion_off = L6;
2892 2894 }
2893 2895 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2894 2896 SP, conversion_off);
2895 2897 __ ld_long(SP, conversion_off, d);
2896 2898 }
2897 2899 if (d == L2) {
2898 2900 long_move(masm, reg64_to_VMRegPair(L2), dst);
2899 2901 }
2900 2902 }
2901 2903 break;
2902 2904
2903 2905 case T_LONG :
2904 2906 // 32bit can't do a split move of something like g1 -> O0, O1
2905 2907 // so use a memory temp
2906 2908 if (src.is_single_phys_reg() && wordSize == 4) {
2907 2909 Register tmp = L2;
2908 2910 if (dst.first()->is_reg() &&
2909 2911 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2910 2912 tmp = dst.first()->as_Register();
2911 2913 }
2912 2914
2913 2915 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2914 2916 if (Assembler::is_simm13(off)) {
2915 2917 __ stx(src.first()->as_Register(), SP, off);
2916 2918 __ ld_long(SP, off, tmp);
2917 2919 } else {
2918 2920 if (conversion_off == noreg) {
2919 2921 __ set(off, L6);
2920 2922 conversion_off = L6;
2921 2923 }
2922 2924 __ stx(src.first()->as_Register(), SP, conversion_off);
2923 2925 __ ld_long(SP, conversion_off, tmp);
2924 2926 }
2925 2927
2926 2928 if (tmp == L2) {
2927 2929 long_move(masm, reg64_to_VMRegPair(L2), dst);
2928 2930 }
2929 2931 } else {
2930 2932 long_move(masm, src, dst);
2931 2933 }
2932 2934 break;
2933 2935
2934 2936 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // falls into default in product builds
2935 2937
2936 2938 default:
2937 2939 move32_64(masm, src, dst);
2938 2940 }
2939 2941 }
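// At this point every outgoing C argument is in its final register or
// stack slot; only the string (T_ADDRESS) args may still hold Java heap
// oops, which the conversion pass below replaces with C string copies.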
2940 2942
2941 2943
2942 2944 // If we have any strings we must store any register-based arg to the stack.
2943 2945 // This includes any still-live floating point registers too.
2944 2946
2945 2947 if (total_strings > 0 ) {
2946 2948
2947 2949 // protect all the arg registers
2948 2950 __ save_frame(0);
2949 2951 __ mov(G2_thread, L7_thread_cache);
2950 2952 const Register L2_string_off = L2;
2951 2953
2952 2954 // Get first string offset
2953 2955 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2954 2956
2955 2957 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2956 2958 if (out_sig_bt[c_arg] == T_ADDRESS) {
2957 2959
2958 2960 VMRegPair dst = out_regs[c_arg];
2959 2961 const Register d = dst.first()->is_reg() ?
2960 2962 dst.first()->as_Register()->after_save() : noreg;
2961 2963
2962 2964 // It's a string; the oop was already copied to the out arg
2963 2965 // position
2964 2966 if (d != noreg) {
2965 2967 __ mov(d, O0);
2966 2968 } else {
2967 2969 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2968 2970 "must be");
2969 2971 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
2970 2972 }
2971 2973 Label skip;
2972 2974
2973 2975 __ br_null(O0, false, Assembler::pn, skip);
2974 2976 __ delayed()->add(FP, L2_string_off, O1);
2975 2977
2976 2978 if (d != noreg) {
2977 2979 __ mov(O1, d);
2978 2980 } else {
2979 2981 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2980 2982 "must be");
2981 2983 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
2982 2984 }
2983 2985
2984 2986 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
2985 2987 relocInfo::runtime_call_type);
2986 2988 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
2987 2989
2988 2990 __ bind(skip);
2989 2991
2990 2992 }
2991 2993
2992 2994 }
2993 2995 __ mov(L7_thread_cache, G2_thread);
2994 2996 __ restore();
2995 2997
2996 2998 }
2997 2999
2998 3000
2999 3001 // Ok, now we are done. Place the nop that dtrace wants in order to
3000 3002 // patch in the trap.
3001 3003
3002 3004 int patch_offset = ((intptr_t)__ pc()) - start;
3003 3005
3004 3006 __ nop();
3005 3007
3006 3008
3007 3009 // Return
3008 3010
3009 3011 __ ret();
3010 3012 __ delayed()->restore();
3011 3013
3012 3014 __ flush();
3013 3015
3014 3016 nmethod *nm = nmethod::new_dtrace_nmethod(
3015 3017 method, masm->code(), vep_offset, patch_offset, frame_complete,
3016 3018 stack_slots / VMRegImpl::slots_per_word);
3017 3019 return nm;
3018 3020
3019 3021 }
3020 3022
3021 3023 #endif // HAVE_DTRACE_H
3022 3024
3023 3025 // This function returns the adjustment (in number of words) to a c2i adapter
3024 3026 // activation for use during deoptimization
3025 3027 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3026 3028 assert(callee_locals >= callee_parameters,
3027 3029 "test and remove; got more parms than locals");
3028 3030 if (callee_locals < callee_parameters)
3029 3031 return 0; // No adjustment for negative locals
3030 3032 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3031 3033 return round_to(diff, WordsPerLong);
3032 3034 }
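// Worked example (comment only; the constants are assumed for illustration):
// with callee_parameters == 2, callee_locals == 5,
// Interpreter::stackElementWords == 1 and WordsPerLong == 2, the extra
// locals need diff = (5 - 2) * 1 = 3 words, rounded up to 4 words so the
// adjusted frame stays long-aligned.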
3033 3035
3034 3036 // "Top of Stack" slots that may be unused by the calling convention but must
3035 3037 // otherwise be preserved.
3036 3038 // On Intel these are not necessary and the value can be zero.
3037 3039 // On Sparc this describes the words reserved for storing a register window
3038 3040 // when an interrupt occurs.
3039 3041 uint SharedRuntime::out_preserve_stack_slots() {
3040 3042 return frame::register_save_words * VMRegImpl::slots_per_word;
3041 3043 }
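// For example (illustrative; the actual constants depend on the build):
// with frame::register_save_words == 16 and VMRegImpl::slots_per_word == 2
// (LP64), callers must leave 32 stack slots untouched below their outgoing
// arguments for the register-window save area.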
3042 3044
3043 3045 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3044 3046 //
3045 3047 // Common out the new frame generation for deopt and uncommon trap
3046 3048 //
3047 3049 Register G3pcs = G3_scratch; // Array of new pcs (input)
3048 3050 Register Oreturn0 = O0;
3049 3051 Register Oreturn1 = O1;
3050 3052 Register O2UnrollBlock = O2;
3051 3053 Register O3array = O3; // Array of frame sizes (input)
3052 3054 Register O4array_size = O4; // number of frames (input)
3053 3055 Register O7frame_size = O7; // frame size (loaded from O3array below)
3054 3056
3055 3057 __ ld_ptr(O3array, 0, O7frame_size);
3056 3058 __ sub(G0, O7frame_size, O7frame_size);
3057 3059 __ save(SP, O7frame_size, SP);
3058 3060 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3059 3061
3060 3062 #ifdef ASSERT
3061 3063 // make sure that the frames are aligned properly
3062 3064 #ifndef _LP64
3063 3065 __ btst(wordSize*2-1, SP);
3064 3066 __ breakpoint_trap(Assembler::notZero);
3065 3067 #endif
3066 3068 #endif
3067 3069
3068 3070 // Deopt needs to pass some extra live values from frame to frame
3069 3071
3070 3072 if (deopt) {
3071 3073 __ mov(Oreturn0->after_save(), Oreturn0);
3072 3074 __ mov(Oreturn1->after_save(), Oreturn1);
3073 3075 }
3074 3076
3075 3077 __ mov(O4array_size->after_save(), O4array_size);
3076 3078 __ sub(O4array_size, 1, O4array_size);
3077 3079 __ mov(O3array->after_save(), O3array);
3078 3080 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3079 3081 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3080 3082
3081 3083 #ifdef ASSERT
3082 3084 // trash registers to show a clear pattern in backtraces
3083 3085 __ set(0xDEAD0000, I0);
3084 3086 __ add(I0, 2, I1);
3085 3087 __ add(I0, 4, I2);
3086 3088 __ add(I0, 6, I3);
3087 3089 __ add(I0, 8, I4);
3088 3090 // Don't touch I5; it could hold a valuable savedSP
3089 3091 __ set(0xDEADBEEF, L0);
3090 3092 __ mov(L0, L1);
3091 3093 __ mov(L0, L2);
3092 3094 __ mov(L0, L3);
3093 3095 __ mov(L0, L4);
3094 3096 __ mov(L0, L5);
3095 3097
3096 3098 // trash the return value as there is nothing to return yet
3097 3099 __ set(0xDEAD0001, O7);
3098 3100 #endif
3099 3101
3100 3102 __ mov(SP, O5_savedSP);
3101 3103 }
3102 3104
3103 3105
3104 3106 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3105 3107 //
3106 3108 // loop through the UnrollBlock info and create new frames
3107 3109 //
3108 3110 Register G3pcs = G3_scratch;
3109 3111 Register Oreturn0 = O0;
3110 3112 Register Oreturn1 = O1;
3111 3113 Register O2UnrollBlock = O2;
3112 3114 Register O3array = O3;
3113 3115 Register O4array_size = O4;
3114 3116 Label loop;
3115 3117
3116 3118 // Before we make new frames, check to see if stack space is available.
3117 3119 // Do this after the caller's return address is on top of the stack.
3118 3120 if (UseStackBanging) {
3119 3121 // Get total frame size for interpreted frames
3120 3122 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3121 3123 __ bang_stack_size(O4, O3, G3_scratch);
3122 3124 }
3123 3125
3124 3126 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3125 3127 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3126 3128 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3127 3129
3128 3130 // Adjust old interpreter frame to make space for new frame's extra java locals
3129 3131 //
3130 3132 // We capture the original sp for the transition frame only because it is needed in
3131 3133 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3132 3134 // every interpreter frame captures a savedSP it is only needed at the transition
3133 3135 // (fortunately). If we had to have it correct everywhere then we would need to
3134 3136 // be told the sp_adjustment for each frame we create. If the frame size array
3135 3137 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3136 3138 // for each frame we create and keep up the illusion everywhere.
3137 3139 //
3138 3140
3139 3141 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3140 3142 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3141 3143 __ sub(SP, O7, SP);
3142 3144
3143 3145 #ifdef ASSERT
3144 3146 // make sure that there is at least one entry in the array
3145 3147 __ tst(O4array_size);
3146 3148 __ breakpoint_trap(Assembler::zero);
3147 3149 #endif
3148 3150
3149 3151 // Now push the new interpreter frames
3150 3152 __ bind(loop);
3151 3153
3152 3154 // allocate a new frame, filling the registers
3153 3155
3154 3156 gen_new_frame(masm, deopt); // allocate an interpreter frame
3155 3157
3156 3158 __ tst(O4array_size);
3157 3159 __ br(Assembler::notZero, false, Assembler::pn, loop);
3158 3160 __ delayed()->add(O3array, wordSize, O3array);
3159 3161 __ ld_ptr(G3pcs, 0, O7); // load final frame's new pc
3160 3162
3161 3163 }
3162 3164
3163 3165 //------------------------------generate_deopt_blob----------------------------
3164 3166 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3165 3167 // instead.
3166 3168 void SharedRuntime::generate_deopt_blob() {
3167 3169 // allocate space for the code
3168 3170 ResourceMark rm;
3169 3171 // setup code generation tools
3170 3172 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
3171 3173 #ifdef _LP64
3172 3174 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3173 3175 #else
3174 3176 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3175 3177 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3176 3178 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3177 3179 #endif /* _LP64 */
3178 3180 MacroAssembler* masm = new MacroAssembler(&buffer);
3179 3181 FloatRegister Freturn0 = F0;
3180 3182 Register Greturn1 = G1;
3181 3183 Register Oreturn0 = O0;
3182 3184 Register Oreturn1 = O1;
3183 3185 Register O2UnrollBlock = O2;
3184 3186 Register L0deopt_mode = L0;
3185 3187 Register G4deopt_mode = G4_scratch;
3186 3188 int frame_size_words;
3187 3189 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3188 3190 #if !defined(_LP64) && defined(COMPILER2)
3189 3191 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3190 3192 #endif
3191 3193 Label cont;
3192 3194
3193 3195 OopMapSet *oop_maps = new OopMapSet();
3194 3196
3195 3197 //
3196 3198 // This is the entry point for code which is returning to a de-optimized
3197 3199 // frame.
3198 3200 // The steps taken by this frame are as follows:
3199 3201 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3200 3202 // and all potentially live registers (at a pollpoint many registers can be live).
3201 3203 //
3202 3204 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3203 3205 // returns information about the number and size of interpreter frames
3204 3206 // which are equivalent to the frame which is being deoptimized)
3205 3207 // - deallocate the unpack frame, restoring only result values. Other
3206 3208 // volatile registers will now be captured in the vframeArray as needed.
3207 3209 // - deallocate the deoptimization frame
3208 3210 // - in a loop using the information returned in the previous step
3209 3211 // push new interpreter frames (take care to propagate the return
3210 3212 // values through each new frame pushed)
3211 3213 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3212 3214 // - call the C routine: Deoptimization::unpack_frames (this function
3213 3215 // lays out values on the interpreter frame which was just created)
3214 3216 // - deallocate the dummy unpack_frame
3215 3217 // - ensure that all the return values are correctly set and then do
3216 3218 // a return to the interpreter entry point
3217 3219 //
3218 3220 // Refer to the following methods for more information:
3219 3221 // - Deoptimization::fetch_unroll_info
3220 3222 // - Deoptimization::unpack_frames
3221 3223
3222 3224 OopMap* map = NULL;
3223 3225
3224 3226 int start = __ offset();
3225 3227
3226 3228 // restore G2, the trampoline destroyed it
3227 3229 __ get_thread();
3228 3230
3229 3231 // On entry we have been called by the deoptimized nmethod with a call that
3230 3232 // replaced the original call (or safepoint polling location) so the deoptimizing
3231 3233 // pc is now in O7. Return values are still in the expected places
3232 3234
3233 3235 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3234 3236 __ ba(false, cont);
3235 3237 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3236 3238
3237 3239 int exception_offset = __ offset() - start;
3238 3240
3239 3241 // restore G2, the trampoline destroyed it
3240 3242 __ get_thread();
3241 3243
3242 3244 // On entry we have been jumped to by the exception handler (or exception_blob
3243 3245 // for server). O0 contains the exception oop and O7 contains the original
3244 3246 // exception pc. So if we push a frame here it will look to the
3245 3247 // stack walking code (fetch_unroll_info) just like a normal call so
3246 3248 // state will be extracted normally.
3247 3249
3248 3250 // save exception oop in JavaThread and fall through into the
3249 3251 // exception_in_tls case since they are handled in the same way except
3250 3252 // for where the pending exception is kept.
3251 3253 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3252 3254
3253 3255 //
3254 3256 // Vanilla deoptimization with an exception pending in exception_oop
3255 3257 //
3256 3258 int exception_in_tls_offset = __ offset() - start;
3257 3259
3258 3260 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3259 3261 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3260 3262
3261 3263 // Restore G2_thread
3262 3264 __ get_thread();
3263 3265
3264 3266 #ifdef ASSERT
3265 3267 {
3266 3268 // verify that there is really an exception oop in exception_oop
3267 3269 Label has_exception;
3268 3270 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3269 3271 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
3270 3272 __ delayed()->nop();
3271 3273 __ stop("no exception in thread");
3272 3274 __ bind(has_exception);
3273 3275
3274 3276 // verify that there is no pending exception
3275 3277 Label no_pending_exception;
3276 3278 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3277 3279 __ ld_ptr(exception_addr, Oexception);
3278 3280 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3279 3281 __ delayed()->nop();
3280 3282 __ stop("must not have pending exception here");
3281 3283 __ bind(no_pending_exception);
3282 3284 }
3283 3285 #endif
3284 3286
3285 3287 __ ba(false, cont);
3286 3288 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3287 3289
3288 3290 //
3289 3291 // Reexecute entry, similar to c2 uncommon trap
3290 3292 //
3291 3293 int reexecute_offset = __ offset() - start;
3292 3294
3293 3295 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3294 3296 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3295 3297
3296 3298 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3297 3299
3298 3300 __ bind(cont);
3299 3301
3300 3302 __ set_last_Java_frame(SP, noreg);
3301 3303
3302 3304 // do the call by hand so we can get the oopmap
3303 3305
3304 3306 __ mov(G2_thread, L7_thread_cache);
3305 3307 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3306 3308 __ delayed()->mov(G2_thread, O0);
3307 3309
3308 3310 // Set an oopmap for the call site; this describes all our saved volatile registers
3309 3311
3310 3312 oop_maps->add_gc_map( __ offset()-start, map);
3311 3313
3312 3314 __ mov(L7_thread_cache, G2_thread);
3313 3315
3314 3316 __ reset_last_Java_frame();
3315 3317
3316 3318 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3317 3319 // so this move will survive
3318 3320
3319 3321 __ mov(L0deopt_mode, G4deopt_mode);
3320 3322
3321 3323 __ mov(O0, O2UnrollBlock->after_save());
3322 3324
3323 3325 RegisterSaver::restore_result_registers(masm);
3324 3326
3325 3327 Label noException;
3326 3328 __ cmp(G4deopt_mode, Deoptimization::Unpack_exception); // Was exception pending?
3327 3329 __ br(Assembler::notEqual, false, Assembler::pt, noException);
3328 3330 __ delayed()->nop();
3329 3331
3330 3332 // Move the pending exception from exception_oop to Oexception so
3331 3333 // the pending exception will be picked up by the interpreter.
3332 3334 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3333 3335 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3334 3336 __ bind(noException);
3335 3337
3336 3338 // deallocate the deoptimization frame taking care to preserve the return values
3337 3339 __ mov(Oreturn0, Oreturn0->after_save());
3338 3340 __ mov(Oreturn1, Oreturn1->after_save());
3339 3341 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3340 3342 __ restore();
3341 3343
3342 3344 // Allocate new interpreter frame(s) and possible c2i adapter frame
3343 3345
3344 3346 make_new_frames(masm, true);
3345 3347
3346 3348 // push a dummy "unpack_frame" taking care of float return values and
3347 3349 // call Deoptimization::unpack_frames to have the unpacker layout
3348 3350 // information in the interpreter frames just created and then return
3349 3351 // to the interpreter entry point
3350 3352 __ save(SP, -frame_size_words*wordSize, SP);
3351 3353 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3352 3354 #if !defined(_LP64)
3353 3355 #if defined(COMPILER2)
3354 3356 // In 32-bit builds C2 returns longs in the single register G1
3355 3357 __ stx(Greturn1, saved_Greturn1_addr);
3356 3358 #endif
3357 3359 __ set_last_Java_frame(SP, noreg);
3358 3360 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3359 3361 #else
3360 3362 // LP64 uses g4 in set_last_Java_frame
3361 3363 __ mov(G4deopt_mode, O1);
3362 3364 __ set_last_Java_frame(SP, G0);
3363 3365 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3364 3366 #endif
3365 3367 __ reset_last_Java_frame();
3366 3368 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3367 3369
3368 3370 #if !defined(_LP64) && defined(COMPILER2)
3369 3371 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3370 3372 // I0/I1 if the return value is long.
3371 3373 Label not_long;
3372 3374 __ cmp(O0, T_LONG);
3373 3375 __ br(Assembler::notEqual, false, Assembler::pt, not_long);
3374 3376 __ delayed()->nop();
3375 3377 __ ldd(saved_Greturn1_addr, I0);
3376 3378 __ bind(not_long);
3377 3379 #endif
3378 3380 __ ret();
3379 3381 __ delayed()->restore();
3380 3382
3381 3383 masm->flush();
3382 3384 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3383 3385 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3384 3386 }
3385 3387
3386 3388 #ifdef COMPILER2
3387 3389
3388 3390 //------------------------------generate_uncommon_trap_blob--------------------
3389 3391 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3390 3392 // instead.
3391 3393 void SharedRuntime::generate_uncommon_trap_blob() {
3392 3394 // allocate space for the code
3393 3395 ResourceMark rm;
3394 3396 // setup code generation tools
3395 3397 int pad = VerifyThread ? 512 : 0;
3396 3398 #ifdef _LP64
3397 3399 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3398 3400 #else
3399 3401 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3400 3402 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3401 3403 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3402 3404 #endif
3403 3405 MacroAssembler* masm = new MacroAssembler(&buffer);
3404 3406 Register O2UnrollBlock = O2;
3405 3407 Register O2klass_index = O2;
3406 3408
3407 3409 //
3408 3410 // This is the entry point for all traps the compiler takes when it thinks
3409 3411 // it cannot handle further execution of compiled code. The frame is
3410 3412 // deoptimized in these cases and converted into interpreter frames for
3411 3413 // execution.
3412 3414 // The steps taken by this frame are as follows:
3413 3415 // - push a fake "unpack_frame"
3414 3416 // - call the C routine Deoptimization::uncommon_trap (this function
3415 3417 // packs the current compiled frame into vframe arrays and returns
3416 3418 // information about the number and size of interpreter frames which
3417 3419 // are equivalent to the frame which is being deoptimized)
3418 3420 // - deallocate the "unpack_frame"
3419 3421 // - deallocate the deoptimization frame
3420 3422 // - in a loop using the information returned in the previous step
3421 3423 // push interpreter frames;
3422 3424 // - create a dummy "unpack_frame"
3423 3425 // - call the C routine: Deoptimization::unpack_frames (this function
3424 3426 // lays out values on the interpreter frame which was just created)
3425 3427 // - deallocate the dummy unpack_frame
3426 3428 // - return to the interpreter entry point
3427 3429 //
3428 3430 // Refer to the following methods for more information:
3429 3431 // - Deoptimization::uncommon_trap
3430 3432 // - Deoptimization::unpack_frames
3431 3433
3432 3434 // the unloaded class index is in O0 (first parameter to this blob)
3433 3435
3434 3436 // push a dummy "unpack_frame"
3435 3437 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3436 3438 // vframe array and return the UnrollBlock information
3437 3439 __ save_frame(0);
3438 3440 __ set_last_Java_frame(SP, noreg);
3439 3441 __ mov(I0, O2klass_index);
3440 3442 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3441 3443 __ reset_last_Java_frame();
3442 3444 __ mov(O0, O2UnrollBlock->after_save());
3443 3445 __ restore();
3444 3446
3445 3447 // deallocate the deoptimized frame taking care to preserve the return values
3446 3448 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3447 3449 __ restore();
3448 3450
3449 3451 // Allocate new interpreter frame(s) and possible c2i adapter frame
3450 3452
3451 3453 make_new_frames(masm, false);
3452 3454
3453 3455 // push a dummy "unpack_frame" taking care of float return values and
3454 3456 // call Deoptimization::unpack_frames to have the unpacker layout
3455 3457 // information in the interpreter frames just created and then return
3456 3458 // to the interpreter entry point
3457 3459 __ save_frame(0);
3458 3460 __ set_last_Java_frame(SP, noreg);
3459 3461 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3460 3462 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3461 3463 __ reset_last_Java_frame();
3462 3464 __ ret();
3463 3465 __ delayed()->restore();
3464 3466
3465 3467 masm->flush();
3466 3468 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3467 3469 }
3468 3470
3469 3471 #endif // COMPILER2
3470 3472
3471 3473 //------------------------------generate_handler_blob-------------------
3472 3474 //
3473 3475 // Generate a special Compile2Runtime blob that saves all registers, and sets
3474 3476 // up an OopMap.
3475 3477 //
3476 3478 // This blob is jumped to (via a breakpoint and the signal handler) from a
3477 3479 // safepoint in compiled code. On entry to this blob, O7 contains the
3478 3480 // address in the original nmethod at which we should resume normal execution.
3479 3481 // Thus, this blob looks like a subroutine which must preserve lots of
3480 3482 // registers and return normally. Note that O7 is never register-allocated,
3481 3483 // so it is guaranteed to be free here.
3482 3484 //
3483 3485
3484 3486 // The hardest part of what this blob must do is to save the 64-bit %o
3485 3487 // registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
3486 3488 // an interrupt will chop off their heads. Making space in the caller's frame
3487 3489 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3488 3490 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3489 3491 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
3490 3492 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3491 3493 // Tricky, tricky, tricky...
3492 3494
3493 3495 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
3494 3496 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3495 3497
3496 3498 // allocate space for the code
3497 3499 ResourceMark rm;
3498 3500 // setup code generation tools
3499 3501 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3500 3502 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3501 3503 // even larger with TraceJumps
3502 3504 int pad = TraceJumps ? 512 : 0;
3503 3505 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3504 3506 MacroAssembler* masm = new MacroAssembler(&buffer);
3505 3507 int frame_size_words;
3506 3508 OopMapSet *oop_maps = new OopMapSet();
3507 3509 OopMap* map = NULL;
3508 3510
3509 3511 int start = __ offset();
3510 3512
3511 3513 // If the poll was taken at a return (cause_return), pop the frame with a "restore" first
3512 3514 if (cause_return) {
3513 3515 __ restore();
3514 3516 } else {
3515 3517 // Make it look like we were called via the poll
3516 3518 // so that frame constructor always sees a valid return address
3517 3519 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3518 3520 __ sub(O7, frame::pc_return_offset, O7);
3519 3521 }
3520 3522
3521 3523 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3522 3524
3523 3525 // setup last_Java_sp (blows G4)
3524 3526 __ set_last_Java_frame(SP, noreg);
3525 3527
3526 3528 // call into the runtime to handle the safepoint polling page exception
3527 3529 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3528 3530 __ mov(G2_thread, O0);
3529 3531 __ save_thread(L7_thread_cache);
3530 3532 __ call(call_ptr);
3531 3533 __ delayed()->nop();
3532 3534
3533 3535 // Set an oopmap for the call site.
3534 3536 // We need this not only for callee-saved registers, but also for volatile
3535 3537 // registers that the compiler might be keeping live across a safepoint.
3536 3538
3537 3539 oop_maps->add_gc_map( __ offset() - start, map);
3538 3540
3539 3541 __ restore_thread(L7_thread_cache);
3540 3542 // clear last_Java_sp
3541 3543 __ reset_last_Java_frame();
3542 3544
3543 3545 // Check for exceptions
3544 3546 Label pending;
3545 3547
3546 3548 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3547 3549 __ tst(O1);
3548 3550 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3549 3551 __ delayed()->nop();
3550 3552
3551 3553 RegisterSaver::restore_live_registers(masm);
3552 3554
3553 3555 // We are back to the original state on entry and ready to go.
3554 3556
3555 3557 __ retl();
3556 3558 __ delayed()->nop();
3557 3559
3558 3560 // Pending exception after the safepoint
3559 3561
3560 3562 __ bind(pending);
3561 3563
3562 3564 RegisterSaver::restore_live_registers(masm);
3563 3565
3564 3566 // We are back to the original state on entry.
3565 3567
3566 3568 // Tail-call forward_exception_entry, with the issuing PC in O7,
3567 3569 // so it looks like the original nmethod called forward_exception_entry.
3568 3570 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3569 3571 __ JMP(O0, 0);
3570 3572 __ delayed()->nop();
3571 3573
3572 3574 // -------------
3573 3575 // make sure all code is generated
3574 3576 masm->flush();
3575 3577
3576 3578 // return safepoint blob
3577 3579 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3578 3580 }
3579 3581
3580 3582 //
3581 3583 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3582 3584 //
3583 3585 // Generate a stub that calls into the VM to find out the proper destination
3584 3586 // of a Java call. All the argument registers are live at this point
3585 3587 // but since this is generic code we don't know what they are and the caller
3586 3588 // must do any GC of the args.
3587 3589 //
3588 3590 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
3589 3591 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3590 3592
3591 3593 // allocate space for the code
3592 3594 ResourceMark rm;
3593 3595 // setup code generation tools
3594 3596 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3595 3597 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3596 3598 // even larger with TraceJumps
3597 3599 int pad = TraceJumps ? 512 : 0;
3598 3600 CodeBuffer buffer(name, 1600 + pad, 512);
3599 3601 MacroAssembler* masm = new MacroAssembler(&buffer);
3600 3602 int frame_size_words;
3601 3603 OopMapSet *oop_maps = new OopMapSet();
3602 3604 OopMap* map = NULL;
3603 3605
3604 3606 int start = __ offset();
3605 3607
3606 3608 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3607 3609
3608 3610 int frame_complete = __ offset();
3609 3611
3610 3612 // setup last_Java_sp (blows G4)
3611 3613 __ set_last_Java_frame(SP, noreg);
3612 3614
3613 3615 // call into the runtime to resolve the call site
3614 3616 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3615 3617 __ mov(G2_thread, O0);
3616 3618 __ save_thread(L7_thread_cache);
3617 3619 __ call(destination, relocInfo::runtime_call_type);
3618 3620 __ delayed()->nop();
3619 3621
3620 3622 // O0 contains the address we are going to jump to assuming no exception got installed
3621 3623
3622 3624 // Set an oopmap for the call site.
3623 3625 // We need this not only for callee-saved registers, but also for volatile
3624 3626 // registers that the compiler might be keeping live across a safepoint.
3625 3627
3626 3628 oop_maps->add_gc_map( __ offset() - start, map);
3627 3629
3628 3630 __ restore_thread(L7_thread_cache);
3629 3631 // clear last_Java_sp
3630 3632 __ reset_last_Java_frame();
3631 3633
3632 3634 // Check for exceptions
3633 3635 Label pending;
3634 3636
3635 3637 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3636 3638 __ tst(O1);
3637 3639 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3638 3640 __ delayed()->nop();
3639 3641
3640 3642 // get the returned methodOop
3641 3643
3642 3644 __ get_vm_result(G5_method);
3643 3645 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3644 3646
3645 3647 // O0 is where we want to jump, overwrite G3 which is saved and scratch
3646 3648
3647 3649 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3648 3650
3649 3651 RegisterSaver::restore_live_registers(masm);
3650 3652
3651 3653 // We are back to the original state on entry and ready to go.
3652 3654
3653 3655 __ JMP(G3, 0);
3654 3656 __ delayed()->nop();
3655 3657
3656 3658 // Pending exception after the safepoint
3657 3659
3658 3660 __ bind(pending);
3659 3661
3660 3662 RegisterSaver::restore_live_registers(masm);
3661 3663
3662 3664 // We are back to the original state on entry.
3663 3665
3664 3666 // Tail-call forward_exception_entry, with the issuing PC in O7,
3665 3667 // so it looks like the original nmethod called forward_exception_entry.
3666 3668 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3667 3669 __ JMP(O0, 0);
3668 3670 __ delayed()->nop();
3669 3671
3670 3672 // -------------
3671 3673 // make sure all code is generated
3672 3674 masm->flush();
3673 3675
3674 3676 // return the blob
3675 3677 // frame_size_words or bytes??
3676 3678 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3677 3679 }
3678 3680
3679 3681 void SharedRuntime::generate_stubs() {
3680 3682
3681 3683 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
3682 3684 "wrong_method_stub");
3683 3685
3684 3686 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
3685 3687 "ic_miss_stub");
3686 3688
3687 3689 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
3688 3690 "resolve_opt_virtual_call");
3689 3691
3690 3692 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
3691 3693 "resolve_virtual_call");
3692 3694
3693 3695 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
3694 3696 "resolve_static_call");
3695 3697
3696 3698 _polling_page_safepoint_handler_blob =
3697 3699 generate_handler_blob(CAST_FROM_FN_PTR(address,
3698 3700 SafepointSynchronize::handle_polling_page_exception), false);
3699 3701
3700 3702 _polling_page_return_handler_blob =
3701 3703 generate_handler_blob(CAST_FROM_FN_PTR(address,
3702 3704 SafepointSynchronize::handle_polling_page_exception), true);
3703 3705
3704 3706 generate_deopt_blob();
3705 3707
3706 3708 #ifdef COMPILER2
3707 3709 generate_uncommon_trap_blob();
3708 3710 #endif // COMPILER2
3709 3711 }