--- old/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ new/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
1 1 /*
2 2 * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_sharedRuntime_sparc.cpp.incl"
27 27
28 28 #define __ masm->
29 29
30 30 #ifdef COMPILER2
31 31 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
32 32 #endif // COMPILER2
33 33
34 34 DeoptimizationBlob* SharedRuntime::_deopt_blob;
35 35 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
36 36 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
37 37 RuntimeStub* SharedRuntime::_wrong_method_blob;
38 38 RuntimeStub* SharedRuntime::_ic_miss_blob;
39 39 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
40 40 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
41 41 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
42 42
43 43 class RegisterSaver {
44 44
45 45 // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
46 46 // The Oregs are problematic. In the 32bit build the compiler can
47 47 // have O registers live with 64 bit quantities. A window save will
48 48 // cut the heads off of the registers. We have to do a very extensive
49 49 // stack dance to save and restore these properly.
50 50
51 51 // Note that the Oregs problem only exists if we block at either a polling
52 52 // page exception or a compiled code safepoint that was not originally a call,
53 53 // or if we deoptimize following one of these kinds of safepoints.
54 54
55 55 // Lots of registers to save. For all builds, a window save will preserve
56 56 // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
57 57 // builds a window-save will preserve the %o registers. In the LION build
58 58 // we need to save the 64-bit %o registers which requires we save them
59 59 // before the window-save (as then they become %i registers and get their
60 60 // heads chopped off on interrupt). We have to save some %g registers here
61 61 // as well.
62 62 enum {
63 63 // This frame's save area. Includes extra space for the native call:
64 64 // vararg's layout space and the like. Briefly holds the caller's
65 65 // register save area.
66 66 call_args_area = frame::register_save_words_sp_offset +
67 67 frame::memory_parameter_word_sp_offset*wordSize,
68 68 // Make sure save locations are always 8 byte aligned.
69 69 // can't use round_to because it doesn't produce compile time constant
70 70 start_of_extra_save_area = ((call_args_area + 7) & ~7),
71 71 g1_offset = start_of_extra_save_area, // g-regs needing saving
72 72 g3_offset = g1_offset+8,
73 73 g4_offset = g3_offset+8,
74 74 g5_offset = g4_offset+8,
75 75 o0_offset = g5_offset+8,
76 76 o1_offset = o0_offset+8,
77 77 o2_offset = o1_offset+8,
78 78 o3_offset = o2_offset+8,
79 79 o4_offset = o3_offset+8,
80 80 o5_offset = o4_offset+8,
81 81 start_of_flags_save_area = o5_offset+8,
82 82 ccr_offset = start_of_flags_save_area,
83 83 fsr_offset = ccr_offset + 8,
84 84 d00_offset = fsr_offset+8, // Start of float save area
85 85 register_save_size = d00_offset+8*32
86 86 };
87 87
88 88
89 89 public:
90 90
91 91 static int Oexception_offset() { return o0_offset; };
92 92 static int G3_offset() { return g3_offset; };
93 93 static int G5_offset() { return g5_offset; };
94 94 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
95 95 static void restore_live_registers(MacroAssembler* masm);
96 96
97 97 // During deoptimization only the result registers need to be restored;
98 98 // all the other values have already been extracted.
99 99
100 100 static void restore_result_registers(MacroAssembler* masm);
101 101 };
102 102
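// A minimal usage sketch (assumed shape, not a verbatim call site from this
// file): blobs that must preserve all volatile state bracket their runtime
// call with the two entry points above, roughly:
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set up arguments, call into the VM, record 'map' at the call pc ...
//   RegisterSaver::restore_live_registers(masm);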
103 103 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
104 104 // Record volatile registers as callee-save values in an OopMap so their save locations will be
105 105 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
106 106 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
107 107 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
108 108 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
109 109 int i;
110 110 // Always make the frame size 16 byte aligned.
111 111 int frame_size = round_to(additional_frame_words + register_save_size, 16);
112 112 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
113 113 int frame_size_in_slots = frame_size / sizeof(jint);
114 114 // CodeBlob frame size is in words.
115 115 *total_frame_words = frame_size / wordSize;
116 116 // OopMap* map = new OopMap(*total_frame_words, 0);
117 117 OopMap* map = new OopMap(frame_size_in_slots, 0);
118 118
119 119 #if !defined(_LP64)
120 120
121 121 // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
122 122 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
123 123 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
124 124 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
125 125 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
126 126 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
127 127 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
128 128 #endif /* _LP64 */
129 129
130 130 __ save(SP, -frame_size, SP);
131 131
132 132 #ifndef _LP64
133 133 // Reload the 64 bit Oregs. Although they are now Iregs we load them
134 134 // to Oregs here to avoid interrupts cutting off their heads
135 135
136 136 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
137 137 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
138 138 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
139 139 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
140 140 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
141 141 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
142 142
143 143 __ stx(O0, SP, o0_offset+STACK_BIAS);
144 144 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
145 145
146 146 __ stx(O1, SP, o1_offset+STACK_BIAS);
147 147
148 148 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
149 149
150 150 __ stx(O2, SP, o2_offset+STACK_BIAS);
151 151 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
152 152
153 153 __ stx(O3, SP, o3_offset+STACK_BIAS);
154 154 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
155 155
156 156 __ stx(O4, SP, o4_offset+STACK_BIAS);
157 157 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
158 158
159 159 __ stx(O5, SP, o5_offset+STACK_BIAS);
160 160 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
161 161 #endif /* _LP64 */
162 162
163 163
164 164 #ifdef _LP64
165 165 int debug_offset = 0;
166 166 #else
167 167 int debug_offset = 4;
168 168 #endif
169 169 // Save the G's
170 170 __ stx(G1, SP, g1_offset+STACK_BIAS);
171 171 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
172 172
173 173 __ stx(G3, SP, g3_offset+STACK_BIAS);
174 174 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
175 175
176 176 __ stx(G4, SP, g4_offset+STACK_BIAS);
177 177 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
178 178
179 179 __ stx(G5, SP, g5_offset+STACK_BIAS);
180 180 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
181 181
182 182 // This is really a waste but we'll keep things as they were for now
183 183 if (true) {
184 184 #ifndef _LP64
185 185 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
186 186 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
187 187 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
188 188 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
189 189 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
190 190 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
191 191 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
192 192 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
193 193 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
194 194 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
195 195 #endif /* _LP64 */
196 196 }
197 197
198 198
199 199 // Save the flags
200 200 __ rdccr( G5 );
201 201 __ stx(G5, SP, ccr_offset+STACK_BIAS);
202 202 __ stxfsr(SP, fsr_offset+STACK_BIAS);
203 203
204 204 // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
205 205 int offset = d00_offset;
206 206 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
207 207 FloatRegister f = as_FloatRegister(i);
208 208 __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
209 209 // Record as callee saved both halves of double registers (2 float registers).
210 210 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
211 211 map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
212 212 offset += sizeof(double);
213 213 }
214 214
215 215 // And we're done.
216 216
217 217 return map;
218 218 }
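// Example of the size arithmetic above (byte counts illustrative; the real
// register_save_size is determined by the enum in RegisterSaver):
//   additional_frame_words == 0, register_save_size == 392
//   frame_size          == round_to(392, 16) == 400 bytes
//   frame_size_in_slots == 400 / sizeof(jint) == 100   // c2 stack slots
//   *total_frame_words  == 400 / wordSize              // 50 (LP64) or 100 (32-bit)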
219 219
220 220
221 221 // Pop the current frame and restore all the registers that we
222 222 // saved.
223 223 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
224 224
225 225 // Restore all the FP registers
226 226 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
227 227 __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
228 228 }
229 229
230 230 __ ldx(SP, ccr_offset+STACK_BIAS, G1);
231 231 __ wrccr (G1) ;
232 232
233 233 // Restore the G's
234 234 // Note that G2 (AKA GThread) must be saved and restored separately.
235 235 // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
236 236
237 237 __ ldx(SP, g1_offset+STACK_BIAS, G1);
238 238 __ ldx(SP, g3_offset+STACK_BIAS, G3);
239 239 __ ldx(SP, g4_offset+STACK_BIAS, G4);
240 240 __ ldx(SP, g5_offset+STACK_BIAS, G5);
241 241
242 242
243 243 #if !defined(_LP64)
244 244 // Restore the 64-bit O's.
245 245 __ ldx(SP, o0_offset+STACK_BIAS, O0);
246 246 __ ldx(SP, o1_offset+STACK_BIAS, O1);
247 247 __ ldx(SP, o2_offset+STACK_BIAS, O2);
248 248 __ ldx(SP, o3_offset+STACK_BIAS, O3);
249 249 __ ldx(SP, o4_offset+STACK_BIAS, O4);
250 250 __ ldx(SP, o5_offset+STACK_BIAS, O5);
251 251
252 252 // And temporarily place them in TLS
253 253
254 254 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
255 255 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
256 256 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
257 257 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
258 258 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
259 259 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
260 260 #endif /* _LP64 */
261 261
262 262 // Restore flags
263 263
264 264 __ ldxfsr(SP, fsr_offset+STACK_BIAS);
265 265
266 266 __ restore();
267 267
268 268 #if !defined(_LP64)
269 269 // Now reload the 64bit Oregs after we've restored the window.
270 270 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
271 271 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
272 272 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
273 273 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
274 274 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
275 275 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
276 276 #endif /* _LP64 */
277 277
278 278 }
279 279
280 280 // Pop the current frame and restore the registers that might be holding
281 281 // a result.
282 282 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
283 283
284 284 #if !defined(_LP64)
285 285 // 32bit build returns longs in G1
286 286 __ ldx(SP, g1_offset+STACK_BIAS, G1);
287 287
288 288 // Retrieve the 64-bit O's.
289 289 __ ldx(SP, o0_offset+STACK_BIAS, O0);
290 290 __ ldx(SP, o1_offset+STACK_BIAS, O1);
291 291 // and save to TLS
292 292 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
293 293 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
294 294 #endif /* _LP64 */
295 295
296 296 __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
297 297
298 298 __ restore();
299 299
300 300 #if !defined(_LP64)
301 301 // Now reload the 64bit Oregs after we've restored the window.
302 302 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
303 303 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
304 304 #endif /* _LP64 */
305 305
306 306 }
307 307
308 308 // The java_calling_convention describes stack locations as ideal slots on
309 309 // a frame with no abi restrictions. Since we must observe abi restrictions
310 310 // (like the placement of the register window) the slots must be biased by
311 311 // the following value.
312 312 static int reg2offset(VMReg r) {
313 313 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
314 314 }
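// A worked example of the bias (the preserve count is illustrative; the real
// value comes from SharedRuntime::out_preserve_stack_slots()):
//   r->reg2stack() == 3, out_preserve_stack_slots() == 16,
//   VMRegImpl::stack_slot_size == 4  =>  reg2offset(r) == (3 + 16) * 4 == 76.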
315 315
316 316 // ---------------------------------------------------------------------------
317 317 // Read the array of BasicTypes from a signature, and compute where the
318 318 // arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
319 319 // quantities. Values less than VMRegImpl::stack0 are registers, those above
320 320 // refer to 4-byte stack slots. All stack slots are based off of the window
321 321 // top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
322 322 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
323 323 // values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
324 324 // integer registers. Values 64-95 are the (32-bit only) float registers.
325 325 // Each 32-bit quantity is given its own number, so the integer registers
326 326 // (in either 32- or 64-bit builds) use 2 numbers. For example, there is
327 327 // an O0-low and an O0-high. Essentially, all int register numbers are doubled.
328 328
329 329 // Register results are passed in O0-O5, for outgoing call arguments. To
330 330 // convert to incoming arguments, convert all O's to I's. The regs array
331 331 // refers to the low and hi 32-bit words of 64-bit registers or stack slots.
332 332 // If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
333 333 // 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
334 334 // passed (used as a placeholder for the other half of longs and doubles in
335 335 // the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
336 336 // regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
337 337 // Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
338 338 // == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
339 339 // same VMRegPair.
340 340
341 341 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
342 342 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
343 343 // units regardless of build.
344 344
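// A few concrete encodings under the rules above (register choice illustrative):
//   32-bit int in a register:          regs[i].set1(r)   -> second() == Bad()
//   64-bit long/double in a register:  regs[i].set2(r)   -> second() == first()+1
//   placeholder half (T_VOID):         regs[i].set_bad() -> both halves Bad()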
345 345
346 346 // ---------------------------------------------------------------------------
347 347 // The compiled Java calling convention. The Java convention always passes
348 348 // 64-bit values in adjacent aligned locations (either registers or stack),
349 349 // floats in float registers and doubles in aligned float pairs. Values are
350 350 // packed in the registers. There is no backing varargs store for values in
351 351 // registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
352 352 // passed in I's, because longs in I's get their heads chopped off at
353 353 // interrupt).
354 354 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
355 355 VMRegPair *regs,
356 356 int total_args_passed,
357 357 int is_outgoing) {
358 358 assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
359 359
360 360 // Convention is to pack the first 6 int/oop args into the first 6 registers
361 361 // (I0-I5), extras spill to the stack. Then pack the first 8 float args
362 362 // into F0-F7, extras spill to the stack. Then pad all register sets to
363 363 // align. Then put longs and doubles into the same registers as they fit,
364 364 // else spill to the stack.
365 365 const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
366 366 const int flt_reg_max = 8;
367 367 //
368 368 // Where 32-bit 1-reg longs start being passed
369 369 // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
370 370 // So make it look like we've filled all the G regs that c2 wants to use.
371 371 Register g_reg = TieredCompilation ? noreg : G1;
372 372
373 373 // Count int/oop and float args. See how many stack slots we'll need and
374 374 // where the longs & doubles will go.
375 375 int int_reg_cnt = 0;
376 376 int flt_reg_cnt = 0;
377 377 // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
378 378 // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
379 379 int stk_reg_pairs = 0;
380 380 for (int i = 0; i < total_args_passed; i++) {
381 381 switch (sig_bt[i]) {
382 382 case T_LONG: // LP64, longs compete with int args
383 383 assert(sig_bt[i+1] == T_VOID, "");
384 384 #ifdef _LP64
385 385 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
386 386 #endif
387 387 break;
388 388 case T_OBJECT:
389 389 case T_ARRAY:
390 390 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
391 391 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
392 392 #ifndef _LP64
393 393 else stk_reg_pairs++;
394 394 #endif
395 395 break;
396 396 case T_INT:
397 397 case T_SHORT:
398 398 case T_CHAR:
399 399 case T_BYTE:
400 400 case T_BOOLEAN:
401 401 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
402 402 else stk_reg_pairs++;
403 403 break;
404 404 case T_FLOAT:
405 405 if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
406 406 else stk_reg_pairs++;
407 407 break;
408 408 case T_DOUBLE:
409 409 assert(sig_bt[i+1] == T_VOID, "");
410 410 break;
411 411 case T_VOID:
412 412 break;
413 413 default:
414 414 ShouldNotReachHere();
415 415 }
416 416 }
417 417
418 418 // This is where the longs/doubles start on the stack.
419 419 stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round
420 420
421 421 int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
422 422 int flt_reg_pairs = (flt_reg_cnt+1) & ~1;
423 423
424 424 // int stk_reg = frame::register_save_words*(wordSize>>2);
425 425 // int stk_reg = SharedRuntime::out_preserve_stack_slots();
426 426 int stk_reg = 0;
427 427 int int_reg = 0;
428 428 int flt_reg = 0;
429 429
430 430 // Now do the signature layout
431 431 for (int i = 0; i < total_args_passed; i++) {
432 432 switch (sig_bt[i]) {
433 433 case T_INT:
434 434 case T_SHORT:
435 435 case T_CHAR:
436 436 case T_BYTE:
437 437 case T_BOOLEAN:
438 438 #ifndef _LP64
439 439 case T_OBJECT:
440 440 case T_ARRAY:
441 441 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
442 442 #endif // _LP64
443 443 if (int_reg < int_reg_max) {
444 444 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
445 445 regs[i].set1(r->as_VMReg());
446 446 } else {
447 447 regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
448 448 }
449 449 break;
450 450
451 451 #ifdef _LP64
452 452 case T_OBJECT:
453 453 case T_ARRAY:
454 454 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
455 455 if (int_reg < int_reg_max) {
456 456 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
457 457 regs[i].set2(r->as_VMReg());
458 458 } else {
459 459 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
460 460 stk_reg_pairs += 2;
461 461 }
462 462 break;
463 463 #endif // _LP64
464 464
465 465 case T_LONG:
466 466 assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
467 467 #ifdef _LP64
468 468 if (int_reg < int_reg_max) {
469 469 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
470 470 regs[i].set2(r->as_VMReg());
471 471 } else {
472 472 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
473 473 stk_reg_pairs += 2;
474 474 }
475 475 #else
476 476 #ifdef COMPILER2
477 477 // For 32-bit build, can't pass longs in O-regs because they become
478 478 // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
479 479 // spare and available. This convention isn't used by the Sparc ABI or
480 480 // anywhere else. If we're tiered then we don't use G-regs because c1
481 481 // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
482 482 // G0: zero
483 483 // G1: 1st Long arg
484 484 // G2: global allocated to TLS
485 485 // G3: used in inline cache check
486 486 // G4: 2nd Long arg
487 487 // G5: used in inline cache check
488 488 // G6: used by OS
489 489 // G7: used by OS
490 490
491 491 if (g_reg == G1) {
492 492 regs[i].set2(G1->as_VMReg()); // This long arg in G1
493 493 g_reg = G4; // Where the next arg goes
494 494 } else if (g_reg == G4) {
495 495 regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
496 496 g_reg = noreg; // No more longs in registers
497 497 } else {
498 498 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
499 499 stk_reg_pairs += 2;
500 500 }
501 501 #else // COMPILER2
502 502 if (int_reg_pairs + 1 < int_reg_max) {
503 503 if (is_outgoing) {
504 504 regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
505 505 } else {
506 506 regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
507 507 }
508 508 int_reg_pairs += 2;
509 509 } else {
510 510 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
511 511 stk_reg_pairs += 2;
512 512 }
513 513 #endif // COMPILER2
514 514 #endif // _LP64
515 515 break;
516 516
517 517 case T_FLOAT:
518 518 if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
519 519 else regs[i].set1( VMRegImpl::stack2reg(stk_reg++));
520 520 break;
521 521 case T_DOUBLE:
522 522 assert(sig_bt[i+1] == T_VOID, "expecting half");
523 523 if (flt_reg_pairs + 1 < flt_reg_max) {
524 524 regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
525 525 flt_reg_pairs += 2;
526 526 } else {
527 527 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
528 528 stk_reg_pairs += 2;
529 529 }
530 530 break;
531 531 case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
532 532 default:
533 533 ShouldNotReachHere();
534 534 }
535 535 }
536 536
537 537 // Return the amount of stack space these arguments will need.
538 538 return stk_reg_pairs;
539 539
540 540 }
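// A worked example of the layout this produces (64-bit build, incoming args;
// hand-derived from the code above rather than from actual output):
//   sig_bt = { T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID }
//     T_INT    -> I0   (set1)
//     T_LONG   -> I1   (set2; its T_VOID half is set_bad())
//     T_FLOAT  -> F0   (set1)
//     T_DOUBLE -> F2   (set2, aligned float pair)
//   Return value: 0 -- no stack slots needed, everything fit in registers.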
541 541
542 542 // Helper class mostly to avoid passing masm everywhere, and handle
543 543 // store displacement overflow logic.
544 544 class AdapterGenerator {
545 545 MacroAssembler *masm;
546 546 Register Rdisp;
547 547 void set_Rdisp(Register r) { Rdisp = r; }
548 548
549 549 void patch_callers_callsite();
550 550 void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
551 551
552 552 // base+st_off points to top of argument
553 553 int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
554 554 int next_arg_offset(const int st_off) {
555 555 return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
556 556 }
557 557
558 558 int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
559 559 int next_tag_offset(const int st_off) {
560 560 return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
561 561 }
562 562
563 563 // Argument slot values may be loaded first into a register because
564 564 // they might not fit into displacement.
565 565 RegisterOrConstant arg_slot(const int st_off);
566 566 RegisterOrConstant next_arg_slot(const int st_off);
567 567
568 568 RegisterOrConstant tag_slot(const int st_off);
569 569 RegisterOrConstant next_tag_slot(const int st_off);
570 570
571 571 // Stores long into offset pointed to by base
572 572 void store_c2i_long(Register r, Register base,
573 573 const int st_off, bool is_stack);
574 574 void store_c2i_object(Register r, Register base,
575 575 const int st_off);
576 576 void store_c2i_int(Register r, Register base,
577 577 const int st_off);
578 578 void store_c2i_double(VMReg r_2,
579 579 VMReg r_1, Register base, const int st_off);
580 580 void store_c2i_float(FloatRegister f, Register base,
581 581 const int st_off);
582 582
583 583 public:
584 584 void gen_c2i_adapter(int total_args_passed,
585 585 // VMReg max_arg,
586 586 int comp_args_on_stack, // VMRegStackSlots
587 587 const BasicType *sig_bt,
588 588 const VMRegPair *regs,
589 589 Label& skip_fixup);
590 590 void gen_i2c_adapter(int total_args_passed,
591 591 // VMReg max_arg,
592 592 int comp_args_on_stack, // VMRegStackSlots
593 593 const BasicType *sig_bt,
594 594 const VMRegPair *regs);
595 595
596 596 AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
597 597 };
598 598
599 599
600 600 // Patch the callers callsite with entry to compiled code if it exists.
601 601 void AdapterGenerator::patch_callers_callsite() {
602 602 Label L;
603 603 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
604 604 __ br_null(G3_scratch, false, __ pt, L);
605 605 // Schedule the branch target address early.
606 606 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
607 607 // Call into the VM to patch the caller, then jump to compiled callee
608 608 __ save_frame(4); // Args in compiled layout; do not blow them
609 609
610 610 // Must save all the live Gregs the list is:
611 611 // G1: 1st Long arg (32bit build)
612 612 // G2: global allocated to TLS
613 613 // G3: used in inline cache check (scratch)
614 614 // G4: 2nd Long arg (32bit build);
615 615 // G5: used in inline cache check (methodOop)
616 616
617 617 // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
618 618
619 619 #ifdef _LP64
620 620 // mov(s,d)
621 621 __ mov(G1, L1);
622 622 __ mov(G4, L4);
623 623 __ mov(G5_method, L5);
624 624 __ mov(G5_method, O0); // VM needs target method
625 625 __ mov(I7, O1); // VM needs caller's callsite
626 626 // Must be a leaf call...
627 627 // can be very far once the blob has been relocated
628 628 AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
629 629 __ relocate(relocInfo::runtime_call_type);
630 630 __ jumpl_to(dest, O7, O7);
631 631 __ delayed()->mov(G2_thread, L7_thread_cache);
632 632 __ mov(L7_thread_cache, G2_thread);
633 633 __ mov(L1, G1);
634 634 __ mov(L4, G4);
635 635 __ mov(L5, G5_method);
636 636 #else
637 637 __ stx(G1, FP, -8 + STACK_BIAS);
638 638 __ stx(G4, FP, -16 + STACK_BIAS);
639 639 __ mov(G5_method, L5);
640 640 __ mov(G5_method, O0); // VM needs target method
641 641 __ mov(I7, O1); // VM needs caller's callsite
642 642 // Must be a leaf call...
643 643 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
644 644 __ delayed()->mov(G2_thread, L7_thread_cache);
645 645 __ mov(L7_thread_cache, G2_thread);
646 646 __ ldx(FP, -8 + STACK_BIAS, G1);
647 647 __ ldx(FP, -16 + STACK_BIAS, G4);
648 648 __ mov(L5, G5_method);
649 649 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
650 650 #endif /* _LP64 */
651 651
652 652 __ restore(); // Restore args
653 653 __ bind(L);
654 654 }
655 655
656 656 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
657 657 Register scratch) {
658 658 if (TaggedStackInterpreter) {
659 659 RegisterOrConstant slot = tag_slot(st_off);
660 660 // have to store zero because local slots can be reused (rats!)
661 661 if (t == frame::TagValue) {
662 662 __ st_ptr(G0, base, slot);
663 663 } else if (t == frame::TagCategory2) {
664 664 __ st_ptr(G0, base, slot);
665 665 __ st_ptr(G0, base, next_tag_slot(st_off));
666 666 } else {
667 667 __ mov(t, scratch);
668 668 __ st_ptr(scratch, base, slot);
669 669 }
670 670 }
671 671 }
672 672
673 673
674 674 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
675 675 RegisterOrConstant roc(arg_offset(st_off));
676 676 return __ ensure_simm13_or_reg(roc, Rdisp);
677 677 }
678 678
679 679 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
680 680 RegisterOrConstant roc(next_arg_offset(st_off));
681 681 return __ ensure_simm13_or_reg(roc, Rdisp);
682 682 }
683 683
684 684
685 685 RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
686 686 RegisterOrConstant roc(tag_offset(st_off));
687 687 return __ ensure_simm13_or_reg(roc, Rdisp);
688 688 }
689 689
690 690 RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
691 691 RegisterOrConstant roc(next_tag_offset(st_off));
692 692 return __ ensure_simm13_or_reg(roc, Rdisp);
693 693 }
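// These helpers matter because SPARC load/store displacements are signed
// 13-bit immediates (-4096..4095); wider offsets must go through a register.
// Approximate shape of ensure_simm13_or_reg (a sketch, not the exact
// MacroAssembler code):
//
//   RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register tmp) {
//     if (src.is_register() || Assembler::is_simm13(src.as_constant()))
//       return src;                // usable directly as a displacement
//     set(src.as_constant(), tmp); // too wide: materialize in the scratch reg
//     return RegisterOrConstant(tmp);
//   }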
694 694
695 695
696 696 // Stores long into offset pointed to by base
697 697 void AdapterGenerator::store_c2i_long(Register r, Register base,
698 698 const int st_off, bool is_stack) {
699 699 #ifdef _LP64
700 700 // In V9, longs are given 2 64-bit slots in the interpreter, but the
701 701 // data is passed in only 1 slot.
702 702 __ stx(r, base, next_arg_slot(st_off));
703 703 #else
704 704 #ifdef COMPILER2
705 705 // Misaligned store of 64-bit data
706 706 __ stw(r, base, arg_slot(st_off)); // lo bits
707 707 __ srlx(r, 32, r);
708 708 __ stw(r, base, next_arg_slot(st_off)); // hi bits
709 709 #else
710 710 if (is_stack) {
711 711 // Misaligned store of 64-bit data
712 712 __ stw(r, base, arg_slot(st_off)); // lo bits
713 713 __ srlx(r, 32, r);
714 714 __ stw(r, base, next_arg_slot(st_off)); // hi bits
715 715 } else {
716 716 __ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
717 717 __ stw(r , base, next_arg_slot(st_off)); // hi bits
718 718 }
719 719 #endif // COMPILER2
720 720 #endif // _LP64
721 721 tag_c2i_arg(frame::TagCategory2, base, st_off, r);
722 722 }
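// Slot picture for the split stores above (32-bit build, shown schematically;
// offsets come from the arg_slot/next_arg_slot helpers):
//   [base + arg_slot(st_off)]       lo 32 bits (first interpreter slot)
//   [base + next_arg_slot(st_off)]  hi 32 bits (the adjacent, lower slot)
// Two stw's are used because the interpreter guarantees only 4-byte alignment
// here, so a single 8-byte stx could be misaligned and trap.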
723 723
724 724 void AdapterGenerator::store_c2i_object(Register r, Register base,
725 725 const int st_off) {
726 726 __ st_ptr (r, base, arg_slot(st_off));
727 727 tag_c2i_arg(frame::TagReference, base, st_off, r);
728 728 }
729 729
730 730 void AdapterGenerator::store_c2i_int(Register r, Register base,
731 731 const int st_off) {
732 732 __ st (r, base, arg_slot(st_off));
733 733 tag_c2i_arg(frame::TagValue, base, st_off, r);
734 734 }
735 735
736 736 // Stores double into offset pointed to by base
737 737 void AdapterGenerator::store_c2i_double(VMReg r_2,
738 738 VMReg r_1, Register base, const int st_off) {
739 739 #ifdef _LP64
740 740 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
741 741 // data is passed in only 1 slot.
742 742 __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
743 743 #else
744 744 // Need to marshal 64-bit value from misaligned Lesp loads
745 745 __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
746 746 __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
747 747 #endif
748 748 tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
749 749 }
750 750
751 751 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
752 752 const int st_off) {
753 753 __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
754 754 tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
755 755 }
756 756
757 757 void AdapterGenerator::gen_c2i_adapter(
758 758 int total_args_passed,
759 759 // VMReg max_arg,
760 760 int comp_args_on_stack, // VMRegStackSlots
761 761 const BasicType *sig_bt,
762 762 const VMRegPair *regs,
763 763 Label& skip_fixup) {
764 764
765 765 // Before we get into the guts of the C2I adapter, see if we should be here
766 766 // at all. We've come from compiled code and are attempting to jump to the
767 767 // interpreter, which means the caller made a static call to get here
768 768 // (vcalls always get a compiled target if there is one). Check for a
769 769 // compiled target. If there is one, we need to patch the caller's call.
770 770 // However we will run interpreted if we come thru here. The next pass
771 771 // thru the call site will run compiled. If we ran compiled here then
772 772 // we can (theoretically) do endless i2c->c2i->i2c transitions during
773 773 // deopt/uncommon trap cycles. If we always go interpreted here then
774 774 // we can have at most one and don't need to play any tricks to keep
775 775 // from endlessly growing the stack.
776 776 //
777 777 // Actually if we detected that we had an i2c->c2i transition here we
778 778 // ought to be able to reset the world back to the state of the interpreted
779 779 // call and not bother building another interpreter arg area. We don't
780 780 // do that at this point.
781 781
782 782 patch_callers_callsite();
783 783
784 784 __ bind(skip_fixup);
785 785
786 786 // Since all args are passed on the stack, total_args_passed*wordSize is the
787 787 // space we need. Add in varargs area needed by the interpreter. Round up
788 788 // to stack alignment.
789 789 const int arg_size = total_args_passed * Interpreter::stackElementSize();
790 790 const int varargs_area =
791 791 (frame::varargs_offset - frame::register_save_words)*wordSize;
792 792 const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
793 793
794 794 int bias = STACK_BIAS;
795 795 const int interp_arg_offset = frame::varargs_offset*wordSize +
796 796 (total_args_passed-1)*Interpreter::stackElementSize();
797 797
798 798 Register base = SP;
799 799
800 800 #ifdef _LP64
801 801 // In the 64bit build, because of wider slots and STACK_BIAS, we can run
802 802 // out of bits in the displacement to do loads and stores. Use g3 as
803 803 // temporary displacement.
804 804 if (! __ is_simm13(extraspace)) {
805 805 __ set(extraspace, G3_scratch);
806 806 __ sub(SP, G3_scratch, SP);
807 807 } else {
808 808 __ sub(SP, extraspace, SP);
809 809 }
810 810 set_Rdisp(G3_scratch);
811 811 #else
812 812 __ sub(SP, extraspace, SP);
813 813 #endif // _LP64
814 814
815 815 // First write G1 (if used) to wherever it must go
816 816 for (int i=0; i<total_args_passed; i++) {
817 817 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
818 818 VMReg r_1 = regs[i].first();
819 819 VMReg r_2 = regs[i].second();
820 820 if (r_1 == G1_scratch->as_VMReg()) {
821 821 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
822 822 store_c2i_object(G1_scratch, base, st_off);
823 823 } else if (sig_bt[i] == T_LONG) {
824 824 assert(!TieredCompilation, "should not use register args for longs");
825 825 store_c2i_long(G1_scratch, base, st_off, false);
826 826 } else {
827 827 store_c2i_int(G1_scratch, base, st_off);
828 828 }
829 829 }
830 830 }
831 831
832 832 // Now write the args into the outgoing interpreter space
833 833 for (int i=0; i<total_args_passed; i++) {
834 834 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
835 835 VMReg r_1 = regs[i].first();
836 836 VMReg r_2 = regs[i].second();
837 837 if (!r_1->is_valid()) {
838 838 assert(!r_2->is_valid(), "");
839 839 continue;
840 840 }
841 841 // Skip G1 if found as we did it first in order to free it up
842 842 if (r_1 == G1_scratch->as_VMReg()) {
843 843 continue;
844 844 }
845 845 #ifdef ASSERT
846 846 bool G1_forced = false;
847 847 #endif // ASSERT
848 848 if (r_1->is_stack()) { // Pretend stack targets are loaded into G1
849 849 #ifdef _LP64
850 850 Register ld_off = Rdisp;
851 851 __ set(reg2offset(r_1) + extraspace + bias, ld_off);
852 852 #else
853 853 int ld_off = reg2offset(r_1) + extraspace + bias;
854 854 #endif // _LP64
855 855 #ifdef ASSERT
856 856 G1_forced = true;
857 857 #endif // ASSERT
858 858 r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
859 859 if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
860 860 else __ ldx(base, ld_off, G1_scratch);
861 861 }
862 862
863 863 if (r_1->is_Register()) {
864 864 Register r = r_1->as_Register()->after_restore();
865 865 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
866 866 store_c2i_object(r, base, st_off);
867 867 } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
868 868 #ifndef _LP64
869 869 if (TieredCompilation) {
870 870 assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
871 871 }
872 872 #endif // _LP64
873 873 store_c2i_long(r, base, st_off, r_2->is_stack());
874 874 } else {
875 875 store_c2i_int(r, base, st_off);
876 876 }
877 877 } else {
878 878 assert(r_1->is_FloatRegister(), "");
879 879 if (sig_bt[i] == T_FLOAT) {
880 880 store_c2i_float(r_1->as_FloatRegister(), base, st_off);
881 881 } else {
882 882 assert(sig_bt[i] == T_DOUBLE, "wrong type");
883 883 store_c2i_double(r_2, r_1, base, st_off);
884 884 }
885 885 }
886 886 }
887 887
888 888 #ifdef _LP64
889 889 // Need to reload G3_scratch, used for temporary displacements.
890 890 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
891 891
892 892 // Pass O5_savedSP as an argument to the interpreter.
893 893 // The interpreter will restore SP to this value before returning.
894 894 __ set(extraspace, G1);
895 895 __ add(SP, G1, O5_savedSP);
896 896 #else
897 897 // Pass O5_savedSP as an argument to the interpreter.
898 898 // The interpreter will restore SP to this value before returning.
899 899 __ add(SP, extraspace, O5_savedSP);
900 900 #endif // _LP64
901 901
902 902 __ mov((frame::varargs_offset)*wordSize -
903 903 1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
904 904 // Jump to the interpreter just as if interpreter was doing it.
905 905 __ jmpl(G3_scratch, 0, G0);
906 906 // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
907 907 // (really L0) is in use by the compiled frame as a generic temp. However,
908 908 // the interpreter does not know where its args are without some kind of
909 909 // arg pointer being passed in. Pass it in Gargs.
910 910 __ delayed()->add(SP, G1, Gargs);
911 911 }
912 912
913 913 void AdapterGenerator::gen_i2c_adapter(
914 914 int total_args_passed,
915 915 // VMReg max_arg,
916 916 int comp_args_on_stack, // VMRegStackSlots
917 917 const BasicType *sig_bt,
918 918 const VMRegPair *regs) {
919 919
920 920 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
921 921 // layout. Lesp was saved by the calling I-frame and will be restored on
922 922 // return. Meanwhile, outgoing arg space is all owned by the callee
923 923 // C-frame, so we can mangle it at will. After adjusting the frame size,
924 924 // hoist register arguments and repack other args according to the compiled
925 925 // code convention. Finally, end in a jump to the compiled code. The entry
926 926 // point address is the start of the buffer.
927 927
928 928 // We will only enter here from an interpreted frame and never from after
929 929 // passing thru a c2i. Azul allowed this but we do not. If we lose the
930 930 // race and use a c2i we will remain interpreted for the race loser(s).
931 931 // This removes all sorts of headaches on the x86 side and also eliminates
932 932 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
933 933
934 934 // As you can see from the list of inputs & outputs there are not a lot
935 935 // of temp registers to work with: mostly G1, G3 & G4.
936 936
937 937 // Inputs:
938 938 // G2_thread - TLS
939 939 // G5_method - Method oop
940 940 // G4 (Gargs) - Pointer to interpreter's args
941 941 // O0..O4 - free for scratch
942 942 // O5_savedSP - Caller's saved SP, to be restored if needed
943 943 // O6 - Current SP!
944 944 // O7 - Valid return address
945 945 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
946 946
947 947 // Outputs:
948 948 // G2_thread - TLS
949 949 // G1, G4 - Outgoing long args in 32-bit build
950 950 // O0-O5 - Outgoing args in compiled layout
951 951 // O6 - Adjusted or restored SP
952 952 // O7 - Valid return address
953 - // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
953 + // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
954 954 // F0-F7 - more outgoing args
955 955
956 956
957 957 // Gargs is the incoming argument base, and also an outgoing argument.
958 958 __ sub(Gargs, BytesPerWord, Gargs);
959 959
960 -#ifdef ASSERT
961 - {
962 - // on entry OsavedSP and SP should be equal
963 - Label ok;
964 - __ cmp(O5_savedSP, SP);
965 - __ br(Assembler::equal, false, Assembler::pt, ok);
966 - __ delayed()->nop();
967 - __ stop("I5_savedSP not set");
968 - __ should_not_reach_here();
969 - __ bind(ok);
970 - }
971 -#endif
972 -
973 960 // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
974 961 // WITH O7 HOLDING A VALID RETURN PC
975 962 //
976 963 // | |
977 964 // : java stack :
978 965 // | |
979 966 // +--------------+ <--- start of outgoing args
980 967 // | receiver | |
981 968 // : rest of args : |---size is java-arg-words
982 969 // | | |
983 970 // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
984 971 // | | |
985 972 // : unused : |---Space for max Java stack, plus stack alignment
986 973 // | | |
987 974 // +--------------+ <--- SP + 16*wordsize
988 975 // | |
989 976 // : window :
990 977 // | |
991 978 // +--------------+ <--- SP
992 979
993 980 // WE REPACK THE STACK. We use the common calling convention layout as
994 981 // discovered by calling SharedRuntime::calling_convention. We assume it
995 982 // causes an arbitrary shuffle of memory, which may require some register
996 983 // temps to do the shuffle. We hope for (and optimize for) the case where
997 984 // temps are not needed. We may have to resize the stack slightly, in case
998 985 // we need alignment padding (32-bit interpreter can pass longs & doubles
999 986 // misaligned, but the compilers expect them aligned).
1000 987 //
1001 988 // | |
1002 989 // : java stack :
1003 990 // | |
1004 991 // +--------------+ <--- start of outgoing args
1005 992 // | pad, align | |
1006 993 // +--------------+ |
1007 994 // | ints, floats | |---Outgoing stack args, packed low.
1008 995 // +--------------+ | First few args in registers.
1009 996 // : doubles : |
1010 997 // | longs | |
1011 998 // +--------------+ <--- SP' + 16*wordsize
1012 999 // | |
1013 1000 // : window :
1014 1001 // | |
1015 1002 // +--------------+ <--- SP'
1016 1003
1017 1004 // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
1018 1005 // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
1019 1006 // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
1020 1007
1021 1008 // Cut-out for having no stack args. Since up to 6 args are passed
1022 1009 // in registers, we will commonly have no stack args.
1023 1010 if (comp_args_on_stack > 0) {
1024 1011
1025 1012 // Convert VMReg stack slots to words.
1026 1013 int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1027 1014 // Round up to minimum stack alignment, in wordSize
1028 1015 comp_words_on_stack = round_to(comp_words_on_stack, 2);
1029 1016 // Now compute the distance from Lesp to SP. This calculation does not
1030 1017 // include the space for total_args_passed because Lesp has not yet popped
1031 1018 // the arguments.
1032 1019 __ sub(SP, (comp_words_on_stack)*wordSize, SP);
1033 1020 }
1034 1021
1035 1022 // Will jump to the compiled code just as if compiled code was doing it.
1036 1023 // Pre-load the register-jump target early, to schedule it better.
1037 1024 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
1038 1025
1039 1026 // Now generate the shuffle code. Pick up all register args and move the
1040 1027 // rest through G1_scratch.
1041 1028 for (int i=0; i<total_args_passed; i++) {
1042 1029 if (sig_bt[i] == T_VOID) {
1043 1030 // Longs and doubles are passed in native word order, but misaligned
1044 1031 // in the 32-bit build.
1045 1032 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1046 1033 continue;
1047 1034 }
1048 1035
1049 1036 // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
1050 1037 // 32-bit build and aligned in the 64-bit build. Look for the obvious
1051 1038 // ldx/lddf optimizations.
1052 1039
1053 1040 // Load in argument order going down.
1054 1041 const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
1055 1042 set_Rdisp(G1_scratch);
1056 1043
1057 1044 VMReg r_1 = regs[i].first();
1058 1045 VMReg r_2 = regs[i].second();
1059 1046 if (!r_1->is_valid()) {
1060 1047 assert(!r_2->is_valid(), "");
1061 1048 continue;
1062 1049 }
1063 1050 if (r_1->is_stack()) { // Pretend stack targets are loaded into F8/F9
1064 1051 r_1 = F8->as_VMReg(); // as part of the load/store shuffle
1065 1052 if (r_2->is_valid()) r_2 = r_1->next();
1066 1053 }
1067 1054 if (r_1->is_Register()) { // Register argument
1068 1055 Register r = r_1->as_Register()->after_restore();
1069 1056 if (!r_2->is_valid()) {
1070 1057 __ ld(Gargs, arg_slot(ld_off), r);
1071 1058 } else {
1072 1059 #ifdef _LP64
1073 1060 // In V9, longs are given 2 64-bit slots in the interpreter, but the
1074 1061 // data is passed in only 1 slot.
1075 1062 RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
1076 1063 next_arg_slot(ld_off) : arg_slot(ld_off);
1077 1064 __ ldx(Gargs, slot, r);
1078 1065 #else
1079 1066 // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
1080 1067 // stack shuffle. Load the first 2 longs into G1/G4 later.
1081 1068 #endif
1082 1069 }
1083 1070 } else {
1084 1071 assert(r_1->is_FloatRegister(), "");
1085 1072 if (!r_2->is_valid()) {
1086 1073 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
1087 1074 } else {
1088 1075 #ifdef _LP64
1089 1076 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
1090 1077 // data is passed in only 1 slot. This code also handles longs that
1091 1078 // are passed on the stack, but need a stack-to-stack move through a
1092 1079 // spare float register.
1093 1080 RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
1094 1081 next_arg_slot(ld_off) : arg_slot(ld_off);
1095 1082 __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
1096 1083 #else
1097 1084 // Need to marshal 64-bit value from misaligned Lesp loads
1098 1085 __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
1099 1086 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
1100 1087 #endif
1101 1088 }
1102 1089 }
1103 1090 // Was the argument really intended to be on the stack, but was loaded
1104 1091 // into F8/F9?
1105 1092 if (regs[i].first()->is_stack()) {
1106 1093 assert(r_1->as_FloatRegister() == F8, "fix this code");
1107 1094 // Convert stack slot to an SP offset
1108 1095 int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
1109 1096 // Store down the shuffled stack word. Target address _is_ aligned.
1110 1097 RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
1111 1098 if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
1112 1099 else __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
1113 1100 }
1114 1101 }
1115 1102 bool made_space = false;
1116 1103 #ifndef _LP64
1117 1104 // May need to pick up a few long args in G1/G4
1118 1105 bool g4_crushed = false;
1119 1106 bool g3_crushed = false;
1120 1107 for (int i=0; i<total_args_passed; i++) {
1121 1108 if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
1122 1109 // Load in argument order going down
1123 1110 int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
1124 1111 // Need to marshal 64-bit value from misaligned Lesp loads
1125 1112 Register r = regs[i].first()->as_Register()->after_restore();
1126 1113 if (r == G1 || r == G4) {
1127 1114 assert(!g4_crushed, "ordering problem");
1128 1115 if (r == G4){
1129 1116 g4_crushed = true;
1130 1117 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
1131 1118 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1132 1119 } else {
1133 1120 // better schedule this way
1134 1121 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1135 1122 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
1136 1123 }
1137 1124 g3_crushed = true;
1138 1125 __ sllx(r, 32, r);
1139 1126 __ or3(G3_scratch, r, r);
1140 1127 } else {
1141 1128 assert(r->is_out(), "longs passed in two O registers");
1142 1129 __ ld (Gargs, arg_slot(ld_off) , r->successor()); // Load lo bits
1143 1130 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
1144 1131 }
1145 1132 }
1146 1133 }
1147 1134 #endif
1148 1135
1149 1136 // Jump to the compiled code just as if compiled code was doing it.
1150 1137 //
1151 1138 #ifndef _LP64
1152 1139 if (g3_crushed) {
1153 1140 // Rats, the load was wasted; at least it is in cache...
1154 1141 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
1155 1142 }
1156 1143 #endif /* _LP64 */
1157 1144
1158 1145 // 6243940 We might end up in handle_wrong_method if
1159 1146 // the callee is deoptimized as we race thru here. If that
1160 1147 // happens we don't want to take a safepoint because the
1161 1148 // caller frame will look interpreted and arguments are now
1162 1149 // "compiled" so it is much better to make this transition
1163 1150 // invisible to the stack walking code. Unfortunately if
1164 1151 // we try and find the callee by normal means a safepoint
1165 1152 // is possible. So we stash the desired callee in the thread
1166 1153 // and the VM will find it there should this case occur.
1167 1154 Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
1168 1155 __ st_ptr(G5_method, callee_target_addr);
1169 1156
1170 1157 if (StressNonEntrant) {
1171 1158 // Open a big window for deopt failure
1172 1159 __ save_frame(0);
1173 1160 __ mov(G0, L0);
1174 1161 Label loop;
1175 1162 __ bind(loop);
1176 1163 __ sub(L0, 1, L0);
1177 1164 __ br_null(L0, false, Assembler::pt, loop);
1178 1165 __ delayed()->nop();
1179 1166
1180 1167 __ restore();
1181 1168 }
1182 1169
1183 1170
1184 1171 __ jmpl(G3, 0, G0);
1185 1172 __ delayed()->nop();
1186 1173 }
1187 1174
1188 1175 // ---------------------------------------------------------------
1189 1176 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1190 1177 int total_args_passed,
1191 1178 // VMReg max_arg,
1192 1179 int comp_args_on_stack, // VMRegStackSlots
1193 1180 const BasicType *sig_bt,
1194 1181 const VMRegPair *regs,
1195 1182 AdapterFingerPrint* fingerprint) {
1196 1183 address i2c_entry = __ pc();
1197 1184
1198 1185 AdapterGenerator agen(masm);
1199 1186
1200 1187 agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
1201 1188
1202 1189
1203 1190 // -------------------------------------------------------------------------
1204 1191 // Generate a C2I adapter. On entry we know G5 holds the methodOop. The
1205 1192 // args start out packed in the compiled layout. They need to be unpacked
1206 1193 // into the interpreter layout. This will almost always require some stack
1207 1194 // space. We grow the current (compiled) stack, then repack the args. We
1208 1195 // finally end in a jump to the generic interpreter entry point. On exit
1209 1196 // from the interpreter, the interpreter will restore our SP (lest the
1210 1197 // compiled code, which relies solely on SP and not FP, get sick).
1211 1198
1212 1199 address c2i_unverified_entry = __ pc();
1213 1200 Label skip_fixup;
1214 1201 {
1215 1202 #if !defined(_LP64) && defined(COMPILER2)
1216 1203 Register R_temp = L0; // another scratch register
1217 1204 #else
1218 1205 Register R_temp = G1; // another scratch register
1219 1206 #endif
1220 1207
1221 1208 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1222 1209
1223 1210 __ verify_oop(O0);
1224 1211 __ verify_oop(G5_method);
1225 1212 __ load_klass(O0, G3_scratch);
1226 1213 __ verify_oop(G3_scratch);
1227 1214
1228 1215 #if !defined(_LP64) && defined(COMPILER2)
1229 1216 __ save(SP, -frame::register_save_words*wordSize, SP);
1230 1217 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
1231 1218 __ verify_oop(R_temp);
1232 1219 __ cmp(G3_scratch, R_temp);
1233 1220 __ restore();
1234 1221 #else
1235 1222 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
1236 1223 __ verify_oop(R_temp);
1237 1224 __ cmp(G3_scratch, R_temp);
1238 1225 #endif
1239 1226
1240 1227 Label ok, ok2;
1241 1228 __ brx(Assembler::equal, false, Assembler::pt, ok);
1242 1229 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
1243 1230 __ jump_to(ic_miss, G3_scratch);
1244 1231 __ delayed()->nop();
1245 1232
1246 1233 __ bind(ok);
1247 1234 // Method might have been compiled since the call site was patched to
1248 1235 // interpreted; if that is the case, treat it as a miss so we can get
1249 1236 // the call site corrected.
1250 1237 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
1251 1238 __ bind(ok2);
1252 1239 __ br_null(G3_scratch, false, __ pt, skip_fixup);
1253 1240 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
1254 1241 __ jump_to(ic_miss, G3_scratch);
1255 1242 __ delayed()->nop();
1256 1243
1257 1244 }
1258 1245
1259 1246 address c2i_entry = __ pc();
1260 1247
1261 1248 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1262 1249
1263 1250 __ flush();
1264 1251 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1265 1252
1266 1253 }
1267 1254
1268 1255 // Helper function for native calling conventions
1269 1256 static VMReg int_stk_helper( int i ) {
1270 1257 // Bias any stack based VMReg we get by ignoring the window area
1271 1258 // but not the register parameter save area.
1272 1259 //
1273 1260 // This is strange for the following reasons. We'd normally expect
1274 1261 // the calling convention to return a VMReg for a stack slot
1275 1262 // completely ignoring any abi reserved area. C2 thinks of that
1276 1263 // abi area as only out_preserve_stack_slots. This does not include
1277 1264 // the area allocated by the C abi to store down integer arguments
1278 1265 // because the java calling convention does not use it. So
1279 1266 // since c2 assumes that there are only out_preserve_stack_slots
1280 1267 // to bias the optoregs (which impacts VMRegs), when actually referencing any
1281 1268 // stack location the C calling convention must add in this bias amount
1282 1269 // to make up for the fact that out_preserve_stack_slots is
1283 1270 // insufficient for C calls. What a mess. I sure hope those 6
1284 1271 // stack words were worth it on every java call!
1285 1272
1286 1273 // Another way of cleaning this up would be for out_preserve_stack_slots
1287 1274 // to take a parameter to say whether it was C or java calling conventions.
1288 1275 // Then things might look a little better (but not much).
1289 1276
1290 1277 int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1291 1278 if( mem_parm_offset < 0 ) {
1292 1279 return as_oRegister(i)->as_VMReg();
1293 1280 } else {
1294 1281 int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1295 1282 // Now return a biased offset that will be correct when out_preserve_slots is added back in
1296 1283 return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1297 1284 }
1298 1285 }
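// Illustrative only, not part of the original file: a sketch of the bias
// arithmetic above with the platform constants left symbolic (the concrete
// values of frame::memory_parameter_word_sp_offset and
// SharedRuntime::out_preserve_stack_slots() are configuration we do not
// restate here):
//
//   i <  SPARC_ARGS_IN_REGS_NUM : arg i is passed in O-register i
//   i >= SPARC_ARGS_IN_REGS_NUM :
//     mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM
//     actual_offset   = (mem_parm_offset + memory_parameter_word_sp_offset)
//                       * slots_per_word
//     returned slot   = actual_offset - out_preserve_stack_slots()
//
// so when C2 later adds out_preserve_stack_slots() back in, the reference
// resolves to actual_offset, i.e. the real C-abi slot.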
1299 1286
1300 1287
1301 1288 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1302 1289 VMRegPair *regs,
1303 1290 int total_args_passed) {
1304 1291
1305 1292 // Return the number of VMReg stack_slots needed for the args.
1306 1293 // This value does not include an abi space (like register window
1307 1294 // save area).
1308 1295
1309 1296 // The native convention is V8 if !LP64
1310 1297 // The LP64 convention is the V9 convention which is slightly more sane.
1311 1298
1312 1299 // We return the amount of VMReg stack slots we need to reserve for all
1313 1300 // the arguments NOT counting out_preserve_stack_slots. Since we always
1314 1301 // have space for storing at least 6 registers to memory we start with that.
1315 1302 // See int_stk_helper for a further discussion.
1316 1303 int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
1317 1304
1318 1305 #ifdef _LP64
1319 1306 // V9 convention: All things "as-if" on double-wide stack slots.
1320 1307 // Hoist any int/ptr/long's in the first 6 to int regs.
1321 1308 // Hoist any flt/dbl's in the first 16 dbl regs.
1322 1309 int j = 0; // Count of actual args, not HALVES
1323 1310 for( int i=0; i<total_args_passed; i++, j++ ) {
1324 1311 switch( sig_bt[i] ) {
1325 1312 case T_BOOLEAN:
1326 1313 case T_BYTE:
1327 1314 case T_CHAR:
1328 1315 case T_INT:
1329 1316 case T_SHORT:
1330 1317 regs[i].set1( int_stk_helper( j ) ); break;
1331 1318 case T_LONG:
1332 1319 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1333 1320 case T_ADDRESS: // raw pointers, like current thread, for VM calls
1334 1321 case T_ARRAY:
1335 1322 case T_OBJECT:
1336 1323 regs[i].set2( int_stk_helper( j ) );
1337 1324 break;
1338 1325 case T_FLOAT:
1339 1326 if ( j < 16 ) {
1340 1327 // V9ism: floats go in ODD registers
1341 1328 regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
1342 1329 } else {
1343 1330 // V9ism: floats go in ODD stack slot
1344 1331 regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
1345 1332 }
1346 1333 break;
1347 1334 case T_DOUBLE:
1348 1335 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1349 1336 if ( j < 16 ) {
1350 1337 // V9ism: doubles go in EVEN/ODD regs
1351 1338 regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
1352 1339 } else {
1353 1340 // V9ism: doubles go in EVEN/ODD stack slots
1354 1341 regs[i].set2(VMRegImpl::stack2reg(j<<1));
1355 1342 }
1356 1343 break;
1357 1344 case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
1358 1345 default:
1359 1346 ShouldNotReachHere();
1360 1347 }
1361 1348 if (regs[i].first()->is_stack()) {
1362 1349 int off = regs[i].first()->reg2stack();
1363 1350 if (off > max_stack_slots) max_stack_slots = off;
1364 1351 }
1365 1352 if (regs[i].second()->is_stack()) {
1366 1353 int off = regs[i].second()->reg2stack();
1367 1354 if (off > max_stack_slots) max_stack_slots = off;
1368 1355 }
1369 1356 }
1370 1357
1371 1358 #else // _LP64
1372 1359 // V8 convention: first 6 things in O-regs, rest on stack.
1373 1360 // Alignment is willy-nilly.
1374 1361 for( int i=0; i<total_args_passed; i++ ) {
1375 1362 switch( sig_bt[i] ) {
1376 1363 case T_ADDRESS: // raw pointers, like current thread, for VM calls
1377 1364 case T_ARRAY:
1378 1365 case T_BOOLEAN:
1379 1366 case T_BYTE:
1380 1367 case T_CHAR:
1381 1368 case T_FLOAT:
1382 1369 case T_INT:
1383 1370 case T_OBJECT:
1384 1371 case T_SHORT:
1385 1372 regs[i].set1( int_stk_helper( i ) );
1386 1373 break;
1387 1374 case T_DOUBLE:
1388 1375 case T_LONG:
1389 1376 assert( sig_bt[i+1] == T_VOID, "expecting half" );
1390 1377 regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
1391 1378 break;
1392 1379 case T_VOID: regs[i].set_bad(); break;
1393 1380 default:
1394 1381 ShouldNotReachHere();
1395 1382 }
1396 1383 if (regs[i].first()->is_stack()) {
1397 1384 int off = regs[i].first()->reg2stack();
1398 1385 if (off > max_stack_slots) max_stack_slots = off;
1399 1386 }
1400 1387 if (regs[i].second()->is_stack()) {
1401 1388 int off = regs[i].second()->reg2stack();
1402 1389 if (off > max_stack_slots) max_stack_slots = off;
1403 1390 }
1404 1391 }
1405 1392 #endif // _LP64
1406 1393
1407 1394 return round_to(max_stack_slots + 1, 2);
1408 1395
1409 1396 }
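// Illustrative only, not part of the original file: how the LP64 (V9)
// branch above would place a hypothetical native signature
// (int, long, float, double), with j counting whole args:
//
//   j=0  T_INT    -> int_stk_helper(0)              -> O0
//   j=1  T_LONG   -> int_stk_helper(1)              -> O1
//   j=2  T_FLOAT  -> as_FloatRegister(1 + (2 << 1)) -> F5 (odd single)
//   j=3  T_DOUBLE -> as_FloatRegister(3 << 1)       -> F6 (even/odd pair)
//
// Once j reaches 16 (float/double) or 6 (the integer kinds), the same
// formulas produce stack slot numbers instead, which the max_stack_slots
// bookkeeping above then picks up.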
1410 1397
1411 1398
1412 1399 // ---------------------------------------------------------------------------
1413 1400 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1414 1401 switch (ret_type) {
1415 1402 case T_FLOAT:
1416 1403 __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1417 1404 break;
1418 1405 case T_DOUBLE:
1419 1406 __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1420 1407 break;
1421 1408 }
1422 1409 }
1423 1410
1424 1411 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1425 1412 switch (ret_type) {
1426 1413 case T_FLOAT:
1427 1414 __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1428 1415 break;
1429 1416 case T_DOUBLE:
1430 1417 __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1431 1418 break;
1432 1419 }
1433 1420 }
1434 1421
1435 1422 // Check and forward any pending exception. Thread is stored in
1436 1423 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
1437 1424 // is no exception handler. We merely pop this frame off and throw the
1438 1425 // exception in the caller's frame.
1439 1426 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1440 1427 Label L;
1441 1428 __ br_null(Rex_oop, false, Assembler::pt, L);
1442 1429 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1443 1430 // Since this is a native call, we *know* the proper exception handler
1444 1431 // without calling into the VM: it's the empty function. Just pop this
1445 1432 // frame and then jump to forward_exception_entry; O7 will contain the
1446 1433 // native caller's return PC.
1447 1434 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1448 1435 __ jump_to(exception_entry, G3_scratch);
1449 1436 __ delayed()->restore(); // Pop this frame off.
1450 1437 __ bind(L);
1451 1438 }
1452 1439
1453 1440 // A simple move of an integer-like type
1454 1441 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1455 1442 if (src.first()->is_stack()) {
1456 1443 if (dst.first()->is_stack()) {
1457 1444 // stack to stack
1458 1445 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1459 1446 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1460 1447 } else {
1461 1448 // stack to reg
1462 1449 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1463 1450 }
1464 1451 } else if (dst.first()->is_stack()) {
1465 1452 // reg to stack
1466 1453 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1467 1454 } else {
1468 1455 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1469 1456 }
1470 1457 }
1471 1458
1472 1459 // On 64-bit we will store integer-like items to the stack as
1473 1460 // 64-bit items (sparc abi) even though java would only store
1474 1461 // 32 bits for a parameter. On 32-bit it will simply be 32 bits.
1475 1462 // So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
1476 1463 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1477 1464 if (src.first()->is_stack()) {
1478 1465 if (dst.first()->is_stack()) {
1479 1466 // stack to stack
1480 1467 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1481 1468 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1482 1469 } else {
1483 1470 // stack to reg
1484 1471 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1485 1472 }
1486 1473 } else if (dst.first()->is_stack()) {
1487 1474 // reg to stack
1488 1475 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1489 1476 } else {
1490 1477 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1491 1478 }
1492 1479 }
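// Illustrative only, not part of the original file: the effect of the
// st_ptr above on 64-bit is that a 32-bit java value occupies a full
// 64-bit abi slot:
//
//   32-bit build: st     -> 4-byte store into a 4-byte slot
//   64-bit build: st_ptr -> 8-byte store, the value widened into the
//                           8-byte slot the sparc abi expects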
1493 1480
1494 1481
1495 1482 // An oop arg. Must pass a handle not the oop itself
1496 1483 static void object_move(MacroAssembler* masm,
1497 1484 OopMap* map,
1498 1485 int oop_handle_offset,
1499 1486 int framesize_in_slots,
1500 1487 VMRegPair src,
1501 1488 VMRegPair dst,
1502 1489 bool is_receiver,
1503 1490 int* receiver_offset) {
1504 1491
1505 1492 // must pass a handle. First figure out the location we use as a handle
1506 1493
1507 1494 if (src.first()->is_stack()) {
1508 1495 // Oop is already on the stack
1509 1496 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1510 1497 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1511 1498 __ ld_ptr(rHandle, 0, L4);
1512 1499 #ifdef _LP64
1513 1500 __ movr( Assembler::rc_z, L4, G0, rHandle );
1514 1501 #else
1515 1502 __ tst( L4 );
1516 1503 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1517 1504 #endif
1518 1505 if (dst.first()->is_stack()) {
1519 1506 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1520 1507 }
1521 1508 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1522 1509 if (is_receiver) {
1523 1510 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1524 1511 }
1525 1512 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1526 1513 } else {
1527 1514     // Oop is in an input register; we must flush it to the stack
1528 1515 const Register rOop = src.first()->as_Register();
1529 1516 const Register rHandle = L5;
1530 1517 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1531 1518 int offset = oop_slot*VMRegImpl::stack_slot_size;
1532 1519 Label skip;
1533 1520 __ st_ptr(rOop, SP, offset + STACK_BIAS);
1534 1521 if (is_receiver) {
1535 1522 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1536 1523 }
1537 1524 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1538 1525 __ add(SP, offset + STACK_BIAS, rHandle);
1539 1526 #ifdef _LP64
1540 1527 __ movr( Assembler::rc_z, rOop, G0, rHandle );
1541 1528 #else
1542 1529 __ tst( rOop );
1543 1530 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1544 1531 #endif
1545 1532
1546 1533 if (dst.first()->is_stack()) {
1547 1534 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1548 1535 } else {
1549 1536 __ mov(rHandle, dst.first()->as_Register());
1550 1537 }
1551 1538 }
1552 1539 }
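// Illustrative only, not part of the original file: the conditional moves
// in object_move implement the JNI rule that a NULL oop is passed as a
// NULL handle, not as the address of a slot holding NULL. In C-like
// pseudocode (names hypothetical):
//
//   *slot   = oop;                    // flush the oop into the frame
//   rHandle = slot;                   // handle = address of that slot
//   if (oop == NULL) rHandle = NULL;  // the movr/movcc against G0 above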
1553 1540
1554 1541 // A float arg may have to be moved between a float reg and an int reg
1555 1542 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1556 1543 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1557 1544
1558 1545 if (src.first()->is_stack()) {
1559 1546 if (dst.first()->is_stack()) {
1560 1547 // stack to stack the easiest of the bunch
1561 1548 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1562 1549 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1563 1550 } else {
1564 1551 // stack to reg
1565 1552 if (dst.first()->is_Register()) {
1566 1553 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1567 1554 } else {
1568 1555 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1569 1556 }
1570 1557 }
1571 1558 } else if (dst.first()->is_stack()) {
1572 1559 // reg to stack
1573 1560 if (src.first()->is_Register()) {
1574 1561 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1575 1562 } else {
1576 1563 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1577 1564 }
1578 1565 } else {
1579 1566 // reg to reg
1580 1567 if (src.first()->is_Register()) {
1581 1568 if (dst.first()->is_Register()) {
1582 1569 // gpr -> gpr
1583 1570 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1584 1571 } else {
1585 1572 // gpr -> fpr
1586 1573 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1587 1574 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1588 1575 }
1589 1576 } else if (dst.first()->is_Register()) {
1590 1577 // fpr -> gpr
1591 1578 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1592 1579 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1593 1580 } else {
1594 1581 // fpr -> fpr
1595 1582 // In theory these overlap but the ordering is such that this is likely a nop
1596 1583 if ( src.first() != dst.first()) {
1597 1584 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1598 1585 }
1599 1586 }
1600 1587 }
1601 1588 }
1602 1589
1603 1590 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1604 1591 VMRegPair src_lo(src.first());
1605 1592 VMRegPair src_hi(src.second());
1606 1593 VMRegPair dst_lo(dst.first());
1607 1594 VMRegPair dst_hi(dst.second());
1608 1595 simple_move32(masm, src_lo, dst_lo);
1609 1596 simple_move32(masm, src_hi, dst_hi);
1610 1597 }
1611 1598
1612 1599 // A long move
1613 1600 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1614 1601
1615 1602 // Do the simple ones here else do two int moves
1616 1603 if (src.is_single_phys_reg() ) {
1617 1604 if (dst.is_single_phys_reg()) {
1618 1605 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1619 1606 } else {
1620 1607 // split src into two separate registers
1621 1608 // Remember hi means hi address or lsw on sparc
1622 1609 // Move msw to lsw
1623 1610 if (dst.second()->is_reg()) {
1624 1611 // MSW -> MSW
1625 1612 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1626 1613 // Now LSW -> LSW
1627 1614 // this will only move lo -> lo and ignore hi
1628 1615 VMRegPair split(dst.second());
1629 1616 simple_move32(masm, src, split);
1630 1617 } else {
1631 1618 VMRegPair split(src.first(), L4->as_VMReg());
1632 1619 // MSW -> MSW (lo ie. first word)
1633 1620 __ srax(src.first()->as_Register(), 32, L4);
1634 1621 split_long_move(masm, split, dst);
1635 1622 }
1636 1623 }
1637 1624 } else if (dst.is_single_phys_reg()) {
1638 1625 if (src.is_adjacent_aligned_on_stack(2)) {
1639 1626 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1640 1627 } else {
1641 1628 // dst is a single reg.
1642 1629 // Remember lo is low address not msb for stack slots
1643 1630 // and lo is the "real" register for registers
1644 1631 // src is
1645 1632
1646 1633 VMRegPair split;
1647 1634
1648 1635 if (src.first()->is_reg()) {
1649 1636 // src.lo (msw) is a reg, src.hi is stk/reg
1650 1637 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1651 1638 split.set_pair(dst.first(), src.first());
1652 1639 } else {
1653 1640 // msw is stack move to L5
1654 1641 // lsw is stack move to dst.lo (real reg)
1655 1642 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1656 1643 split.set_pair(dst.first(), L5->as_VMReg());
1657 1644 }
1658 1645
1659 1646 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1660 1647 // msw -> src.lo/L5, lsw -> dst.lo
1661 1648 split_long_move(masm, src, split);
1662 1649
1663 1650       // So dst now has the low order half correct; now position
1664 1651       // the msw half
1665 1652 __ sllx(split.first()->as_Register(), 32, L5);
1666 1653
1667 1654 const Register d = dst.first()->as_Register();
1668 1655 __ or3(L5, d, d);
1669 1656 }
1670 1657 } else {
1671 1658 // For LP64 we can probably do better.
1672 1659 split_long_move(masm, src, dst);
1673 1660 }
1674 1661 }
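// Illustrative only, not part of the original file: the single-physical-
// register source case of long_move in C-like pseudocode. For stack slots
// "hi" means the higher address, which on sparc holds the LSW:
//
//   dst.lo = (int32_t)(src >> 32);  // srax: the MSW of the 64-bit value
//   dst.hi = (int32_t) src;         // simple_move32: the LSW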
1675 1662
1676 1663 // A double move
1677 1664 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1678 1665
1679 1666 // The painful thing here is that like long_move a VMRegPair might be
1680 1667 // 1: a single physical register
1681 1668 // 2: two physical registers (v8)
1682 1669 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1683 1670 // 4: two stack slots
1684 1671
1685 1672   // Since src is always from the java calling convention we know that the src pair
1686 1673 // is always either all registers or all stack (and aligned?)
1687 1674
1688 1675 // in a register [lo] and a stack slot [hi]
1689 1676 if (src.first()->is_stack()) {
1690 1677 if (dst.first()->is_stack()) {
1691 1678 // stack to stack the easiest of the bunch
1692 1679 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1693 1680 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1694 1681 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1695 1682 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1696 1683 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1697 1684 } else {
1698 1685 // stack to reg
1699 1686 if (dst.second()->is_stack()) {
1700 1687 // stack -> reg, stack -> stack
1701 1688 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1702 1689 if (dst.first()->is_Register()) {
1703 1690 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1704 1691 } else {
1705 1692 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1706 1693 }
1707 1694 // This was missing. (very rare case)
1708 1695 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1709 1696 } else {
1710 1697 // stack -> reg
1711 1698 // Eventually optimize for alignment QQQ
1712 1699 if (dst.first()->is_Register()) {
1713 1700 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1714 1701 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1715 1702 } else {
1716 1703 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1717 1704 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1718 1705 }
1719 1706 }
1720 1707 }
1721 1708 } else if (dst.first()->is_stack()) {
1722 1709 // reg to stack
1723 1710 if (src.first()->is_Register()) {
1724 1711 // Eventually optimize for alignment QQQ
1725 1712 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1726 1713 if (src.second()->is_stack()) {
1727 1714 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1728 1715 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1729 1716 } else {
1730 1717 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1731 1718 }
1732 1719 } else {
1733 1720 // fpr to stack
1734 1721 if (src.second()->is_stack()) {
1735 1722 ShouldNotReachHere();
1736 1723 } else {
1737 1724 // Is the stack aligned?
1738 1725 if (reg2offset(dst.first()) & 0x7) {
1739 1726           // No, do as pairs
1740 1727 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1741 1728 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1742 1729 } else {
1743 1730 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1744 1731 }
1745 1732 }
1746 1733 }
1747 1734 } else {
1748 1735 // reg to reg
1749 1736 if (src.first()->is_Register()) {
1750 1737 if (dst.first()->is_Register()) {
1751 1738 // gpr -> gpr
1752 1739 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1753 1740 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1754 1741 } else {
1755 1742 // gpr -> fpr
1756 1743 // ought to be able to do a single store
1757 1744 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1758 1745 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1759 1746 // ought to be able to do a single load
1760 1747 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1761 1748 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1762 1749 }
1763 1750 } else if (dst.first()->is_Register()) {
1764 1751 // fpr -> gpr
1765 1752 // ought to be able to do a single store
1766 1753 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1767 1754 // ought to be able to do a single load
1768 1755 // REMEMBER first() is low address not LSB
1769 1756 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1770 1757 if (dst.second()->is_Register()) {
1771 1758 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1772 1759 } else {
1773 1760 __ ld(FP, -4 + STACK_BIAS, L4);
1774 1761 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1775 1762 }
1776 1763 } else {
1777 1764 // fpr -> fpr
1778 1765 // In theory these overlap but the ordering is such that this is likely a nop
1779 1766 if ( src.first() != dst.first()) {
1780 1767 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1781 1768 }
1782 1769 }
1783 1770 }
1784 1771 }
1785 1772
1786 1773 // Creates an inner frame if one hasn't already been created, and
1787 1774 // saves a copy of the thread in L7_thread_cache
1788 1775 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1789 1776 if (!*already_created) {
1790 1777 __ save_frame(0);
1791 1778 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1792 1779 // Don't use save_thread because it smashes G2 and we merely want to save a
1793 1780 // copy
1794 1781 __ mov(G2_thread, L7_thread_cache);
1795 1782 *already_created = true;
1796 1783 }
1797 1784 }
1798 1785
1799 1786 // ---------------------------------------------------------------------------
1800 1787 // Generate a native wrapper for a given method. The method takes arguments
1801 1788 // in the Java compiled code convention, marshals them to the native
1802 1789 // convention (handlizes oops, etc), transitions to native, makes the call,
1803 1790 // returns to java state (possibly blocking), unhandlizes any result and
1804 1791 // returns.
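// A hedged outline of the phases below (illustrative, not part of the
// original file):
//
//   1. inline cache check; a mismatch jumps to the ic-miss stub
//   2. (COMPILER1 only) fast path for Object.hashCode
//   3. build the C signature and compute the wrapper frame layout
//   4. the "Grand Shuffle": move java args into the native convention,
//      handlizing oops and recording them in an oopMap
//   5. lock if synchronized, transition to _thread_in_native, call out
//   6. on return: normalize the result, block if a safepoint is in
//      progress, unlock, unhandlize an oop result, and forward any
//      pending exception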
1805 1792 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1806 1793 methodHandle method,
1807 1794 int total_in_args,
1808 1795 int comp_args_on_stack, // in VMRegStackSlots
1809 1796 BasicType *in_sig_bt,
1810 1797 VMRegPair *in_regs,
1811 1798 BasicType ret_type) {
1812 1799
1813 1800   // Native nmethod wrappers never take possession of the oop arguments.
1814 1801   // So the caller will gc the arguments. The only thing we need an
1815 1802   // oopMap for is if the call is static.
1816 1803 //
1817 1804 // An OopMap for lock (and class if static), and one for the VM call itself
1818 1805 OopMapSet *oop_maps = new OopMapSet();
1819 1806 intptr_t start = (intptr_t)__ pc();
1820 1807
1821 1808 // First thing make an ic check to see if we should even be here
1822 1809 {
1823 1810 Label L;
1824 1811 const Register temp_reg = G3_scratch;
1825 1812 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1826 1813 __ verify_oop(O0);
1827 1814 __ load_klass(O0, temp_reg);
1828 1815 __ cmp(temp_reg, G5_inline_cache_reg);
1829 1816 __ brx(Assembler::equal, true, Assembler::pt, L);
1830 1817 __ delayed()->nop();
1831 1818
1832 1819 __ jump_to(ic_miss, temp_reg);
1833 1820 __ delayed()->nop();
1834 1821 __ align(CodeEntryAlignment);
1835 1822 __ bind(L);
1836 1823 }
1837 1824
1838 1825 int vep_offset = ((intptr_t)__ pc()) - start;
1839 1826
1840 1827 #ifdef COMPILER1
1841 1828 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1842 1829 // Object.hashCode can pull the hashCode from the header word
1843 1830 // instead of doing a full VM transition once it's been computed.
1844 1831 // Since hashCode is usually polymorphic at call sites we can't do
1845 1832 // this optimization at the call site without a lot of work.
1846 1833 Label slowCase;
1847 1834 Register receiver = O0;
1848 1835 Register result = O0;
1849 1836 Register header = G3_scratch;
1850 1837 Register hash = G3_scratch; // overwrite header value with hash value
1851 1838 Register mask = G1; // to get hash field from header
1852 1839
1853 1840 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1854 1841 // We depend on hash_mask being at most 32 bits and avoid the use of
1855 1842 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1856 1843 // vm: see markOop.hpp.
1857 1844 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1858 1845 __ sethi(markOopDesc::hash_mask, mask);
1859 1846 __ btst(markOopDesc::unlocked_value, header);
1860 1847 __ br(Assembler::zero, false, Assembler::pn, slowCase);
1861 1848 if (UseBiasedLocking) {
1862 1849 // Check if biased and fall through to runtime if so
1863 1850 __ delayed()->nop();
1864 1851 __ btst(markOopDesc::biased_lock_bit_in_place, header);
1865 1852 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1866 1853 }
1867 1854 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1868 1855
1869 1856 // Check for a valid (non-zero) hash code and get its value.
1870 1857 #ifdef _LP64
1871 1858 __ srlx(header, markOopDesc::hash_shift, hash);
1872 1859 #else
1873 1860 __ srl(header, markOopDesc::hash_shift, hash);
1874 1861 #endif
1875 1862 __ andcc(hash, mask, hash);
1876 1863 __ br(Assembler::equal, false, Assembler::pn, slowCase);
1877 1864 __ delayed()->nop();
1878 1865
1879 1866 // leaf return.
1880 1867 __ retl();
1881 1868 __ delayed()->mov(hash, result);
1882 1869 __ bind(slowCase);
1883 1870 }
1884 1871 #endif // COMPILER1
1885 1872
1886 1873
1887 1874   // We have received a description of where all the java args are located
1888 1875 // on entry to the wrapper. We need to convert these args to where
1889 1876 // the jni function will expect them. To figure out where they go
1890 1877 // we convert the java signature to a C signature by inserting
1891 1878 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1892 1879
1893 1880 int total_c_args = total_in_args + 1;
1894 1881 if (method->is_static()) {
1895 1882 total_c_args++;
1896 1883 }
1897 1884
1898 1885 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1899 1886 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1900 1887
1901 1888 int argc = 0;
1902 1889 out_sig_bt[argc++] = T_ADDRESS;
1903 1890 if (method->is_static()) {
1904 1891 out_sig_bt[argc++] = T_OBJECT;
1905 1892 }
1906 1893
1907 1894 for (int i = 0; i < total_in_args ; i++ ) {
1908 1895 out_sig_bt[argc++] = in_sig_bt[i];
1909 1896 }
1910 1897
1911 1898 // Now figure out where the args must be stored and how much stack space
1912 1899 // they require (neglecting out_preserve_stack_slots but space for storing
1913 1900   // the 1st six register arguments). It's weird; see int_stk_helper.
1914 1901 //
1915 1902 int out_arg_slots;
1916 1903 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1917 1904
1918 1905 // Compute framesize for the wrapper. We need to handlize all oops in
1919 1906 // registers. We must create space for them here that is disjoint from
1920 1907 // the windowed save area because we have no control over when we might
1921 1908 // flush the window again and overwrite values that gc has since modified.
1922 1909 // (The live window race)
1923 1910 //
1924 1911   // We always just allocate 6 words for storing down these objects. This allows
1925 1912 // us to simply record the base and use the Ireg number to decide which
1926 1913 // slot to use. (Note that the reg number is the inbound number not the
1927 1914 // outbound number).
1928 1915 // We must shuffle args to match the native convention, and include var-args space.
1929 1916
1930 1917 // Calculate the total number of stack slots we will need.
1931 1918
1932 1919 // First count the abi requirement plus all of the outgoing args
1933 1920 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1934 1921
1935 1922 // Now the space for the inbound oop handle area
1936 1923
1937 1924 int oop_handle_offset = stack_slots;
1938 1925 stack_slots += 6*VMRegImpl::slots_per_word;
1939 1926
1940 1927 // Now any space we need for handlizing a klass if static method
1941 1928
1942 1929 int oop_temp_slot_offset = 0;
1943 1930 int klass_slot_offset = 0;
1944 1931 int klass_offset = -1;
1945 1932 int lock_slot_offset = 0;
1946 1933 bool is_static = false;
1947 1934
1948 1935 if (method->is_static()) {
1949 1936 klass_slot_offset = stack_slots;
1950 1937 stack_slots += VMRegImpl::slots_per_word;
1951 1938 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1952 1939 is_static = true;
1953 1940 }
1954 1941
1955 1942 // Plus a lock if needed
1956 1943
1957 1944 if (method->is_synchronized()) {
1958 1945 lock_slot_offset = stack_slots;
1959 1946 stack_slots += VMRegImpl::slots_per_word;
1960 1947 }
1961 1948
1962 1949 // Now a place to save return value or as a temporary for any gpr -> fpr moves
1963 1950 stack_slots += 2;
1964 1951
1965 1952 // Ok The space we have allocated will look like:
1966 1953 //
1967 1954 //
1968 1955 // FP-> | |
1969 1956 // |---------------------|
1970 1957 // | 2 slots for moves |
1971 1958 // |---------------------|
1972 1959 // | lock box (if sync) |
1973 1960 // |---------------------| <- lock_slot_offset
1974 1961 // | klass (if static) |
1975 1962 // |---------------------| <- klass_slot_offset
1976 1963 // | oopHandle area |
1977 1964 // |---------------------| <- oop_handle_offset
1978 1965 // | outbound memory |
1979 1966 // | based arguments |
1980 1967 // | |
1981 1968 // |---------------------|
1982 1969 // | vararg area |
1983 1970 // |---------------------|
1984 1971 // | |
1985 1972 // SP-> | out_preserved_slots |
1986 1973 //
1987 1974 //
1988 1975
1989 1976
1990 1977 // Now compute actual number of stack words we need rounding to make
1991 1978 // stack properly aligned.
1992 1979 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1993 1980
1994 1981 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
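  // Illustrative only, not part of the original file: the contributions
  // gathered above, summed symbolically for a static synchronized method
  // (the concrete constants are platform configuration):
  //
  //   stack_slots = out_preserve_stack_slots() + out_arg_slots // abi + args
  //               + 6 * slots_per_word                         // oop handles
  //               + slots_per_word                             // klass slot
  //               + slots_per_word                             // lock box
  //               + 2                                          // result temp
  //   stack_slots = round_to(stack_slots, 2 * slots_per_word); // alignment
  //   stack_size  = stack_slots * VMRegImpl::stack_slot_size;  // in bytes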
1995 1982
1996 1983 // Generate stack overflow check before creating frame
1997 1984 __ generate_stack_overflow_check(stack_size);
1998 1985
1999 1986 // Generate a new frame for the wrapper.
2000 1987 __ save(SP, -stack_size, SP);
2001 1988
2002 1989 int frame_complete = ((intptr_t)__ pc()) - start;
2003 1990
2004 1991 __ verify_thread();
2005 1992
2006 1993
2007 1994 //
2008 1995   // We immediately shuffle the arguments so that for any vm call we have
2009 1996   // to make from here on out (sync slow path, jvmti, etc.) we will have
2010 1997 // captured the oops from our caller and have a valid oopMap for
2011 1998 // them.
2012 1999
2013 2000 // -----------------
2014 2001 // The Grand Shuffle
2015 2002 //
2016 2003 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2017 2004 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2018 2005 // the class mirror instead of a receiver. This pretty much guarantees that
2019 2006 // register layout will not match. We ignore these extra arguments during
2020 2007 // the shuffle. The shuffle is described by the two calling convention
2021 2008 // vectors we have in our possession. We simply walk the java vector to
2022 2009 // get the source locations and the c vector to get the destinations.
2023 2010 // Because we have a new window and the argument registers are completely
2024 2011 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2025 2012 // here.
2026 2013
2027 2014 // This is a trick. We double the stack slots so we can claim
2028 2015 // the oops in the caller's frame. Since we are sure to have
2029 2016   // more args than the caller, doubling is enough to make
2030 2017 // sure we can capture all the incoming oop args from the
2031 2018 // caller.
2032 2019 //
2033 2020 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2034 2021 int c_arg = total_c_args - 1;
2035 2022 // Record sp-based slot for receiver on stack for non-static methods
2036 2023 int receiver_offset = -1;
2037 2024
2038 2025   // We move the arguments backward because a floating point register
2039 2026   // destination will always be a register with a greater or equal register
2040 2027   // number, or the stack.
2041 2028
2042 2029 #ifdef ASSERT
2043 2030 bool reg_destroyed[RegisterImpl::number_of_registers];
2044 2031 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2045 2032 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2046 2033 reg_destroyed[r] = false;
2047 2034 }
2048 2035 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2049 2036 freg_destroyed[f] = false;
2050 2037 }
2051 2038
2052 2039 #endif /* ASSERT */
2053 2040
2054 2041 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2055 2042
2056 2043 #ifdef ASSERT
2057 2044 if (in_regs[i].first()->is_Register()) {
2058 2045 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2059 2046 } else if (in_regs[i].first()->is_FloatRegister()) {
2060 2047 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2061 2048 }
2062 2049 if (out_regs[c_arg].first()->is_Register()) {
2063 2050 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2064 2051 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2065 2052 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2066 2053 }
2067 2054 #endif /* ASSERT */
2068 2055
2069 2056 switch (in_sig_bt[i]) {
2070 2057 case T_ARRAY:
2071 2058 case T_OBJECT:
2072 2059 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2073 2060 ((i == 0) && (!is_static)),
2074 2061 &receiver_offset);
2075 2062 break;
2076 2063 case T_VOID:
2077 2064 break;
2078 2065
2079 2066 case T_FLOAT:
2080 2067 float_move(masm, in_regs[i], out_regs[c_arg]);
2081 2068 break;
2082 2069
2083 2070 case T_DOUBLE:
2084 2071 assert( i + 1 < total_in_args &&
2085 2072 in_sig_bt[i + 1] == T_VOID &&
2086 2073 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2087 2074 double_move(masm, in_regs[i], out_regs[c_arg]);
2088 2075 break;
2089 2076
2090 2077 case T_LONG :
2091 2078 long_move(masm, in_regs[i], out_regs[c_arg]);
2092 2079 break;
2093 2080
2094 2081 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2095 2082
2096 2083 default:
2097 2084 move32_64(masm, in_regs[i], out_regs[c_arg]);
2098 2085 }
2099 2086 }
2100 2087
2101 2088 // Pre-load a static method's oop into O1. Used both by locking code and
2102 2089 // the normal JNI call code.
2103 2090 if (method->is_static()) {
2104 2091 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2105 2092
2106 2093 // Now handlize the static class mirror in O1. It's known not-null.
2107 2094 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2108 2095 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2109 2096 __ add(SP, klass_offset + STACK_BIAS, O1);
2110 2097 }
2111 2098
2112 2099
2113 2100 const Register L6_handle = L6;
2114 2101
2115 2102 if (method->is_synchronized()) {
2116 2103 __ mov(O1, L6_handle);
2117 2104 }
2118 2105
2119 2106   // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2120 2107 // except O6/O7. So if we must call out we must push a new frame. We immediately
2121 2108 // push a new frame and flush the windows.
2122 2109
2123 2110 #ifdef _LP64
2124 2111 intptr_t thepc = (intptr_t) __ pc();
2125 2112 {
2126 2113 address here = __ pc();
2127 2114 // Call the next instruction
2128 2115 __ call(here + 8, relocInfo::none);
2129 2116 __ delayed()->nop();
2130 2117 }
2131 2118 #else
2132 2119 intptr_t thepc = __ load_pc_address(O7, 0);
2133 2120 #endif /* _LP64 */
2134 2121
2135 2122 // We use the same pc/oopMap repeatedly when we call out
2136 2123 oop_maps->add_gc_map(thepc - start, map);
2137 2124
2138 2125 // O7 now has the pc loaded that we will use when we finally call to native.
2139 2126
2140 2127 // Save thread in L7; it crosses a bunch of VM calls below
2141 2128 // Don't use save_thread because it smashes G2 and we merely
2142 2129 // want to save a copy
2143 2130 __ mov(G2_thread, L7_thread_cache);
2144 2131
2145 2132
2146 2133   // If we create an inner frame, once is plenty;
2147 2134   // when we create it we must also save G2_thread
2148 2135 bool inner_frame_created = false;
2149 2136
2150 2137 // dtrace method entry support
2151 2138 {
2152 2139 SkipIfEqual skip_if(
2153 2140 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2154 2141 // create inner frame
2155 2142 __ save_frame(0);
2156 2143 __ mov(G2_thread, L7_thread_cache);
2157 2144 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2158 2145 __ call_VM_leaf(L7_thread_cache,
2159 2146 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2160 2147 G2_thread, O1);
2161 2148 __ restore();
2162 2149 }
2163 2150
2164 2151 // RedefineClasses() tracing support for obsolete method entry
2165 2152 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2166 2153 // create inner frame
2167 2154 __ save_frame(0);
2168 2155 __ mov(G2_thread, L7_thread_cache);
2169 2156 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2170 2157 __ call_VM_leaf(L7_thread_cache,
2171 2158 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2172 2159 G2_thread, O1);
2173 2160 __ restore();
2174 2161 }
2175 2162
2176 2163   // We are in the jni frame unless inner_frame_created is true, in which
2177 2164   // case we are one frame deeper (the "inner" frame). If we are in the
2178 2165   // "inner" frame the args are in the Iregs; if in the jni frame then
2179 2166   // they are in the Oregs.
2180 2167 // If we ever need to go to the VM (for locking, jvmti) then
2181 2168 // we will always be in the "inner" frame.
2182 2169
2183 2170 // Lock a synchronized method
2184 2171 int lock_offset = -1; // Set if locked
2185 2172 if (method->is_synchronized()) {
2186 2173 Register Roop = O1;
2187 2174 const Register L3_box = L3;
2188 2175
2189 2176 create_inner_frame(masm, &inner_frame_created);
2190 2177
2191 2178 __ ld_ptr(I1, 0, O1);
2192 2179 Label done;
2193 2180
2194 2181 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2195 2182 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2196 2183 #ifdef ASSERT
2197 2184 if (UseBiasedLocking) {
2198 2185 // making the box point to itself will make it clear it went unused
2199 2186 // but also be obviously invalid
2200 2187 __ st_ptr(L3_box, L3_box, 0);
2201 2188 }
2202 2189 #endif // ASSERT
2203 2190 //
2204 2191 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2205 2192 //
2206 2193 __ compiler_lock_object(Roop, L1, L3_box, L2);
2207 2194 __ br(Assembler::equal, false, Assembler::pt, done);
2208 2195 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2209 2196
2210 2197
2211 2198 // None of the above fast optimizations worked so we have to get into the
2212 2199 // slow case of monitor enter. Inline a special case of call_VM that
2213 2200 // disallows any pending_exception.
2214 2201 __ mov(Roop, O0); // Need oop in O0
2215 2202 __ mov(L3_box, O1);
2216 2203
2217 2204 // Record last_Java_sp, in case the VM code releases the JVM lock.
2218 2205
2219 2206 __ set_last_Java_frame(FP, I7);
2220 2207
2221 2208 // do the call
2222 2209 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2223 2210 __ delayed()->mov(L7_thread_cache, O2);
2224 2211
2225 2212 __ restore_thread(L7_thread_cache); // restore G2_thread
2226 2213 __ reset_last_Java_frame();
2227 2214
2228 2215 #ifdef ASSERT
2229 2216 { Label L;
2230 2217 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2231 2218 __ br_null(O0, false, Assembler::pt, L);
2232 2219 __ delayed()->nop();
2233 2220 __ stop("no pending exception allowed on exit from IR::monitorenter");
2234 2221 __ bind(L);
2235 2222 }
2236 2223 #endif
2237 2224 __ bind(done);
2238 2225 }
2239 2226
2240 2227
2241 2228 // Finally just about ready to make the JNI call
2242 2229
2243 2230 __ flush_windows();
2244 2231 if (inner_frame_created) {
2245 2232 __ restore();
2246 2233 } else {
2247 2234 // Store only what we need from this frame
2248 2235 // QQQ I think that non-v9 (like we care) we don't need these saves
2249 2236 // either as the flush traps and the current window goes too.
2250 2237 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2251 2238 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2252 2239 }
2253 2240
2254 2241 // get JNIEnv* which is first argument to native
2255 2242
2256 2243 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2257 2244
2258 2245 // Use that pc we placed in O7 a while back as the current frame anchor
2259 2246
2260 2247 __ set_last_Java_frame(SP, O7);
2261 2248
2262 2249 // Transition from _thread_in_Java to _thread_in_native.
2263 2250 __ set(_thread_in_native, G3_scratch);
2264 2251 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2265 2252
2266 2253   // We flushed the windows ages ago; now mark them as flushed
2267 2254 
2269 2256 __ set(JavaFrameAnchor::flushed, G3_scratch);
2270 2257
2271 2258 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2272 2259
2273 2260 #ifdef _LP64
2274 2261 AddressLiteral dest(method->native_function());
2275 2262 __ relocate(relocInfo::runtime_call_type);
2276 2263 __ jumpl_to(dest, O7, O7);
2277 2264 #else
2278 2265 __ call(method->native_function(), relocInfo::runtime_call_type);
2279 2266 #endif
2280 2267 __ delayed()->st(G3_scratch, flags);
2281 2268
2282 2269 __ restore_thread(L7_thread_cache); // restore G2_thread
2283 2270
2284 2271 // Unpack native results. For int-types, we do any needed sign-extension
2285 2272 // and move things into I0. The return value there will survive any VM
2286 2273 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2287 2274 // specially in the slow-path code.
2288 2275 switch (ret_type) {
2289 2276 case T_VOID: break; // Nothing to do!
2290 2277 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2291 2278 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2292 2279   // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
2293 2280 case T_LONG:
2294 2281 #ifndef _LP64
2295 2282 __ mov(O1, I1);
2296 2283 #endif
2297 2284 // Fall thru
2298 2285 case T_OBJECT: // Really a handle
2299 2286 case T_ARRAY:
2300 2287 case T_INT:
2301 2288 __ mov(O0, I0);
2302 2289 break;
2303 2290 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2304 2291 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2305 2292 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2306 2293 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2307 2294                                 // Cannot de-handlize until after reclaiming jvm_lock
2308 2295 default:
2309 2296 ShouldNotReachHere();
2310 2297 }
2311 2298
2312 2299 // must we block?
2313 2300
2314 2301 // Block, if necessary, before resuming in _thread_in_Java state.
2315 2302 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2316 2303 { Label no_block;
2317 2304 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2318 2305
2319 2306 // Switch thread to "native transition" state before reading the synchronization state.
2320 2307 // This additional state is necessary because reading and testing the synchronization
2321 2308 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2322 2309 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2323 2310 // VM thread changes sync state to synchronizing and suspends threads for GC.
2324 2311 // Thread A is resumed to finish this native method, but doesn't block here since it
2325 2312   //     didn't see any synchronization in progress, and escapes.
2326 2313 __ set(_thread_in_native_trans, G3_scratch);
2327 2314 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2328 2315 if(os::is_MP()) {
2329 2316 if (UseMembar) {
2330 2317 // Force this write out before the read below
2331 2318 __ membar(Assembler::StoreLoad);
2332 2319 } else {
2333 2320 // Write serialization page so VM thread can do a pseudo remote membar.
2334 2321 // We use the current thread pointer to calculate a thread specific
2335 2322 // offset to write to within the page. This minimizes bus traffic
2336 2323 // due to cache line collision.
2337 2324 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2338 2325 }
2339 2326 }
2340 2327 __ load_contents(sync_state, G3_scratch);
2341 2328 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2342 2329
2343 2330 Label L;
2344 2331 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2345 2332 __ br(Assembler::notEqual, false, Assembler::pn, L);
2346 2333 __ delayed()->ld(suspend_state, G3_scratch);
2347 2334 __ cmp(G3_scratch, 0);
2348 2335 __ br(Assembler::equal, false, Assembler::pt, no_block);
2349 2336 __ delayed()->nop();
2350 2337 __ bind(L);
2351 2338
2352 2339 // Block. Save any potential method result value before the operation and
2353 2340 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2354 2341   // lets us share the oopMap we used when we went native rather than create
2355 2342 // a distinct one for this pc
2356 2343 //
2357 2344 save_native_result(masm, ret_type, stack_slots);
2358 2345 __ call_VM_leaf(L7_thread_cache,
2359 2346 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2360 2347 G2_thread);
2361 2348
2362 2349 // Restore any method result value
2363 2350 restore_native_result(masm, ret_type, stack_slots);
2364 2351 __ bind(no_block);
2365 2352 }
2366 2353
2367 2354 // thread state is thread_in_native_trans. Any safepoint blocking has already
2368 2355 // happened so we can now change state to _thread_in_Java.
2369 2356
2370 2357
2371 2358 __ set(_thread_in_Java, G3_scratch);
2372 2359 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2373 2360
2374 2361
2375 2362 Label no_reguard;
2376 2363 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2377 2364 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2378 2365 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2379 2366 __ delayed()->nop();
2380 2367
2381 2368 save_native_result(masm, ret_type, stack_slots);
2382 2369 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2383 2370 __ delayed()->nop();
2384 2371
2385 2372 __ restore_thread(L7_thread_cache); // restore G2_thread
2386 2373 restore_native_result(masm, ret_type, stack_slots);
2387 2374
2388 2375 __ bind(no_reguard);
2389 2376
2390 2377 // Handle possible exception (will unlock if necessary)
2391 2378
2392 2379 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2393 2380
2394 2381 // Unlock
2395 2382 if (method->is_synchronized()) {
2396 2383 Label done;
2397 2384 Register I2_ex_oop = I2;
2398 2385 const Register L3_box = L3;
2399 2386 // Get locked oop from the handle we passed to jni
2400 2387 __ ld_ptr(L6_handle, 0, L4);
2401 2388 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2402 2389 // Must save pending exception around the slow-path VM call. Since it's a
2403 2390 // leaf call, the pending exception (if any) can be kept in a register.
2404 2391 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2405 2392 // Now unlock
2406 2393 // (Roop, Rmark, Rbox, Rscratch)
2407 2394 __ compiler_unlock_object(L4, L1, L3_box, L2);
2408 2395 __ br(Assembler::equal, false, Assembler::pt, done);
2409 2396 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2410 2397
2411 2398 // save and restore any potential method result value around the unlocking
2412 2399 // operation. Will save in I0 (or stack for FP returns).
2413 2400 save_native_result(masm, ret_type, stack_slots);
2414 2401
2415 2402 // Must clear pending-exception before re-entering the VM. Since this is
2416 2403 // a leaf call, pending-exception-oop can be safely kept in a register.
2417 2404 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2418 2405
2419 2406   // slow case of monitor exit. Inline a special case of call_VM that
2420 2407 // disallows any pending_exception.
2421 2408 __ mov(L3_box, O1);
2422 2409
2423 2410 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2424 2411 __ delayed()->mov(L4, O0); // Need oop in O0
2425 2412
2426 2413 __ restore_thread(L7_thread_cache); // restore G2_thread
2427 2414
2428 2415 #ifdef ASSERT
2429 2416 { Label L;
2430 2417 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2431 2418 __ br_null(O0, false, Assembler::pt, L);
2432 2419 __ delayed()->nop();
2433 2420 __ stop("no pending exception allowed on exit from IR::monitorexit");
2434 2421 __ bind(L);
2435 2422 }
2436 2423 #endif
2437 2424 restore_native_result(masm, ret_type, stack_slots);
2438 2425   // check_forward_pending_exception jumps to forward_exception if any pending
2439 2426 // exception is set. The forward_exception routine expects to see the
2440 2427 // exception in pending_exception and not in a register. Kind of clumsy,
2441 2428 // since all folks who branch to forward_exception must have tested
2442 2429 // pending_exception first and hence have it in a register already.
2443 2430 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2444 2431 __ bind(done);
2445 2432 }
2446 2433
2447 2434 // Tell dtrace about this method exit
2448 2435 {
2449 2436 SkipIfEqual skip_if(
2450 2437 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2451 2438 save_native_result(masm, ret_type, stack_slots);
2452 2439 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2453 2440 __ call_VM_leaf(L7_thread_cache,
2454 2441 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2455 2442 G2_thread, O1);
2456 2443 restore_native_result(masm, ret_type, stack_slots);
2457 2444 }
2458 2445
2459 2446 // Clear "last Java frame" SP and PC.
2460 2447 __ verify_thread(); // G2_thread must be correct
2461 2448 __ reset_last_Java_frame();
2462 2449
2463 2450 // Unpack oop result
2464 2451 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2465 2452 Label L;
2466 2453 __ addcc(G0, I0, G0);
2467 2454 __ brx(Assembler::notZero, true, Assembler::pt, L);
2468 2455 __ delayed()->ld_ptr(I0, 0, I0);
2469 2456 __ mov(G0, I0);
2470 2457 __ bind(L);
2471 2458 __ verify_oop(I0);
2472 2459 }
2473 2460
2474 2461 // reset handle block
2475 2462 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2476 2463 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2477 2464
2478 2465 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2479 2466 check_forward_pending_exception(masm, G3_scratch);
2480 2467
2481 2468
2482 2469 // Return
2483 2470
2484 2471 #ifndef _LP64
2485 2472 if (ret_type == T_LONG) {
2486 2473
2487 2474 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2488 2475 __ sllx(I0, 32, G1); // Shift bits into high G1
2489 2476 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2490 2477 __ or3 (I1, G1, G1); // OR 64 bits into G1
2491 2478 }
2492 2479 #endif
2493 2480
2494 2481 __ ret();
2495 2482 __ delayed()->restore();
2496 2483
2497 2484 __ flush();
2498 2485
2499 2486 nmethod *nm = nmethod::new_native_nmethod(method,
2500 2487 masm->code(),
2501 2488 vep_offset,
2502 2489 frame_complete,
2503 2490 stack_slots / VMRegImpl::slots_per_word,
2504 2491 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2505 2492 in_ByteSize(lock_offset),
2506 2493 oop_maps);
2507 2494 return nm;
2508 2495
2509 2496 }
2510 2497
2511 2498 #ifdef HAVE_DTRACE_H
2512 2499 // ---------------------------------------------------------------------------
2513 2500 // Generate a dtrace nmethod for a given signature. The method takes arguments
2514 2501 // in the Java compiled code convention, marshals them to the native
2515 2502 // abi and then leaves nops at the position you would expect to call a native
2516 2503 // function. When the probe is enabled the nops are replaced with a trap
2517 2504 // instruction that dtrace inserts and the trap will cause a notification
2518 2505 // to dtrace.
2519 2506 //
2520 2507 // The probes are only able to take primitive types and java/lang/String as
2521 2508 // arguments. No other java types are allowed. Strings are converted to utf8
2522 2509 // strings so that, from dtrace's point of view, java strings are converted to C
2523 2510 // strings. There is an arbitrary fixed limit on the total space that a method
2524 2511 // can use for converting the strings. (256 chars per string in the signature).
2525 2512 // So any java string larger than this is truncated.
2526 2513
2527 2514 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2528 2515 static bool offsets_initialized = false;
2529 2516
2530 2517 static VMRegPair reg64_to_VMRegPair(Register r) {
2531 2518 VMRegPair ret;
2532 2519 if (wordSize == 8) {
2533 2520 ret.set2(r->as_VMReg());
2534 2521 } else {
2535 2522 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2536 2523 }
2537 2524 return ret;
2538 2525 }
2539 2526
2540 2527
2541 2528 nmethod *SharedRuntime::generate_dtrace_nmethod(
2542 2529 MacroAssembler *masm, methodHandle method) {
2543 2530
2544 2531
2545 2532 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2546 2533 // be single threaded in this method.
2547 2534 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2548 2535
2549 2536 // Fill in the signature array, for the calling-convention call.
2550 2537 int total_args_passed = method->size_of_parameters();
2551 2538
2552 2539 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2553 2540 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2554 2541
2555 2542   // The signature we are going to use for the trap that dtrace will see:
2556 2543   // java/lang/String is converted. We drop "this", and any other object
2557 2544 // is converted to NULL. (A one-slot java/lang/Long object reference
2558 2545 // is converted to a two-slot long, which is why we double the allocation).
2559 2546 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2560 2547 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2561 2548
2562 2549 int i=0;
2563 2550 int total_strings = 0;
2564 2551 int first_arg_to_pass = 0;
2565 2552 int total_c_args = 0;
2566 2553
2567 2554 // Skip the receiver as dtrace doesn't want to see it
2568 2555 if( !method->is_static() ) {
2569 2556 in_sig_bt[i++] = T_OBJECT;
2570 2557 first_arg_to_pass = 1;
2571 2558 }
2572 2559
2573 2560 SignatureStream ss(method->signature());
2574 2561 for ( ; !ss.at_return_type(); ss.next()) {
2575 2562 BasicType bt = ss.type();
2576 2563 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2577 2564 out_sig_bt[total_c_args++] = bt;
2578 2565 if( bt == T_OBJECT) {
2579 2566 symbolOop s = ss.as_symbol_or_null();
2580 2567 if (s == vmSymbols::java_lang_String()) {
2581 2568 total_strings++;
2582 2569 out_sig_bt[total_c_args-1] = T_ADDRESS;
2583 2570 } else if (s == vmSymbols::java_lang_Boolean() ||
2584 2571 s == vmSymbols::java_lang_Byte()) {
2585 2572 out_sig_bt[total_c_args-1] = T_BYTE;
2586 2573 } else if (s == vmSymbols::java_lang_Character() ||
2587 2574 s == vmSymbols::java_lang_Short()) {
2588 2575 out_sig_bt[total_c_args-1] = T_SHORT;
2589 2576 } else if (s == vmSymbols::java_lang_Integer() ||
2590 2577 s == vmSymbols::java_lang_Float()) {
2591 2578 out_sig_bt[total_c_args-1] = T_INT;
2592 2579 } else if (s == vmSymbols::java_lang_Long() ||
2593 2580 s == vmSymbols::java_lang_Double()) {
2594 2581 out_sig_bt[total_c_args-1] = T_LONG;
2595 2582 out_sig_bt[total_c_args++] = T_VOID;
2596 2583 }
2597 2584 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2598 2585 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2599 2586 // We convert double to long
2600 2587 out_sig_bt[total_c_args-1] = T_LONG;
2601 2588 out_sig_bt[total_c_args++] = T_VOID;
2602 2589 } else if ( bt == T_FLOAT) {
2603 2590 // We convert float to int
2604 2591 out_sig_bt[total_c_args-1] = T_INT;
2605 2592 }
2606 2593 }
2607 2594
2608 2595 assert(i==total_args_passed, "validly parsed signature");
2609 2596
2610 2597 // Now get the compiled-Java layout as input arguments
2611 2598 int comp_args_on_stack;
2612 2599 comp_args_on_stack = SharedRuntime::java_calling_convention(
2613 2600 in_sig_bt, in_regs, total_args_passed, false);
2614 2601
2615 2602 // We have received a description of where all the java args are located
2616 2603 // on entry to the wrapper. We need to convert these args to where
2617 2604 // a native (non-JNI) function would expect them. To figure out
2618 2605 // where they go we convert the java signature to a C signature and remove
2619 2606 // T_VOID for any long/double we might have received.
2620 2607
2621 2608
2622 2609 // Now figure out where the args must be stored and how much stack space
2623 2610 // they require (neglecting out_preserve_stack_slots but counting space for
2624 2611 // storing the 1st six register arguments). It's weird; see int_stk_helper.
2625 2612 //
2626 2613 int out_arg_slots;
2627 2614 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2628 2615
2629 2616 // Calculate the total number of stack slots we will need.
2630 2617
2631 2618 // First count the abi requirement plus all of the outgoing args
2632 2619 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2633 2620
2634 2621 // Plus a temp for possible conversion of float/double/long register args
2635 2622
2636 2623 int conversion_temp = stack_slots;
2637 2624 stack_slots += 2;
2638 2625
2639 2626
2640 2627 // Now space for the string(s) we must convert
2641 2628
2642 2629 int string_locs = stack_slots;
2643 2630 stack_slots += total_strings *
2644 2631 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2645 2632
2646 2633 // OK, the space we have allocated will look like:
2647 2634 //
2648 2635 //
2649 2636 // FP-> | |
2650 2637 // |---------------------|
2651 2638 // | string[n] |
2652 2639 // |---------------------| <- string_locs[n]
2653 2640 // | string[n-1] |
2654 2641 // |---------------------| <- string_locs[n-1]
2655 2642 // | ... |
2656 2643 // | ... |
2657 2644 // |---------------------| <- string_locs[1]
2658 2645 // | string[0] |
2659 2646 // |---------------------| <- string_locs[0]
2660 2647 // | temp |
2661 2648 // |---------------------| <- conversion_temp
2662 2649 // | outbound memory |
2663 2650 // | based arguments |
2664 2651 // | |
2665 2652 // |---------------------|
2666 2653 // | |
2667 2654 // SP-> | out_preserved_slots |
2668 2655 //
2669 2656 //
2670 2657
2671 2658 // Now compute the actual number of stack words we need, rounding to keep
2672 2659 // the stack properly aligned.
2673 2660 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2674 2661
2675 2662 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2676 2663
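// Worked example (a sketch, assuming a 32-bit build with stack_slot_size
// == 4, slots_per_word == 1, max_dtrace_string_size == 256, and, say, 64
// slots of ABI plus outgoing args):
//
//   stack_slots = 64 + 2 /* temp */ + 1 * 256/4 /* string */ = 130
//   rounded     = round_to(130, 4 * 1)                       = 132
//   stack_size  = 132 * 4                                    = 528 bytes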
2677 2664 intptr_t start = (intptr_t)__ pc();
2678 2665
2679 2666 // First thing: make an IC check to see if we should even be here
2680 2667
2681 2668 {
2682 2669 Label L;
2683 2670 const Register temp_reg = G3_scratch;
2684 2671 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2685 2672 __ verify_oop(O0);
2686 2673 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2687 2674 __ cmp(temp_reg, G5_inline_cache_reg);
2688 2675 __ brx(Assembler::equal, true, Assembler::pt, L);
2689 2676 __ delayed()->nop();
2690 2677
2691 2678 __ jump_to(ic_miss, temp_reg);
2692 2679 __ delayed()->nop();
2693 2680 __ align(CodeEntryAlignment);
2694 2681 __ bind(L);
2695 2682 }
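// The check above, in effect (a sketch): compare the receiver's klass with
// the one the inline cache expects (G5) and bail out to the shared miss
// stub on a mismatch.
//
//   if (receiver->klass() != expected_klass) goto ic_miss_stub;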
2696 2683
2697 2684 int vep_offset = ((intptr_t)__ pc()) - start;
2698 2685
2699 2686
2700 2687 // The instruction at the verified entry point must be 5 bytes or longer
2701 2688 // because it can be patched on the fly by make_non_entrant. The stack bang
2702 2689 // instruction fits that requirement.
2703 2690
2704 2691 // Generate stack overflow check before creating frame
2705 2692 __ generate_stack_overflow_check(stack_size);
2706 2693
2707 2694 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2708 2695 "valid size for make_non_entrant");
2709 2696
2710 2697 // Generate a new frame for the wrapper.
2711 2698 __ save(SP, -stack_size, SP);
2712 2699
2713 2700 // Frame is now completed as far as size and linkage go.
2714 2701
2715 2702 int frame_complete = ((intptr_t)__ pc()) - start;
2716 2703
2717 2704 #ifdef ASSERT
2718 2705 bool reg_destroyed[RegisterImpl::number_of_registers];
2719 2706 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2720 2707 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2721 2708 reg_destroyed[r] = false;
2722 2709 }
2723 2710 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2724 2711 freg_destroyed[f] = false;
2725 2712 }
2726 2713
2727 2714 #endif /* ASSERT */
2728 2715
2729 2716 VMRegPair zero;
2730 2717 const Register g0 = G0; // without this we get a compiler warning (why??)
2731 2718 zero.set2(g0->as_VMReg());
2732 2719
2733 2720 int c_arg, j_arg;
2734 2721
2735 2722 Register conversion_off = noreg;
2736 2723
2737 2724 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2738 2725 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2739 2726
2740 2727 VMRegPair src = in_regs[j_arg];
2741 2728 VMRegPair dst = out_regs[c_arg];
2742 2729
2743 2730 #ifdef ASSERT
2744 2731 if (src.first()->is_Register()) {
2745 2732 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2746 2733 } else if (src.first()->is_FloatRegister()) {
2747 2734 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2748 2735 FloatRegisterImpl::S)], "ack!");
2749 2736 }
2750 2737 if (dst.first()->is_Register()) {
2751 2738 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2752 2739 } else if (dst.first()->is_FloatRegister()) {
2753 2740 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2754 2741 FloatRegisterImpl::S)] = true;
2755 2742 }
2756 2743 #endif /* ASSERT */
2757 2744
2758 2745 switch (in_sig_bt[j_arg]) {
2759 2746 case T_ARRAY:
2760 2747 case T_OBJECT:
2761 2748 {
2762 2749 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
2763 2750 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2764 2751 // need to unbox a one-slot value
2765 2752 Register in_reg = L0;
2766 2753 Register tmp = L2;
2767 2754 if ( src.first()->is_reg() ) {
2768 2755 in_reg = src.first()->as_Register();
2769 2756 } else {
2770 2757 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2771 2758 "must be");
2772 2759 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2773 2760 }
2774 2761 // If the final destination is an acceptable register
2775 2762 if ( dst.first()->is_reg() ) {
2776 2763 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2777 2764 tmp = dst.first()->as_Register();
2778 2765 }
2779 2766 }
2780 2767
2781 2768 Label skipUnbox;
2782 2769 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2783 2770 __ mov(G0, tmp->successor());
2784 2771 }
2785 2772 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2786 2773 __ delayed()->mov(G0, tmp);
2787 2774
2788 2775 BasicType bt = out_sig_bt[c_arg];
2789 2776 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2790 2777 switch (bt) {
2791 2778 case T_BYTE:
2792 2779 __ ldub(in_reg, box_offset, tmp); break;
2793 2780 case T_SHORT:
2794 2781 __ lduh(in_reg, box_offset, tmp); break;
2795 2782 case T_INT:
2796 2783 __ ld(in_reg, box_offset, tmp); break;
2797 2784 case T_LONG:
2798 2785 __ ld_long(in_reg, box_offset, tmp); break;
2799 2786 default: ShouldNotReachHere();
2800 2787 }
2801 2788
2802 2789 __ bind(skipUnbox);
2803 2790 // If tmp wasn't final destination copy to final destination
2804 2791 if (tmp == L2) {
2805 2792 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2806 2793 if (out_sig_bt[c_arg] == T_LONG) {
2807 2794 long_move(masm, tmp_as_VM, dst);
2808 2795 } else {
2809 2796 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2810 2797 }
2811 2798 }
2812 2799 if (out_sig_bt[c_arg] == T_LONG) {
2813 2800 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2814 2801 ++c_arg; // move over the T_VOID to keep the loop indices in sync
2815 2802 }
2816 2803 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2817 2804 Register s =
2818 2805 src.first()->is_reg() ? src.first()->as_Register() : L2;
2819 2806 Register d =
2820 2807 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2821 2808
2822 2809 // We store the oop now so that the conversion pass can reach it
2823 2810 // while in the inner frame. This will be the only store if
2824 2811 // the oop is NULL.
2825 2812 if (s != L2) {
2826 2813 // src is register
2827 2814 if (d != L2) {
2828 2815 // dst is register
2829 2816 __ mov(s, d);
2830 2817 } else {
2831 2818 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2832 2819 STACK_BIAS), "must be");
2833 2820 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2834 2821 }
2835 2822 } else {
2836 2823 // src not a register
2837 2824 assert(Assembler::is_simm13(reg2offset(src.first()) +
2838 2825 STACK_BIAS), "must be");
2839 2826 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2840 2827 if (d == L2) {
2841 2828 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2842 2829 STACK_BIAS), "must be");
2843 2830 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2844 2831 }
2845 2832 }
2846 2833 } else if (out_sig_bt[c_arg] != T_VOID) {
2847 2834 // Convert the arg to NULL
2848 2835 if (dst.first()->is_reg()) {
2849 2836 __ mov(G0, dst.first()->as_Register());
2850 2837 } else {
2851 2838 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2852 2839 STACK_BIAS), "must be");
2853 2840 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2854 2841 }
2855 2842 }
2856 2843 }
2857 2844 break;
2858 2845 case T_VOID:
2859 2846 break;
2860 2847
2861 2848 case T_FLOAT:
2862 2849 if (src.first()->is_stack()) {
2863 2850 // Stack to stack/reg is simple
2864 2851 move32_64(masm, src, dst);
2865 2852 } else {
2866 2853 if (dst.first()->is_reg()) {
2867 2854 // freg -> reg
2868 2855 int off =
2869 2856 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2870 2857 Register d = dst.first()->as_Register();
2871 2858 if (Assembler::is_simm13(off)) {
2872 2859 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2873 2860 SP, off);
2874 2861 __ ld(SP, off, d);
2875 2862 } else {
2876 2863 if (conversion_off == noreg) {
2877 2864 __ set(off, L6);
2878 2865 conversion_off = L6;
2879 2866 }
2880 2867 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2881 2868 SP, conversion_off);
2882 2869 __ ld(SP, conversion_off , d);
2883 2870 }
2884 2871 } else {
2885 2872 // freg -> mem
2886 2873 int off = STACK_BIAS + reg2offset(dst.first());
2887 2874 if (Assembler::is_simm13(off)) {
2888 2875 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2889 2876 SP, off);
2890 2877 } else {
2891 2878 if (conversion_off == noreg) {
2892 2879 __ set(off, L6);
2893 2880 conversion_off = L6;
2894 2881 }
2895 2882 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2896 2883 SP, conversion_off);
2897 2884 }
2898 2885 }
2899 2886 }
2900 2887 break;
2901 2888
2902 2889 case T_DOUBLE:
2903 2890 assert( j_arg + 1 < total_args_passed &&
2904 2891 in_sig_bt[j_arg + 1] == T_VOID &&
2905 2892 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2906 2893 if (src.first()->is_stack()) {
2907 2894 // Stack to stack/reg is simple
2908 2895 long_move(masm, src, dst);
2909 2896 } else {
2910 2897 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2911 2898
2912 2899 // Destination could be an odd reg on 32bit in which case
2913 2900 // we can't load directly to the destination.
2914 2901
2915 2902 if (!d->is_even() && wordSize == 4) {
2916 2903 d = L2;
2917 2904 }
2918 2905 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2919 2906 if (Assembler::is_simm13(off)) {
2920 2907 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2921 2908 SP, off);
2922 2909 __ ld_long(SP, off, d);
2923 2910 } else {
2924 2911 if (conversion_off == noreg) {
2925 2912 __ set(off, L6);
2926 2913 conversion_off = L6;
2927 2914 }
2928 2915 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2929 2916 SP, conversion_off);
2930 2917 __ ld_long(SP, conversion_off, d);
2931 2918 }
2932 2919 if (d == L2) {
2933 2920 long_move(masm, reg64_to_VMRegPair(L2), dst);
2934 2921 }
2935 2922 }
2936 2923 break;
2937 2924
2938 2925 case T_LONG :
2939 2926 // 32bit can't do a split move of something like g1 -> O0, O1
2940 2927 // so use a memory temp
2941 2928 if (src.is_single_phys_reg() && wordSize == 4) {
2942 2929 Register tmp = L2;
2943 2930 if (dst.first()->is_reg() &&
2944 2931 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2945 2932 tmp = dst.first()->as_Register();
2946 2933 }
2947 2934
2948 2935 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2949 2936 if (Assembler::is_simm13(off)) {
2950 2937 __ stx(src.first()->as_Register(), SP, off);
2951 2938 __ ld_long(SP, off, tmp);
2952 2939 } else {
2953 2940 if (conversion_off == noreg) {
2954 2941 __ set(off, L6);
2955 2942 conversion_off = L6;
2956 2943 }
2957 2944 __ stx(src.first()->as_Register(), SP, conversion_off);
2958 2945 __ ld_long(SP, conversion_off, tmp);
2959 2946 }
2960 2947
2961 2948 if (tmp == L2) {
2962 2949 long_move(masm, reg64_to_VMRegPair(L2), dst);
2963 2950 }
2964 2951 } else {
2965 2952 long_move(masm, src, dst);
2966 2953 }
2967 2954 break;
2968 2955
2969 2956 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2970 2957
2971 2958 default:
2972 2959 move32_64(masm, src, dst);
2973 2960 }
2974 2961 }
2975 2962
2976 2963
2977 2964 // If we have any strings we must store any register-based arg to the stack.
2978 2965 // This includes any still-live float registers too.
2979 2966
2980 2967 if (total_strings > 0 ) {
2981 2968
2982 2969 // protect all the arg registers
2983 2970 __ save_frame(0);
2984 2971 __ mov(G2_thread, L7_thread_cache);
2985 2972 const Register L2_string_off = L2;
2986 2973
2987 2974 // Get first string offset
2988 2975 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2989 2976
2990 2977 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2991 2978 if (out_sig_bt[c_arg] == T_ADDRESS) {
2992 2979
2993 2980 VMRegPair dst = out_regs[c_arg];
2994 2981 const Register d = dst.first()->is_reg() ?
2995 2982 dst.first()->as_Register()->after_save() : noreg;
2996 2983
2997 2984 // It's a string; the oop was already copied to the out arg
2998 2985 // position.
2999 2986 if (d != noreg) {
3000 2987 __ mov(d, O0);
3001 2988 } else {
3002 2989 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3003 2990 "must be");
3004 2991 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
3005 2992 }
3006 2993 Label skip;
3007 2994
3008 2995 __ br_null(O0, false, Assembler::pn, skip);
3009 2996 __ delayed()->add(FP, L2_string_off, O1);
3010 2997
3011 2998 if (d != noreg) {
3012 2999 __ mov(O1, d);
3013 3000 } else {
3014 3001 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3015 3002 "must be");
3016 3003 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
3017 3004 }
3018 3005
3019 3006 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3020 3007 relocInfo::runtime_call_type);
3021 3008 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3022 3009
3023 3010 __ bind(skip);
3024 3011
3025 3012 }
3026 3013
3027 3014 }
3028 3015 __ mov(L7_thread_cache, G2_thread);
3029 3016 __ restore();
3030 3017
3031 3018 }
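// The conversion pass above, restated (a sketch; the exact signature of
// SharedRuntime::get_utf is not spelled out here): for each T_ADDRESS arg,
//
//   if (oop != NULL) {
//     arg = buffer = FP + string_off;   // point the arg at its chunk
//     get_utf(oop, buffer);             // convert the String oop to utf8
//     string_off += max_dtrace_string_size;
//   }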
3032 3019
3033 3020
3034 3021 // OK, now we are done. We need to place the nop that dtrace wants so it
3035 3022 // can patch in the trap.
3036 3023
3037 3024 int patch_offset = ((intptr_t)__ pc()) - start;
3038 3025
3039 3026 __ nop();
3040 3027
3041 3028
3042 3029 // Return
3043 3030
3044 3031 __ ret();
3045 3032 __ delayed()->restore();
3046 3033
3047 3034 __ flush();
3048 3035
3049 3036 nmethod *nm = nmethod::new_dtrace_nmethod(
3050 3037 method, masm->code(), vep_offset, patch_offset, frame_complete,
3051 3038 stack_slots / VMRegImpl::slots_per_word);
3052 3039 return nm;
3053 3040
3054 3041 }
3055 3042
3056 3043 #endif // HAVE_DTRACE_H
3057 3044
3058 3045 // This function returns the adjustment size (in number of words) to a c2i
3059 3046 // adapter activation, for use during deoptimization.
3060 3047 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3061 3048 assert(callee_locals >= callee_parameters,
3062 3049 "test and remove; got more parms than locals");
3063 3050 if (callee_locals < callee_parameters)
3064 3051 return 0; // No adjustment for negative locals
3065 3052 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
3066 3053 return round_to(diff, WordsPerLong);
3067 3054 }
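// Worked example (a sketch, assuming stackElementWords() == 1 and
// WordsPerLong == 2): with 3 parameters and 7 locals,
//   diff = (7 - 3) * 1 = 4  ->  round_to(4, 2) = 4 words of adjustment;
// with 6 locals, diff == 3 rounds up to 4, keeping longs aligned.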
3068 3055
3069 3056 // "Top of Stack" slots that may be unused by the calling convention but must
3070 3057 // otherwise be preserved.
3071 3058 // On Intel these are not necessary and the value can be zero.
3072 3059 // On Sparc this describes the words reserved for storing a register window
3073 3060 // when an interrupt occurs.
3074 3061 uint SharedRuntime::out_preserve_stack_slots() {
3075 3062 return frame::register_save_words * VMRegImpl::slots_per_word;
3076 3063 }
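// For example (a sketch, assuming register_save_words == 16, i.e. the 8 %i
// and 8 %l registers of one window): 16 * 1 == 16 slots (64 bytes) on a
// 32-bit build and 16 * 2 == 32 slots (128 bytes) on LP64.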
3077 3064
3078 3065 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3079 3066 //
3080 3067 // Common out the new frame generation for deopt and uncommon trap
3081 3068 //
3082 3069 Register G3pcs = G3_scratch; // Array of new pcs (input)
3083 3070 Register Oreturn0 = O0;
3084 3071 Register Oreturn1 = O1;
3085 3072 Register O2UnrollBlock = O2;
3086 3073 Register O3array = O3; // Array of frame sizes (input)
3087 3074 Register O4array_size = O4; // number of frames (input)
3088 3075 Register O7frame_size = O7; // size of the new frame (temp)
3089 3076
3090 3077 __ ld_ptr(O3array, 0, O7frame_size);
3091 3078 __ sub(G0, O7frame_size, O7frame_size);
3092 3079 __ save(SP, O7frame_size, SP);
3093 3080 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3094 3081
3095 3082 #ifdef ASSERT
3096 3083 // make sure that the frames are aligned properly
3097 3084 #ifndef _LP64
3098 3085 __ btst(wordSize*2-1, SP);
3099 3086 __ breakpoint_trap(Assembler::notZero);
3100 3087 #endif
3101 3088 #endif
3102 3089
3103 3090 // Deopt needs to pass some extra live values from frame to frame
3104 3091
3105 3092 if (deopt) {
3106 3093 __ mov(Oreturn0->after_save(), Oreturn0);
3107 3094 __ mov(Oreturn1->after_save(), Oreturn1);
3108 3095 }
3109 3096
3110 3097 __ mov(O4array_size->after_save(), O4array_size);
3111 3098 __ sub(O4array_size, 1, O4array_size);
3112 3099 __ mov(O3array->after_save(), O3array);
3113 3100 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3114 3101 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3115 3102
3116 3103 #ifdef ASSERT
3117 3104 // trash registers to show a clear pattern in backtraces
3118 3105 __ set(0xDEAD0000, I0);
3119 3106 __ add(I0, 2, I1);
3120 3107 __ add(I0, 4, I2);
3121 3108 __ add(I0, 6, I3);
3122 3109 __ add(I0, 8, I4);
3123 3110 // Don't touch I5; it could hold a valuable savedSP
3124 3111 __ set(0xDEADBEEF, L0);
3125 3112 __ mov(L0, L1);
3126 3113 __ mov(L0, L2);
3127 3114 __ mov(L0, L3);
3128 3115 __ mov(L0, L4);
3129 3116 __ mov(L0, L5);
3130 3117
3131 3118 // trash the return value as there is nothing to return yet
3132 3119 __ set(0xDEAD0001, O7);
3133 3120 #endif
3134 3121
3135 3122 __ mov(SP, O5_savedSP);
3136 3123 }
3137 3124
3138 3125
3139 3126 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3140 3127 //
3141 3128 // loop through the UnrollBlock info and create new frames
3142 3129 //
3143 3130 Register G3pcs = G3_scratch;
3144 3131 Register Oreturn0 = O0;
3145 3132 Register Oreturn1 = O1;
3146 3133 Register O2UnrollBlock = O2;
3147 3134 Register O3array = O3;
3148 3135 Register O4array_size = O4;
3149 3136 Label loop;
3150 3137
3151 3138 // Before we make new frames, check to see if stack is available.
3152 3139 // Do this after the caller's return address is on top of the stack
3153 3140 if (UseStackBanging) {
3154 3141 // Get total frame size for interpreted frames
3155 3142 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3156 3143 __ bang_stack_size(O4, O3, G3_scratch);
3157 3144 }
3158 3145
3159 3146 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3160 3147 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3161 3148 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3162 3149
3163 3150 // Adjust old interpreter frame to make space for new frame's extra java locals
3164 3151 //
3165 3152 // We capture the original sp for the transition frame only because it is needed in
3166 3153 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3167 3154 // every interpreter frame captures a savedSP, it is only needed at the transition
3168 3155 // (fortunately). If we had to have it correct everywhere then we would need to
3169 3156 // be told the sp_adjustment for each frame we create. If the frame size array
3170 3157 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3171 3158 // for each frame we create and keep up the illusion everywhere.
3172 3159 //
3173 3160
3174 3161 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3175 3162 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3176 3163 __ sub(SP, O7, SP);
3177 3164
3178 3165 #ifdef ASSERT
3179 3166 // make sure that there is at least one entry in the array
3180 3167 __ tst(O4array_size);
3181 3168 __ breakpoint_trap(Assembler::zero);
3182 3169 #endif
3183 3170
3184 3171 // Now push the new interpreter frames
3185 3172 __ bind(loop);
3186 3173
3187 3174 // allocate a new frame, filling the registers
3188 3175
3189 3176 gen_new_frame(masm, deopt); // allocate an interpreter frame
3190 3177
3191 3178 __ tst(O4array_size);
3192 3179 __ br(Assembler::notZero, false, Assembler::pn, loop);
3193 3180 __ delayed()->add(O3array, wordSize, O3array);
3194 3181 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
3195 3182
3196 3183 }
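// The unroll loop above, restated as a sketch in plain C++ (the types and
// push_interpreter_frame are hypothetical; the arrays are the UnrollBlock
// fields read above):
//
//   void make_frames(intptr_t* frame_sizes, address* pcs, int count) {
//     for (int i = 0; i < count; i++) {
//       push_interpreter_frame(frame_sizes[i], pcs[i]);  // gen_new_frame
//     }
//     // pcs[count] holds the final pc, loaded into O7 after the loop
//   }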
3197 3184
3198 3185 //------------------------------generate_deopt_blob----------------------------
3199 3186 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3200 3187 // instead.
3201 3188 void SharedRuntime::generate_deopt_blob() {
3202 3189 // allocate space for the code
3203 3190 ResourceMark rm;
3204 3191 // setup code generation tools
3205 3192 int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
3206 3193 #ifdef _LP64
3207 3194 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3208 3195 #else
3209 3196 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3210 3197 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3211 3198 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3212 3199 #endif /* _LP64 */
3213 3200 MacroAssembler* masm = new MacroAssembler(&buffer);
3214 3201 FloatRegister Freturn0 = F0;
3215 3202 Register Greturn1 = G1;
3216 3203 Register Oreturn0 = O0;
3217 3204 Register Oreturn1 = O1;
3218 3205 Register O2UnrollBlock = O2;
3219 3206 Register L0deopt_mode = L0;
3220 3207 Register G4deopt_mode = G4_scratch;
3221 3208 int frame_size_words;
3222 3209 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3223 3210 #if !defined(_LP64) && defined(COMPILER2)
3224 3211 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3225 3212 #endif
3226 3213 Label cont;
3227 3214
3228 3215 OopMapSet *oop_maps = new OopMapSet();
3229 3216
3230 3217 //
3231 3218 // This is the entry point for code which is returning to a de-optimized
3232 3219 // frame.
3233 3220 // The steps taken by this frame are as follows:
3234 3221 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3235 3222 // and all potentially live registers (at a pollpoint many registers can be live).
3236 3223 //
3237 3224 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3238 3225 // returns information about the number and size of interpreter frames
3239 3226 // which are equivalent to the frame which is being deoptimized)
3240 3227 // - deallocate the unpack frame, restoring only results values. Other
3241 3228 // volatile registers will now be captured in the vframeArray as needed.
3242 3229 // - deallocate the deoptimization frame
3243 3230 // - in a loop using the information returned in the previous step
3244 3231 // push new interpreter frames (take care to propagate the return
3245 3232 // values through each new frame pushed)
3246 3233 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3247 3234 // - call the C routine: Deoptimization::unpack_frames (this function
3248 3235 // lays out values on the interpreter frame which was just created)
3249 3236 // - deallocate the dummy unpack_frame
3250 3237 // - ensure that all the return values are correctly set and then do
3251 3238 // a return to the interpreter entry point
3252 3239 //
3253 3240 // Refer to the following methods for more information:
3254 3241 // - Deoptimization::fetch_unroll_info
3255 3242 // - Deoptimization::unpack_frames
3256 3243
3257 3244 OopMap* map = NULL;
3258 3245
3259 3246 int start = __ offset();
3260 3247
3261 3248 // restore G2, the trampoline destroyed it
3262 3249 __ get_thread();
3263 3250
3264 3251 // On entry we have been called by the deoptimized nmethod with a call that
3265 3252 // replaced the original call (or safepoint polling location) so the deoptimizing
3266 3253 // pc is now in O7. Return values are still in the expected places
3267 3254
3268 3255 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3269 3256 __ ba(false, cont);
3270 3257 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3271 3258
3272 3259 int exception_offset = __ offset() - start;
3273 3260
3274 3261 // restore G2, the trampoline destroyed it
3275 3262 __ get_thread();
3276 3263
3277 3264 // On entry we have been jumped to by the exception handler (or exception_blob
3278 3265 // for server). O0 contains the exception oop and O7 contains the original
3279 3266 // exception pc. So if we push a frame here it will look to the
3280 3267 // stack walking code (fetch_unroll_info) just like a normal call so
3281 3268 // state will be extracted normally.
3282 3269
3283 3270 // save exception oop in JavaThread and fall through into the
3284 3271 // exception_in_tls case since they are handled in same way except
3285 3272 // for where the pending exception is kept.
3286 3273 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3287 3274
3288 3275 //
3289 3276 // Vanilla deoptimization with an exception pending in exception_oop
3290 3277 //
3291 3278 int exception_in_tls_offset = __ offset() - start;
3292 3279
3293 3280 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3294 3281 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3295 3282
3296 3283 // Restore G2_thread
3297 3284 __ get_thread();
3298 3285
3299 3286 #ifdef ASSERT
3300 3287 {
3301 3288 // verify that there is really an exception oop in exception_oop
3302 3289 Label has_exception;
3303 3290 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3304 3291 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
3305 3292 __ delayed()-> nop();
3306 3293 __ stop("no exception in thread");
3307 3294 __ bind(has_exception);
3308 3295
3309 3296 // verify that there is no pending exception
3310 3297 Label no_pending_exception;
3311 3298 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3312 3299 __ ld_ptr(exception_addr, Oexception);
3313 3300 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3314 3301 __ delayed()->nop();
3315 3302 __ stop("must not have pending exception here");
3316 3303 __ bind(no_pending_exception);
3317 3304 }
3318 3305 #endif
3319 3306
3320 3307 __ ba(false, cont);
3321 3308 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3322 3309
3323 3310 //
3324 3311 // Reexecute entry, similar to c2 uncommon trap
3325 3312 //
3326 3313 int reexecute_offset = __ offset() - start;
3327 3314
3328 3315 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3329 3316 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3330 3317
3331 3318 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3332 3319
3333 3320 __ bind(cont);
3334 3321
3335 3322 __ set_last_Java_frame(SP, noreg);
3336 3323
3337 3324 // do the call by hand so we can get the oopmap
3338 3325
3339 3326 __ mov(G2_thread, L7_thread_cache);
3340 3327 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3341 3328 __ delayed()->mov(G2_thread, O0);
3342 3329
3343 3330 // Set an oopmap for the call site. This describes all our saved volatile registers
3344 3331
3345 3332 oop_maps->add_gc_map( __ offset()-start, map);
3346 3333
3347 3334 __ mov(L7_thread_cache, G2_thread);
3348 3335
3349 3336 __ reset_last_Java_frame();
3350 3337
3351 3338 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3352 3339 // so this move will survive
3353 3340
3354 3341 __ mov(L0deopt_mode, G4deopt_mode);
3355 3342
3356 3343 __ mov(O0, O2UnrollBlock->after_save());
3357 3344
3358 3345 RegisterSaver::restore_result_registers(masm);
3359 3346
3360 3347 Label noException;
3361 3348 __ cmp(G4deopt_mode, Deoptimization::Unpack_exception); // Was exception pending?
3362 3349 __ br(Assembler::notEqual, false, Assembler::pt, noException);
3363 3350 __ delayed()->nop();
3364 3351
3365 3352 // Move the pending exception from exception_oop to Oexception so
3366 3353 // the pending exception will be picked up by the interpreter.
3367 3354 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3368 3355 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3369 3356 __ bind(noException);
3370 3357
3371 3358 // deallocate the deoptimization frame taking care to preserve the return values
3372 3359 __ mov(Oreturn0, Oreturn0->after_save());
3373 3360 __ mov(Oreturn1, Oreturn1->after_save());
3374 3361 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3375 3362 __ restore();
3376 3363
3377 3364 // Allocate new interpreter frame(s) and possible c2i adapter frame
3378 3365
3379 3366 make_new_frames(masm, true);
3380 3367
3381 3368 // push a dummy "unpack_frame" taking care of float return values and
3382 3369 // call Deoptimization::unpack_frames to have the unpacker layout
3383 3370 // information in the interpreter frames just created and then return
3384 3371 // to the interpreter entry point
3385 3372 __ save(SP, -frame_size_words*wordSize, SP);
3386 3373 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3387 3374 #if !defined(_LP64)
3388 3375 #if defined(COMPILER2)
3389 3376 if (!TieredCompilation) {
3390 3377 // 32-bit 1-register longs return longs in G1
3391 3378 __ stx(Greturn1, saved_Greturn1_addr);
3392 3379 }
3393 3380 #endif
3394 3381 __ set_last_Java_frame(SP, noreg);
3395 3382 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3396 3383 #else
3397 3384 // LP64 uses g4 in set_last_Java_frame
3398 3385 __ mov(G4deopt_mode, O1);
3399 3386 __ set_last_Java_frame(SP, G0);
3400 3387 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3401 3388 #endif
3402 3389 __ reset_last_Java_frame();
3403 3390 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3404 3391
3405 3392 // In tiered we never use C2 to compile methods returning longs so
3406 3393 // the result is where we expect it already.
3407 3394
3408 3395 #if !defined(_LP64) && defined(COMPILER2)
3409 3396 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3410 3397 // I0/I1 if the return value is long. In the tiered world there is
3411 3398 // a mismatch between how C1 and C2 compile returns of longs, and so
3412 3399 // currently compilation of methods which return longs is disabled
3413 3400 // for C2, and so is this code. Eventually C1 and C2 will do the
3414 3401 // same thing for longs in the tiered world.
3415 3402 if (!TieredCompilation) {
3416 3403 Label not_long;
3417 3404 __ cmp(O0,T_LONG);
3418 3405 __ br(Assembler::notEqual, false, Assembler::pt, not_long);
3419 3406 __ delayed()->nop();
3420 3407 __ ldd(saved_Greturn1_addr,I0);
3421 3408 __ bind(not_long);
3422 3409 }
3423 3410 #endif
3424 3411 __ ret();
3425 3412 __ delayed()->restore();
3426 3413
3427 3414 masm->flush();
3428 3415 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3429 3416 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3430 3417 }
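// Entry points recorded for the blob above (a summary; offsets are relative
// to 'start'):
//   0                       -- regular deopt; return values live in O0/O1/F0/G1
//   exception_offset        -- exception case; Oexception/O7 hold oop and pc
//   exception_in_tls_offset -- exception already stashed in the thread
//   reexecute_offset        -- reexecute the bytecode, c2-uncommon-trap style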
3431 3418
3432 3419 #ifdef COMPILER2
3433 3420
3434 3421 //------------------------------generate_uncommon_trap_blob--------------------
3435 3422 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3436 3423 // instead.
3437 3424 void SharedRuntime::generate_uncommon_trap_blob() {
3438 3425 // allocate space for the code
3439 3426 ResourceMark rm;
3440 3427 // setup code generation tools
3441 3428 int pad = VerifyThread ? 512 : 0;
3442 3429 #ifdef _LP64
3443 3430 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3444 3431 #else
3445 3432 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3446 3433 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3447 3434 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3448 3435 #endif
3449 3436 MacroAssembler* masm = new MacroAssembler(&buffer);
3450 3437 Register O2UnrollBlock = O2;
3451 3438 Register O2klass_index = O2;
3452 3439
3453 3440 //
3454 3441 // This is the entry point for all traps the compiler takes when it thinks
3455 3442 // it cannot handle further execution of compiled code. The frame is
3456 3443 // deoptimized in these cases and converted into interpreter frames for
3457 3444 // execution.
3458 3445 // The steps taken by this frame are as follows:
3459 3446 // - push a fake "unpack_frame"
3460 3447 // - call the C routine Deoptimization::uncommon_trap (this function
3461 3448 // packs the current compiled frame into vframe arrays and returns
3462 3449 // information about the number and size of interpreter frames which
3463 3450 // are equivalent to the frame which is being deoptimized)
3464 3451 // - deallocate the "unpack_frame"
3465 3452 // - deallocate the deoptimization frame
3466 3453 // - in a loop using the information returned in the previous step
3467 3454 // push interpreter frames;
3468 3455 // - create a dummy "unpack_frame"
3469 3456 // - call the C routine: Deoptimization::unpack_frames (this function
3470 3457 // lays out values on the interpreter frame which was just created)
3471 3458 // - deallocate the dummy unpack_frame
3472 3459 // - return to the interpreter entry point
3473 3460 //
3474 3461 // Refer to the following methods for more information:
3475 3462 // - Deoptimization::uncommon_trap
3476 3463 // - Deoptimization::unpack_frame
3477 3464
3478 3465 // the unloaded class index is in O0 (first parameter to this blob)
3479 3466
3480 3467 // push a dummy "unpack_frame"
3481 3468 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3482 3469 // vframe array and return the UnrollBlock information
3483 3470 __ save_frame(0);
3484 3471 __ set_last_Java_frame(SP, noreg);
3485 3472 __ mov(I0, O2klass_index);
3486 3473 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3487 3474 __ reset_last_Java_frame();
3488 3475 __ mov(O0, O2UnrollBlock->after_save());
3489 3476 __ restore();
3490 3477
3491 3478 // deallocate the deoptimized frame taking care to preserve the return values
3492 3479 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3493 3480 __ restore();
3494 3481
3495 3482 // Allocate new interpreter frame(s) and possible c2i adapter frame
3496 3483
3497 3484 make_new_frames(masm, false);
3498 3485
3499 3486 // push a dummy "unpack_frame" taking care of float return values and
3500 3487 // call Deoptimization::unpack_frames to have the unpacker layout
3501 3488 // information in the interpreter frames just created and then return
3502 3489 // to the interpreter entry point
3503 3490 __ save_frame(0);
3504 3491 __ set_last_Java_frame(SP, noreg);
3505 3492 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3506 3493 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3507 3494 __ reset_last_Java_frame();
3508 3495 __ ret();
3509 3496 __ delayed()->restore();
3510 3497
3511 3498 masm->flush();
3512 3499 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3513 3500 }
3514 3501
3515 3502 #endif // COMPILER2
3516 3503
3517 3504 //------------------------------generate_handler_blob-------------------
3518 3505 //
3519 3506 // Generate a special Compile2Runtime blob that saves all registers, and sets
3520 3507 // up an OopMap.
3521 3508 //
3522 3509 // This blob is jumped to (via a breakpoint and the signal handler) from a
3523 3510 // safepoint in compiled code. On entry to this blob, O7 contains the
3524 3511 // address in the original nmethod at which we should resume normal execution.
3525 3512 // Thus, this blob looks like a subroutine which must preserve lots of
3526 3513 // registers and return normally. Note that O7 is never register-allocated,
3527 3514 // so it is guaranteed to be free here.
3528 3515 //
3529 3516
3530 3517 // The hardest part of what this blob must do is to save the 64-bit %o
3531 3518 // registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
3532 3519 // an interrupt will chop off their heads. Making space in the caller's frame
3533 3520 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3534 3521 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3535 3522 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
3536 3523 // the 64-bit %o's, then do a save, then fix up the caller's SP (our FP).
3537 3524 // Tricky, tricky, tricky...
3538 3525
3539 3526 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
3540 3527 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3541 3528
3542 3529 // allocate space for the code
3543 3530 ResourceMark rm;
3544 3531 // setup code generation tools
3545 3532 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3546 3533 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3547 3534 // even larger with TraceJumps
3548 3535 int pad = TraceJumps ? 512 : 0;
3549 3536 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3550 3537 MacroAssembler* masm = new MacroAssembler(&buffer);
3551 3538 int frame_size_words;
3552 3539 OopMapSet *oop_maps = new OopMapSet();
3553 3540 OopMap* map = NULL;
3554 3541
3555 3542 int start = __ offset();
3556 3543
3557 3544 // If this causes a return before the processing, then do a "restore"
3558 3545 if (cause_return) {
3559 3546 __ restore();
3560 3547 } else {
3561 3548 // Make it look like we were called via the poll
3562 3549 // so that frame constructor always sees a valid return address
3563 3550 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3564 3551 __ sub(O7, frame::pc_return_offset, O7);
3565 3552 }
3566 3553
3567 3554 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3568 3555
3569 3556 // setup last_Java_sp (blows G4)
3570 3557 __ set_last_Java_frame(SP, noreg);
3571 3558
3572 3559 // call into the runtime to handle the safepoint polling page exception
3573 3560 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3574 3561 __ mov(G2_thread, O0);
3575 3562 __ save_thread(L7_thread_cache);
3576 3563 __ call(call_ptr);
3577 3564 __ delayed()->nop();
3578 3565
3579 3566 // Set an oopmap for the call site.
3580 3567 // We need this not only for callee-saved registers, but also for volatile
3581 3568 // registers that the compiler might be keeping live across a safepoint.
3582 3569
3583 3570 oop_maps->add_gc_map( __ offset() - start, map);
3584 3571
3585 3572 __ restore_thread(L7_thread_cache);
3586 3573 // clear last_Java_sp
3587 3574 __ reset_last_Java_frame();
3588 3575
3589 3576 // Check for exceptions
3590 3577 Label pending;
3591 3578
3592 3579 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3593 3580 __ tst(O1);
3594 3581 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3595 3582 __ delayed()->nop();
3596 3583
3597 3584 RegisterSaver::restore_live_registers(masm);
3598 3585
3599 3586 // We are back to the original state on entry and ready to go.
3600 3587
3601 3588 __ retl();
3602 3589 __ delayed()->nop();
3603 3590
3604 3591 // Pending exception after the safepoint
3605 3592
3606 3593 __ bind(pending);
3607 3594
3608 3595 RegisterSaver::restore_live_registers(masm);
3609 3596
3610 3597 // We are back to the original state on entry.
3611 3598
3612 3599 // Tail-call forward_exception_entry, with the issuing PC in O7,
3613 3600 // so it looks like the original nmethod called forward_exception_entry.
3614 3601 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3615 3602 __ JMP(O0, 0);
3616 3603 __ delayed()->nop();
3617 3604
3618 3605 // -------------
3619 3606 // make sure all code is generated
3620 3607 masm->flush();
3621 3608
3622 3609 // return exception blob
3623 3610 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3624 3611 }
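// Usage note (see generate_stubs below): the two safepoint blobs built from
// this generator differ only in cause_return -- true for the polling-page
// return handler, where the frame is popped ('restore') first, and false
// for the in-method polling case, where O7 is rebuilt from the saved
// exception pc.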
3625 3612
3626 3613 //
3627 3614 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3628 3615 //
3629 3616 // Generate a stub that calls into vm to find out the proper destination
3630 3617 // of a java call. All the argument registers are live at this point
3631 3618 // but since this is generic code we don't know what they are and the caller
3632 3619 // must do any gc of the args.
3633 3620 //
3634 3621 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
3635 3622 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3636 3623
3637 3624 // allocate space for the code
3638 3625 ResourceMark rm;
3639 3626 // setup code generation tools
3640 3627 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3641 3628 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3642 3629 // even larger with TraceJumps
3643 3630 int pad = TraceJumps ? 512 : 0;
3644 3631 CodeBuffer buffer(name, 1600 + pad, 512);
3645 3632 MacroAssembler* masm = new MacroAssembler(&buffer);
3646 3633 int frame_size_words;
3647 3634 OopMapSet *oop_maps = new OopMapSet();
3648 3635 OopMap* map = NULL;
3649 3636
3650 3637 int start = __ offset();
3651 3638
3652 3639 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3653 3640
3654 3641 int frame_complete = __ offset();
3655 3642
3656 3643 // setup last_Java_sp (blows G4)
3657 3644 __ set_last_Java_frame(SP, noreg);
3658 3645
3659 3646 // call into the runtime to find the proper destination of the call
3660 3647 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3661 3648 __ mov(G2_thread, O0);
3662 3649 __ save_thread(L7_thread_cache);
3663 3650 __ call(destination, relocInfo::runtime_call_type);
3664 3651 __ delayed()->nop();
3665 3652
3666 3653 // O0 contains the address we are going to jump to assuming no exception got installed
3667 3654
3668 3655 // Set an oopmap for the call site.
3669 3656 // We need this not only for callee-saved registers, but also for volatile
3670 3657 // registers that the compiler might be keeping live across a safepoint.
3671 3658
3672 3659 oop_maps->add_gc_map( __ offset() - start, map);
3673 3660
3674 3661 __ restore_thread(L7_thread_cache);
3675 3662 // clear last_Java_sp
3676 3663 __ reset_last_Java_frame();
3677 3664
3678 3665 // Check for exceptions
3679 3666 Label pending;
3680 3667
3681 3668 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3682 3669 __ tst(O1);
3683 3670 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3684 3671 __ delayed()->nop();
3685 3672
3686 3673 // get the returned methodOop
3687 3674
3688 3675 __ get_vm_result(G5_method);
3689 3676 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3690 3677
3691 3678 // O0 is where we want to jump, overwrite G3 which is saved and scratch
3692 3679
3693 3680 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3694 3681
3695 3682 RegisterSaver::restore_live_registers(masm);
3696 3683
3697 3684 // We are back to the original state on entry and ready to go.
3698 3685
3699 3686 __ JMP(G3, 0);
3700 3687 __ delayed()->nop();
3701 3688
3702 3689 // Pending exception after the safepoint
3703 3690
3704 3691 __ bind(pending);
3705 3692
3706 3693 RegisterSaver::restore_live_registers(masm);
3707 3694
3708 3695 // We are back to the original state on entry.
3709 3696
3710 3697 // Tail-call forward_exception_entry, with the issuing PC in O7,
3711 3698 // so it looks like the original nmethod called forward_exception_entry.
3712 3699 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3713 3700 __ JMP(O0, 0);
3714 3701 __ delayed()->nop();
3715 3702
3716 3703 // -------------
3717 3704 // make sure all code is generated
3718 3705 masm->flush();
3719 3706
3720 3707 // return the blob
3721 3708 // frame_size_words or bytes??
3722 3709 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3723 3710 }
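// In effect (a sketch of the happy path above): the runtime returns the
// resolved entry point, which is parked in the saved-G3 slot so that
// restore_live_registers() reloads it, the methodOop reappears in G5, and
// the stub tail-jumps:
//
//   G5_method = vm_result();   // methodOop from the VM
//   G3        = destination;   // entry point returned in O0
//   goto *G3;                  // with all argument registers restored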
3724 3711
3725 3712 void SharedRuntime::generate_stubs() {
3726 3713
3727 3714 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
3728 3715 "wrong_method_stub");
3729 3716
3730 3717 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
3731 3718 "ic_miss_stub");
3732 3719
3733 3720 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
3734 3721 "resolve_opt_virtual_call");
3735 3722
3736 3723 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
3737 3724 "resolve_virtual_call");
3738 3725
3739 3726 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
3740 3727 "resolve_static_call");
3741 3728
3742 3729 _polling_page_safepoint_handler_blob =
3743 3730 generate_handler_blob(CAST_FROM_FN_PTR(address,
3744 3731 SafepointSynchronize::handle_polling_page_exception), false);
3745 3732
3746 3733 _polling_page_return_handler_blob =
3747 3734 generate_handler_blob(CAST_FROM_FN_PTR(address,
3748 3735 SafepointSynchronize::handle_polling_page_exception), true);
3749 3736
3750 3737 generate_deopt_blob();
3751 3738
3752 3739 #ifdef COMPILER2
3753 3740 generate_uncommon_trap_blob();
3754 3741 #endif // COMPILER2
3755 3742 }