--- old/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
+++ new/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
1 1 /*
2 - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
2 + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_templateInterpreter_sparc.cpp.incl"
27 27
28 28 #ifndef CC_INTERP
29 29 #ifndef FAST_DISPATCH
30 30 #define FAST_DISPATCH 1
31 31 #endif
32 32 #undef FAST_DISPATCH
33 33
34 34
35 35 // Generation of Interpreter
36 36 //
37 37 // The InterpreterGenerator generates the interpreter into Interpreter::_code.
38 38
39 39
40 40 #define __ _masm->
41 41
42 42
43 43 //----------------------------------------------------------------------------------------------------
44 44
45 45
46 46 void InterpreterGenerator::save_native_result(void) {
47 47 // result potentially in O0/O1: save it across calls
48 48 const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
49 49
50 50 // result potentially in F0/F1: save it across calls
51 51 const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
52 52
53 53 // save and restore any potential method result value around the unlocking operation
54 54 __ stf(FloatRegisterImpl::D, F0, d_tmp);
55 55 #ifdef _LP64
56 56 __ stx(O0, l_tmp);
57 57 #else
58 58 __ std(O0, l_tmp);
59 59 #endif
60 60 }
61 61
62 62 void InterpreterGenerator::restore_native_result(void) {
63 63 const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
64 64 const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
65 65
66 66 // Restore any method result value
67 67 __ ldf(FloatRegisterImpl::D, d_tmp, F0);
68 68 #ifdef _LP64
69 69 __ ldx(l_tmp, O0);
70 70 #else
71 71 __ ldd(l_tmp, O0);
72 72 #endif
73 73 }
74 74
75 75 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
76 76 assert(!pass_oop || message == NULL, "either oop or message but not both");
77 77 address entry = __ pc();
78 78 // expression stack must be empty before entering the VM if an exception happened
79 79 __ empty_expression_stack();
80 80 // load exception object
81 81 __ set((intptr_t)name, G3_scratch);
82 82 if (pass_oop) {
83 83 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
84 84 } else {
85 85 __ set((intptr_t)message, G4_scratch);
86 86 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
87 87 }
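  // A sketch of the two cases above (the thread argument of the call_VM
  // wrappers is implicit):
  //   pass_oop:  Oexception = create_klass_exception(name, Otos_i /* oop */);
  //   otherwise: Oexception = create_exception(name, message);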
88 88 // throw exception
89 89 assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
90 90 AddressLiteral thrower(Interpreter::throw_exception_entry());
91 91 __ jump_to(thrower, G3_scratch);
92 92 __ delayed()->nop();
93 93 return entry;
94 94 }
95 95
96 96 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
97 97 address entry = __ pc();
98 98 // expression stack must be empty before entering the VM if an exception
99 99 // happened
100 100 __ empty_expression_stack();
101 101 // load exception object
102 102 __ call_VM(Oexception,
103 103 CAST_FROM_FN_PTR(address,
104 104 InterpreterRuntime::throw_ClassCastException),
105 105 Otos_i);
106 106 __ should_not_reach_here();
107 107 return entry;
108 108 }
109 109
110 110
111 111 // Arguments are: required type in G5_method_type, and
112 112 // failing object (or NULL) in G3_method_handle.
113 113 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
114 114 address entry = __ pc();
115 115 // expression stack must be empty before entering the VM if an exception
116 116 // happened
117 117 __ empty_expression_stack();
118 118 // load exception object
119 119 __ call_VM(Oexception,
120 120 CAST_FROM_FN_PTR(address,
121 121 InterpreterRuntime::throw_WrongMethodTypeException),
122 122 G5_method_type, // required
123 123 G3_method_handle); // actual
124 124 __ should_not_reach_here();
125 125 return entry;
126 126 }
127 127
128 128
129 129 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
130 130 address entry = __ pc();
131 131 // expression stack must be empty before entering the VM if an exception happened
132 132 __ empty_expression_stack();
133 133 // convention: expect aberrant index in register G3_scratch, then shuffle the
134 134 // index to G4_scratch for the VM call
135 135 __ mov(G3_scratch, G4_scratch);
136 136 __ set((intptr_t)name, G3_scratch);
137 137 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
138 138 __ should_not_reach_here();
139 139 return entry;
140 140 }
141 141
142 142
143 143 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
144 144 address entry = __ pc();
145 145 // expression stack must be empty before entering the VM if an exception happened
146 146 __ empty_expression_stack();
147 147 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
148 148 __ should_not_reach_here();
149 149 return entry;
150 150 }
151 151
152 152
153 153 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
154 154 address compiled_entry = __ pc();
155 155 Label cont;
156 156
157 157 address entry = __ pc();
158 158 #if !defined(_LP64) && defined(COMPILER2)
159 159 // All return values are where we want them, except for Longs. C2 returns
160 160 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
161 161   // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
162 162   // build, we do a little shuffling here even when returning from interpreted
163 163   // code.
164 164   // Note: I tried to make C2 return longs in O0/O1 and G1 so we wouldn't have to
165 165   // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
166 166   // first which would move G1 -> O0/O1 and destroy the exception we were throwing.
167 167
168 168 if( state == ltos ) {
169 169 __ srl (G1, 0,O1);
170 170 __ srlx(G1,32,O0);
171 171 }
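  // A sketch of the shuffle above, with the 64-bit long in G1:
  //   O1 = (juint) G1;         // low  32 bits (srl  G1,  0, O1)
  //   O0 = (juint)(G1 >> 32);  // high 32 bits (srlx G1, 32, O0)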
172 172 #endif /* !_LP64 && COMPILER2 */
173 173
174 174
175 175 __ bind(cont);
176 176
177 177   // The callee returns with the stack possibly adjusted by the adapter transition.
178 178 // We remove that possible adjustment here.
179 179 // All interpreter local registers are untouched. Any result is passed back
180 180 // in the O0/O1 or float registers. Before continuing, the arguments must be
181 181 // popped from the java expression stack; i.e., Lesp must be adjusted.
182 182
183 183 __ mov(Llast_SP, SP); // Remove any adapter added stack space.
184 184
185 185
186 186 const Register cache = G3_scratch;
187 187 const Register size = G1_scratch;
188 188 __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
189 189 __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
190 190 ConstantPoolCacheEntry::flags_offset(), size);
191 191 __ and3(size, 0xFF, size); // argument size in words
192 192   __ sll(size, Interpreter::logStackElementSize(), size); // argument size in bytes
193 193 __ add(Lesp, size, Lesp); // pop arguments
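  // In C terms, a sketch of the pop above (assuming the low byte of the
  // cache entry's flags word still holds the parameter size in words):
  //   Lesp += (flags & 0xFF) << Interpreter::logStackElementSize();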
194 194 __ dispatch_next(state, step);
195 195
196 196 return entry;
197 197 }
198 198
199 199
200 200 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
201 201 address entry = __ pc();
202 202 __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
203 203 { Label L;
204 204 Address exception_addr(G2_thread, Thread::pending_exception_offset());
205 205 __ ld_ptr(exception_addr, Gtemp); // Load pending exception.
206 206 __ tst(Gtemp);
207 207 __ brx(Assembler::equal, false, Assembler::pt, L);
208 208 __ delayed()->nop();
209 209 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
210 210 __ should_not_reach_here();
211 211 __ bind(L);
212 212 }
213 213 __ dispatch_next(state, step);
214 214 return entry;
215 215 }
216 216
217 217 // A result handler converts/unboxes a native call result into
218 218 // a java interpreter/compiler result. The current frame is an
219 219 // interpreter frame. The activation frame unwind code must be
220 220 // consistent with that of TemplateTable::_return(...). In the
221 221 // case of native methods, the caller's SP was not modified.
222 222 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
223 223 address entry = __ pc();
224 224 Register Itos_i = Otos_i ->after_save();
225 225 Register Itos_l = Otos_l ->after_save();
226 226 Register Itos_l1 = Otos_l1->after_save();
227 227 Register Itos_l2 = Otos_l2->after_save();
228 228 switch (type) {
229 229 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
230 230 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value!
231 231 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
232 232 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
233 233 case T_LONG :
234 234 #ifndef _LP64
235 235 __ mov(O1, Itos_l2); // move other half of long
236 236 #endif // ifdef or no ifdef, fall through to the T_INT case
237 237 case T_INT : __ mov(O0, Itos_i); break;
238 238 case T_VOID : /* nothing to do */ break;
239 239 case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break;
240 240 case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break;
241 241 case T_OBJECT :
242 242 __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
243 243 __ verify_oop(Itos_i);
244 244 break;
245 245 default : ShouldNotReachHere();
246 246 }
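  // A sketch of the T_BOOLEAN case above: subcc(G0, O0, G0) computes
  // 0 - O0 and sets the carry (borrow) flag iff O0 != 0, so
  // addc(G0, 0, Itos_i) yields 0 + 0 + carry, i.e.
  //   Itos_i = (O0 != 0) ? 1 : 0;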
247 247 __ ret(); // return from interpreter activation
248 248 __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame
249 249 NOT_PRODUCT(__ emit_long(0);) // marker for disassembly
250 250 return entry;
251 251 }
252 252
253 253 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
254 254 address entry = __ pc();
255 255 __ push(state);
256 256 __ call_VM(noreg, runtime_entry);
257 257 __ dispatch_via(vtos, Interpreter::normal_table(vtos));
258 258 return entry;
259 259 }
260 260
261 261
262 262 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
263 263 address entry = __ pc();
264 264 __ dispatch_next(state);
265 265 return entry;
266 266 }
267 267
268 268 //
269 269 // Helpers for commoning out cases in the various types of method entries.
270 270 //
271 271
272 272 // increment invocation count & check for overflow
273 273 //
274 274 // Note: checking for negative value instead of overflow
275 275 // so we have a 'sticky' overflow test
276 276 //
277 277 // Lmethod: method
278 278 // ??: invocation counter
279 279 //
280 280 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
281 281 // Update standard invocation counters
282 282 __ increment_invocation_counter(O0, G3_scratch);
283 283 if (ProfileInterpreter) { // %%% Merge this into methodDataOop
284 284 Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
285 285 __ ld(interpreter_invocation_counter, G3_scratch);
286 286 __ inc(G3_scratch);
287 287 __ st(G3_scratch, interpreter_invocation_counter);
288 288 }
289 289
290 290 if (ProfileInterpreter && profile_method != NULL) {
291 291 // Test to see if we should create a method data oop
292 292 AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
293 293 __ sethi(profile_limit, G3_scratch);
294 294 __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
295 295 __ cmp(O0, G3_scratch);
296 296 __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
297 297 __ delayed()->nop();
298 298
299 299 // if no method data exists, go to profile_method
300 300 __ test_method_data_pointer(*profile_method);
301 301 }
302 302
303 303 AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
304 304 __ sethi(invocation_limit, G3_scratch);
305 305 __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
306 306 __ cmp(O0, G3_scratch);
307 307 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
308 308 __ delayed()->nop();
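  // In effect, a sketch of the test above:
  //   if ((juint)counter >= (juint)InterpreterInvocationLimit) goto overflow;
  // Since the counter only grows here, once the limit is crossed every later
  // check branches as well; that is what makes the overflow test 'sticky'.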
309 309
310 310 }
311 311
312 312 // Allocate monitor and lock method (asm interpreter)
313 313 // Lmethod - methodOop
314 314 //
315 315 void InterpreterGenerator::lock_method(void) {
316 316 __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0); // Load access flags.
317 317
318 318 #ifdef ASSERT
319 319 { Label ok;
320 320 __ btst(JVM_ACC_SYNCHRONIZED, O0);
321 321 __ br( Assembler::notZero, false, Assembler::pt, ok);
322 322 __ delayed()->nop();
323 323 __ stop("method doesn't need synchronization");
324 324 __ bind(ok);
325 325 }
326 326 #endif // ASSERT
327 327
328 328 // get synchronization object to O0
329 329 { Label done;
330 330 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
331 331 __ btst(JVM_ACC_STATIC, O0);
332 332 __ br( Assembler::zero, true, Assembler::pt, done);
333 333 __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
334 334
335 335 __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
336 336 __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);
337 337
338 338 // lock the mirror, not the klassOop
339 339 __ ld_ptr( O0, mirror_offset, O0);
340 340
341 341 #ifdef ASSERT
342 342 __ tst(O0);
343 343 __ breakpoint_trap(Assembler::zero);
344 344 #endif // ASSERT
345 345
346 346 __ bind(done);
347 347 }
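  // A sketch of the synchronization object chosen above:
  //   oop lock = (access_flags & JVM_ACC_STATIC)
  //              ? method->constants()->pool_holder()->java_mirror() // static: class lock
  //              : (oop)locals[0];                                   // non-static: receiver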
348 348
349 349 __ add_monitor_to_stack(true, noreg, noreg); // allocate monitor elem
350 350 __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
351 351 // __ untested("lock_object from method entry");
352 352 __ lock_object(Lmonitors, O0);
353 353 }
354 354
355 355
356 356 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
357 357 Register Rscratch,
358 358 Register Rscratch2) {
359 359 const int page_size = os::vm_page_size();
360 360 Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
361 361 Label after_frame_check;
362 362
363 363 assert_different_registers(Rframe_size, Rscratch, Rscratch2);
364 364
365 365 __ set( page_size, Rscratch );
366 366 __ cmp( Rframe_size, Rscratch );
367 367
368 368 __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
369 369 __ delayed()->nop();
370 370
371 371 // get the stack base, and in debug, verify it is non-zero
372 372 __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
373 373 #ifdef ASSERT
374 374 Label base_not_zero;
375 375 __ cmp( Rscratch, G0 );
376 376 __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
377 377 __ delayed()->nop();
378 378 __ stop("stack base is zero in generate_stack_overflow_check");
379 379 __ bind(base_not_zero);
380 380 #endif
381 381
382 382 // get the stack size, and in debug, verify it is non-zero
383 383 assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
384 384 __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
385 385 #ifdef ASSERT
386 386 Label size_not_zero;
387 387 __ cmp( Rscratch2, G0 );
388 388 __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
389 389 __ delayed()->nop();
390 390 __ stop("stack size is zero in generate_stack_overflow_check");
391 391 __ bind(size_not_zero);
392 392 #endif
393 393
394 394   // compute the end of the protected zone, i.e. the lowest non-guarded stack address
395 395 __ sub( Rscratch, Rscratch2, Rscratch );
396 396 __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
397 397 __ add( Rscratch, Rscratch2, Rscratch );
398 398
399 399   // Add in the size of the frame (which is the same as subtracting it from the
400 400   // SP, which would take another register).
401 401 __ add( Rscratch, Rframe_size, Rscratch );
402 402
403 403 // the frame is greater than one page in size, so check against
404 404 // the bottom of the stack
405 405 __ cmp( SP, Rscratch );
406 406 __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
407 407 __ delayed()->nop();
408 408
409 409 // Save the return address as the exception pc
410 410 __ st_ptr(O7, saved_exception_pc);
411 411
412 412 // the stack will overflow, throw an exception
413 413 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
414 414
415 415 // if you get to here, then there is enough stack space
416 416 __ bind( after_frame_check );
417 417 }
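// A sketch of the check generated above:
//   limit = stack_base - stack_size                          // lowest stack address
//           + (StackRedPages + StackYellowPages) * page_size // guard zone
//           + frame_size;
//   if (SP <= limit) throw_StackOverflowError();
// Frames no larger than one page skip the explicit check and rely on the
// guard pages instead.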
418 418
419 419
420 420 //
421 421 // Generate a fixed interpreter frame. This is identical setup for interpreted
422 422 // methods and for native methods hence the shared code.
423 423
424 424 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
425 425 //
426 426 //
427 427 // The entry code sets up a new interpreter frame in 4 steps:
428 428 //
429 429 //  1) Increase caller's SP by the extra local space needed:
430 430 // (check for overflow)
431 431 // Efficient implementation of xload/xstore bytecodes requires
432 432 //     that arguments and non-argument locals are in a contiguously
433 433 // addressable memory block => non-argument locals must be
434 434 // allocated in the caller's frame.
435 435 //
436 436 // 2) Create a new stack frame and register window:
437 437 // The new stack frame must provide space for the standard
438 438 // register save area, the maximum java expression stack size,
439 439 // the monitor slots (0 slots initially), and some frame local
440 440 // scratch locations.
441 441 //
442 442 // 3) The following interpreter activation registers must be setup:
443 443 // Lesp : expression stack pointer
444 444 // Lbcp : bytecode pointer
445 445 // Lmethod : method
446 446 // Llocals : locals pointer
447 447 // Lmonitors : monitor pointer
448 448 // LcpoolCache: constant pool cache
449 449 //
450 450 // 4) Initialize the non-argument locals if necessary:
451 451 // Non-argument locals may need to be initialized to NULL
452 452 // for GC to work. If the oop-map information is accurate
453 453 // (in the absence of the JSR problem), no initialization
454 454 // is necessary.
455 455 //
456 456 // (gri - 2/25/2000)
457 457
458 458
459 459 const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
460 460 const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
461 461 const Address max_stack (G5_method, methodOopDesc::max_stack_offset());
462 462 int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
463 463
464 464 const int extra_space =
465 465 rounded_vm_local_words + // frame local scratch space
466 466 //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters
467 467 frame::memory_parameter_word_sp_offset + // register save area
468 468 (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
469 469
470 470 const Register Glocals_size = G3;
471 471 const Register Otmp1 = O3;
472 472 const Register Otmp2 = O4;
473 473 // Lscratch can't be used as a temporary because the call_stub uses
474 474   // it to assert that the stack frame was set up correctly.
475 475
476 476 __ lduh( size_of_parameters, Glocals_size);
477 477
478 478 // Gargs points to first local + BytesPerWord
479 479 // Set the saved SP after the register window save
480 480 //
481 481 assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
482 482 __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
483 483 __ add(Gargs, Otmp1, Gargs);
484 484
485 485 if (native_call) {
486 486 __ calc_mem_param_words( Glocals_size, Gframe_size );
487 487 __ add( Gframe_size, extra_space, Gframe_size);
488 488 __ round_to( Gframe_size, WordsPerLong );
489 489 __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
490 490 } else {
491 491
492 492 //
493 493 // Compute number of locals in method apart from incoming parameters
494 494 //
495 495 __ lduh( size_of_locals, Otmp1 );
496 496 __ sub( Otmp1, Glocals_size, Glocals_size );
497 497 __ round_to( Glocals_size, WordsPerLong );
498 498 __ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );
499 499
500 500 // see if the frame is greater than one page in size. If so,
501 501 // then we need to verify there is enough stack space remaining
502 502 // Frame_size = (max_stack + extra_space) * BytesPerWord;
503 503 __ lduh( max_stack, Gframe_size );
504 504 __ add( Gframe_size, extra_space, Gframe_size );
505 505 __ round_to( Gframe_size, WordsPerLong );
506 506 __ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);
507 507
508 508 // Add in java locals size for stack overflow check only
509 509 __ add( Gframe_size, Glocals_size, Gframe_size );
510 510
511 511 const Register Otmp2 = O4;
512 512 assert_different_registers(Otmp1, Otmp2, O5_savedSP);
513 513 generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
514 514
515 515 __ sub( Gframe_size, Glocals_size, Gframe_size);
516 516
517 517 //
518 518   // bump SP to accommodate the extra locals
519 519 //
520 520 __ sub( SP, Glocals_size, SP );
521 521 }
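  // A sketch of the sizing above (non-native case):
  //   check_size = round(max_stack + extra_space) + round(locals - params)
  // is used only for the stack overflow check; the save() below uses just
  // the first term, and the extra locals were carved out of the caller's
  // frame by the SP bump at the end of the branch above.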
522 522
523 523 //
524 524 // now set up a stack frame with the size computed above
525 525 //
526 526 __ neg( Gframe_size );
527 527 __ save( SP, Gframe_size, SP );
528 528
529 529 //
530 530 // now set up all the local cache registers
531 531 //
532 532 // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
533 533 // that all present references to Lbyte_code initialize the register
534 534 // immediately before use
535 535 if (native_call) {
536 536 __ mov(G0, Lbcp);
537 537 } else {
538 538 __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
539 539 __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
540 540 }
541 541 __ mov( G5_method, Lmethod); // set Lmethod
542 542 __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
543 543 __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
544 544 #ifdef _LP64
545 545 __ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias
546 546 #endif
547 547 __ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp
548 548
549 549 // setup interpreter activation registers
550 550 __ sub(Gargs, BytesPerWord, Llocals); // set Llocals
551 551
552 552 if (ProfileInterpreter) {
553 553 #ifdef FAST_DISPATCH
554 554 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
555 555 // they both use I2.
556 556 assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
557 557 #endif // FAST_DISPATCH
558 558 __ set_method_data_pointer();
559 559 }
560 560
561 561 }
562 562
563 563 // Empty method, generate a very fast return.
564 564
565 565 address InterpreterGenerator::generate_empty_entry(void) {
566 566
567 567   // A method that does nothing but return...
568 568
569 569 address entry = __ pc();
570 570 Label slow_path;
571 571
572 572 __ verify_oop(G5_method);
573 573
574 574 // do nothing for empty methods (do not even increment invocation counter)
575 575 if ( UseFastEmptyMethods) {
576 576 // If we need a safepoint check, generate full interpreter entry.
577 577 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
578 578 __ set(sync_state, G3_scratch);
579 579 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
580 580 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
581 581 __ delayed()->nop();
582 582
583 583 // Code: _return
584 584 __ retl();
585 585 __ delayed()->mov(O5_savedSP, SP);
586 586
587 587 __ bind(slow_path);
588 588 (void) generate_normal_entry(false);
589 589
590 590 return entry;
591 591 }
592 592 return NULL;
593 593 }
594 594
595 595 // Call an accessor method (assuming it is resolved; otherwise drop into
596 596 // the vanilla (slow path) entry).
597 597
598 598 // Generates code to elide accessor methods
599 599 // Uses G3_scratch and G1_scratch as scratch
600 600 address InterpreterGenerator::generate_accessor_entry(void) {
601 601
602 602 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
603 603 // parameter size = 1
604 604 // Note: We can only use this code if the getfield has been resolved
605 605 // and if we don't have a null-pointer exception => check for
606 606 // these conditions first and use slow path if necessary.
607 607 address entry = __ pc();
608 608 Label slow_path;
609 609
610 610
611 611   // XXX: with compressed oops, the pointer load and decode doesn't fit in
612 612   // the delay slot and damages G1
613 613 if ( UseFastAccessorMethods && !UseCompressedOops ) {
614 614 // Check if we need to reach a safepoint and generate full interpreter
615 615 // frame if so.
616 616 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
617 617 __ load_contents(sync_state, G3_scratch);
618 618 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
619 619 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
620 620 __ delayed()->nop();
621 621
622 622 // Check if local 0 != NULL
623 623 __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
624 624 __ tst(Otos_i); // check if local 0 == NULL and go the slow path
625 625 __ brx(Assembler::zero, false, Assembler::pn, slow_path);
626 626 __ delayed()->nop();
627 627
628 628
629 629 // read first instruction word and extract bytecode @ 1 and index @ 2
630 630 // get first 4 bytes of the bytecodes (big endian!)
631 631 __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
632 632 __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);
633 633
634 634   // move index @ 2 far left, then to the rightmost two bytes.
635 635 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
636 636 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
637 637 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
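    // A sketch of the two shifts above: the big-endian word is
    // [aload_0][getfield][index_hi][index_lo], so the shift pair leaves
    //   G1_scratch = cp_index * sizeof(ConstantPoolCacheEntry) // byte offset
    // ready to be added to the cache base below.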
638 638
639 639 // get constant pool cache
640 640 __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
641 641 __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
642 642
643 643 // get specific constant pool cache entry
644 644 __ add(G3_scratch, G1_scratch, G3_scratch);
645 645
646 646 // Check the constant Pool cache entry to see if it has been resolved.
647 647 // If not, need the slow path.
648 648 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
649 649 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
650 650 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
651 651 __ and3(G1_scratch, 0xFF, G1_scratch);
652 652 __ cmp(G1_scratch, Bytecodes::_getfield);
653 653 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
654 654 __ delayed()->nop();
655 655
656 656 // Get the type and return field offset from the constant pool cache
657 657 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
658 658 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
659 659
660 660 Label xreturn_path;
661 661 // Need to differentiate between igetfield, agetfield, bgetfield etc.
662 662 // because they are different sizes.
663 663 // Get the type from the constant pool cache
664 664 __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
665 665 // Make sure we don't need to mask G1_scratch for tosBits after the above shift
666 666 ConstantPoolCacheEntry::verify_tosBits();
667 667 __ cmp(G1_scratch, atos );
668 668 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
669 669 __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
670 670 __ cmp(G1_scratch, itos);
671 671 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
672 672 __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
673 673 __ cmp(G1_scratch, stos);
674 674 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
675 675 __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
676 676 __ cmp(G1_scratch, ctos);
677 677 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
678 678 __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
679 679 #ifdef ASSERT
680 680 __ cmp(G1_scratch, btos);
681 681 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
682 682 __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
683 683 __ should_not_reach_here();
684 684 #endif
685 685 __ ldsb(Otos_i, G3_scratch, Otos_i);
686 686 __ bind(xreturn_path);
687 687
688 688 // _ireturn/_areturn
689 689 __ retl(); // return from leaf routine
690 690 __ delayed()->mov(O5_savedSP, SP);
691 691
692 692 // Generate regular method entry
693 693 __ bind(slow_path);
694 694 (void) generate_normal_entry(false);
695 695 return entry;
696 696 }
697 697 return NULL;
698 698 }
699 699
700 700 //
701 701 // Interpreter stub for calling a native method. (asm interpreter)
702 702 // This sets up a somewhat different looking stack for calling the native method
703 703 // than the typical interpreter frame setup.
704 704 //
705 705
706 706 address InterpreterGenerator::generate_native_entry(bool synchronized) {
707 707 address entry = __ pc();
708 708
709 709 // the following temporary registers are used during frame creation
710 710 const Register Gtmp1 = G3_scratch ;
711 711 const Register Gtmp2 = G1_scratch;
712 712 bool inc_counter = UseCompiler || CountCompiledCalls;
713 713
714 714 // make sure registers are different!
715 715 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
716 716
717 717 const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());
718 718
719 719 __ verify_oop(G5_method);
720 720
721 721 const Register Glocals_size = G3;
722 722 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
723 723
724 724 // make sure method is native & not abstract
725 725 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
726 726 #ifdef ASSERT
727 727 __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
728 728 {
729 729 Label L;
730 730 __ btst(JVM_ACC_NATIVE, Gtmp1);
731 731 __ br(Assembler::notZero, false, Assembler::pt, L);
732 732 __ delayed()->nop();
733 733 __ stop("tried to execute non-native method as native");
734 734 __ bind(L);
735 735 }
736 736 { Label L;
737 737 __ btst(JVM_ACC_ABSTRACT, Gtmp1);
738 738 __ br(Assembler::zero, false, Assembler::pt, L);
739 739 __ delayed()->nop();
740 740 __ stop("tried to execute abstract method as non-abstract");
741 741 __ bind(L);
742 742 }
743 743 #endif // ASSERT
744 744
745 745 // generate the code to allocate the interpreter stack frame
746 746 generate_fixed_frame(true);
747 747
748 748 //
749 749 // No locals to initialize for native method
750 750 //
751 751
752 752   // this slot will be set later; we initialize it to null here just in
753 753 // case we get a GC before the actual value is stored later
754 754 __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
755 755
756 756 const Address do_not_unlock_if_synchronized(G2_thread,
757 757 JavaThread::do_not_unlock_if_synchronized_offset());
758 758   // Since at this point in the method invocation the exception handler
759 759   // would try to exit the monitor of a synchronized method that has not
760 760   // been entered yet, we set the thread-local variable
761 761   // _do_not_unlock_if_synchronized to true. If any exception is thrown by
762 762   // the runtime, the exception handler (i.e. unlock_if_synchronized_method)
763 763   // will check this thread-local flag.
764 764   // The effect of this flag is to force an unwind of the topmost interpreter
765 765   // frame without performing an unlock while doing so.
766 766
767 767 __ movbool(true, G3_scratch);
768 768 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
769 769
770 770 // increment invocation counter and check for overflow
771 771 //
772 772 // Note: checking for negative value instead of overflow
773 773 // so we have a 'sticky' overflow test (may be of
774 774 // importance as soon as we have true MT/MP)
775 775 Label invocation_counter_overflow;
776 776 Label Lcontinue;
777 777 if (inc_counter) {
778 778 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
779 779
780 780 }
781 781 __ bind(Lcontinue);
782 782
783 783 bang_stack_shadow_pages(true);
784 784
785 785 // reset the _do_not_unlock_if_synchronized flag
786 786 __ stbool(G0, do_not_unlock_if_synchronized);
787 787
788 788 // check for synchronized methods
789 789 // Must happen AFTER invocation_counter check and stack overflow check,
790 790   // so the method is not locked if the counter overflows.
791 791
792 792 if (synchronized) {
793 793 lock_method();
794 794 } else {
795 795 #ifdef ASSERT
796 796 { Label ok;
797 797 __ ld(Laccess_flags, O0);
798 798 __ btst(JVM_ACC_SYNCHRONIZED, O0);
799 799 __ br( Assembler::zero, false, Assembler::pt, ok);
800 800 __ delayed()->nop();
801 801 __ stop("method needs synchronization");
802 802 __ bind(ok);
803 803 }
804 804 #endif // ASSERT
805 805 }
806 806
807 807
808 808 // start execution
809 809 __ verify_thread();
810 810
811 811 // JVMTI support
812 812 __ notify_method_entry();
813 813
814 814 // native call
815 815
816 816 // (note that O0 is never an oop--at most it is a handle)
817 817 // It is important not to smash any handles created by this call,
818 818 // until any oop handle in O0 is dereferenced.
819 819
820 820 // (note that the space for outgoing params is preallocated)
821 821
822 822 // get signature handler
823 823 { Label L;
824 824 Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
825 825 __ ld_ptr(signature_handler, G3_scratch);
826 826 __ tst(G3_scratch);
827 827 __ brx(Assembler::notZero, false, Assembler::pt, L);
828 828 __ delayed()->nop();
829 829 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
830 830 __ ld_ptr(signature_handler, G3_scratch);
831 831 __ bind(L);
832 832 }
833 833
834 834   // Push a new frame so that the args will really be stored in it.
835 835   // Copy a few locals across so the new frame has the variables
836 836   // we need; these values will be dead at the jni call and
837 837   // therefore not gc volatile like the values in the current
838 838   // frame (Lmethod in particular)
839 839
840 840 // Flush the method pointer to the register save area
841 841 __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
842 842 __ mov(Llocals, O1);
843 843
844 844 // calculate where the mirror handle body is allocated in the interpreter frame:
845 845 __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
846 846
847 847 // Calculate current frame size
848 848 __ sub(SP, FP, O3); // Calculate negative of current frame size
849 849 __ save(SP, O3, SP); // Allocate an identical sized frame
850 850
851 851 // Note I7 has leftover trash. Slow signature handler will fill it in
852 852 // should we get there. Normal jni call will set reasonable last_Java_pc
853 853 // below (and fix I7 so the stack trace doesn't have a meaningless frame
854 854 // in it).
855 855
856 856 // Load interpreter frame's Lmethod into same register here
857 857
858 858 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
859 859
860 860 __ mov(I1, Llocals);
861 861 __ mov(I2, Lscratch2); // save the address of the mirror
862 862
863 863
864 864 // ONLY Lmethod and Llocals are valid here!
865 865
866 866   // Call the signature handler. It will move the args properly, since Llocals
867 867   // in the current frame matches that in the outer frame.
868 868
869 869 __ callr(G3_scratch, 0);
870 870 __ delayed()->nop();
871 871
872 872 // Result handler is in Lscratch
873 873
874 874 // Reload interpreter frame's Lmethod since slow signature handler may block
875 875 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
876 876
877 877 { Label not_static;
878 878
879 879 __ ld(Laccess_flags, O0);
880 880 __ btst(JVM_ACC_STATIC, O0);
881 881 __ br( Assembler::zero, false, Assembler::pt, not_static);
882 882     // get native function entry point (O0 is a good temp until the very end)
883 883 __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
884 884 // for static methods insert the mirror argument
885 885 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
886 886
887 887     __ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
888 888 __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
889 889 __ ld_ptr(O1, mirror_offset, O1);
890 890 #ifdef ASSERT
891 891 if (!PrintSignatureHandlers) // do not dirty the output with this
892 892 { Label L;
893 893 __ tst(O1);
894 894 __ brx(Assembler::notZero, false, Assembler::pt, L);
895 895 __ delayed()->nop();
896 896 __ stop("mirror is missing");
897 897 __ bind(L);
898 898 }
899 899 #endif // ASSERT
900 900 __ st_ptr(O1, Lscratch2, 0);
901 901 __ mov(Lscratch2, O1);
902 902 __ bind(not_static);
903 903 }
904 904
905 905 // At this point, arguments have been copied off of stack into
906 906 // their JNI positions, which are O1..O5 and SP[68..].
907 907 // Oops are boxed in-place on the stack, with handles copied to arguments.
908 908 // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.
909 909
910 910 #ifdef ASSERT
911 911 { Label L;
912 912 __ tst(O0);
913 913 __ brx(Assembler::notZero, false, Assembler::pt, L);
914 914 __ delayed()->nop();
915 915 __ stop("native entry point is missing");
916 916 __ bind(L);
917 917 }
918 918 #endif // ASSERT
919 919
920 920 //
921 921 // setup the frame anchor
922 922 //
923 923 // The scavenge function only needs to know that the PC of this frame is
924 924   // in the interpreter method entry code; it doesn't need to know the exact
925 925 // PC and hence we can use O7 which points to the return address from the
926 926 // previous call in the code stream (signature handler function)
927 927 //
928 928 // The other trick is we set last_Java_sp to FP instead of the usual SP because
929 929 // we have pushed the extra frame in order to protect the volatile register(s)
930 930 // in that frame when we return from the jni call
931 931 //
932 932
933 933 __ set_last_Java_frame(FP, O7);
934 934 __ mov(O7, I7); // make dummy interpreter frame look like one above,
935 935 // not meaningless information that'll confuse me.
936 936
937 937   // flush the windows now. We don't care about the current (protection) frame,
938 938 // only the outer frames
939 939
940 940 __ flush_windows();
941 941
942 942 // mark windows as flushed
943 943 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
944 944 __ set(JavaFrameAnchor::flushed, G3_scratch);
945 945 __ st(G3_scratch, flags);
946 946
947 947 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
948 948
949 949 Address thread_state(G2_thread, JavaThread::thread_state_offset());
950 950 #ifdef ASSERT
951 951 { Label L;
952 952 __ ld(thread_state, G3_scratch);
953 953 __ cmp(G3_scratch, _thread_in_Java);
954 954 __ br(Assembler::equal, false, Assembler::pt, L);
955 955 __ delayed()->nop();
956 956 __ stop("Wrong thread state in native stub");
957 957 __ bind(L);
958 958 }
959 959 #endif // ASSERT
960 960 __ set(_thread_in_native, G3_scratch);
961 961 __ st(G3_scratch, thread_state);
962 962
963 963 // Call the jni method, using the delay slot to set the JNIEnv* argument.
964 964 __ save_thread(L7_thread_cache); // save Gthread
965 965 __ callr(O0, 0);
966 966 __ delayed()->
967 967 add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
968 968
969 969   // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD
970 970
971 971 __ restore_thread(L7_thread_cache); // restore G2_thread
972 972 __ reinit_heapbase();
973 973
974 974 // must we block?
975 975
976 976 // Block, if necessary, before resuming in _thread_in_Java state.
977 977 // In order for GC to work, don't clear the last_Java_sp until after blocking.
978 978 { Label no_block;
979 979 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
980 980
981 981 // Switch thread to "native transition" state before reading the synchronization state.
982 982 // This additional state is necessary because reading and testing the synchronization
983 983 // state is not atomic w.r.t. GC, as this scenario demonstrates:
984 984 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
985 985 // VM thread changes sync state to synchronizing and suspends threads for GC.
986 986 // Thread A is resumed to finish this native method, but doesn't block here since it
987 987   //   didn't see any synchronization in progress, and escapes.
988 988 __ set(_thread_in_native_trans, G3_scratch);
989 989 __ st(G3_scratch, thread_state);
990 990 if(os::is_MP()) {
991 991 if (UseMembar) {
992 992 // Force this write out before the read below
993 993 __ membar(Assembler::StoreLoad);
994 994 } else {
995 995 // Write serialization page so VM thread can do a pseudo remote membar.
996 996 // We use the current thread pointer to calculate a thread specific
997 997 // offset to write to within the page. This minimizes bus traffic
998 998 // due to cache line collision.
999 999 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1000 1000 }
1001 1001 }
1002 1002 __ load_contents(sync_state, G3_scratch);
1003 1003 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
1004 1004
1005 1005 Label L;
1006 1006 __ br(Assembler::notEqual, false, Assembler::pn, L);
1007 1007 __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1008 1008 __ cmp(G3_scratch, 0);
1009 1009 __ br(Assembler::equal, false, Assembler::pt, no_block);
1010 1010 __ delayed()->nop();
1011 1011 __ bind(L);
1012 1012
1013 1013 // Block. Save any potential method result value before the operation and
1014 1014 // use a leaf call to leave the last_Java_frame setup undisturbed.
1015 1015 save_native_result();
1016 1016 __ call_VM_leaf(L7_thread_cache,
1017 1017 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1018 1018 G2_thread);
1019 1019
1020 1020 // Restore any method result value
1021 1021 restore_native_result();
1022 1022 __ bind(no_block);
1023 1023 }
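  // A sketch of the transition logic above:
  //   thread->set_thread_state(_thread_in_native_trans);
  //   if (safepoint_state != _not_synchronized || thread->suspend_flags != 0) {
  //     save_native_result();
  //     JavaThread::check_special_condition_for_native_trans(thread);
  //     restore_native_result();
  //   }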
1024 1024
1025 1025 // Clear the frame anchor now
1026 1026
1027 1027 __ reset_last_Java_frame();
1028 1028
1029 1029 // Move the result handler address
1030 1030 __ mov(Lscratch, G3_scratch);
1031 1031 // return possible result to the outer frame
1032 1032 #ifndef _LP64
1033 1033 __ mov(O0, I0);
1034 1034 __ restore(O1, G0, O1);
1035 1035 #else
1036 1036 __ restore(O0, G0, O0);
1037 1037 #endif /* _LP64 */
1038 1038
1039 1039 // Move result handler to expected register
1040 1040 __ mov(G3_scratch, Lscratch);
1041 1041
1042 1042   // Back in normal (native) interpreter frame. State is thread_in_native_trans;
1043 1043   // switch to thread_in_Java.
1044 1044
1045 1045 __ set(_thread_in_Java, G3_scratch);
1046 1046 __ st(G3_scratch, thread_state);
1047 1047
1048 1048 // reset handle block
1049 1049 __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
1050 1050 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
1051 1051
1052 1052   // If we have an oop result, store it where it will be safe for any further gc
1053 1053   // until we return, now that we've released the handle it might have been protected by
1054 1054
1055 1055 {
1056 1056 Label no_oop, store_result;
1057 1057
1058 1058 __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
1059 1059 __ cmp(G3_scratch, Lscratch);
1060 1060 __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
1061 1061 __ delayed()->nop();
1062 1062 __ addcc(G0, O0, O0);
1063 1063 __ brx(Assembler::notZero, true, Assembler::pt, store_result); // if result is not NULL:
1064 1064 __ delayed()->ld_ptr(O0, 0, O0); // unbox it
1065 1065 __ mov(G0, O0);
1066 1066
1067 1067 __ bind(store_result);
1068 1068 // Store it where gc will look for it and result handler expects it.
1069 1069 __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
1070 1070
1071 1071 __ bind(no_oop);
1072 1072
1073 1073 }
1074 1074
1075 1075
1076 1076 // handle exceptions (exception handling will handle unlocking!)
1077 1077 { Label L;
1078 1078 Address exception_addr(G2_thread, Thread::pending_exception_offset());
1079 1079 __ ld_ptr(exception_addr, Gtemp);
1080 1080 __ tst(Gtemp);
1081 1081 __ brx(Assembler::equal, false, Assembler::pt, L);
1082 1082 __ delayed()->nop();
1083 1083 // Note: This could be handled more efficiently since we know that the native
1084 1084 // method doesn't have an exception handler. We could directly return
1085 1085 // to the exception handler for the caller.
1086 1086 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1087 1087 __ should_not_reach_here();
1088 1088 __ bind(L);
1089 1089 }
1090 1090
1091 1091 // JVMTI support (preserves thread register)
1092 1092 __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
1093 1093
1094 1094 if (synchronized) {
1095 1095 // save and restore any potential method result value around the unlocking operation
1096 1096 save_native_result();
1097 1097
1098 1098 __ add( __ top_most_monitor(), O1);
1099 1099 __ unlock_object(O1);
1100 1100
1101 1101 restore_native_result();
1102 1102 }
1103 1103
1104 1104 #if defined(COMPILER2) && !defined(_LP64)
1105 1105
1106 1106   // C2 expects long results in G1. We can't tell whether we're returning to
1107 1107   // interpreted or compiled code, so just be safe.
1108 1108
1109 1109 __ sllx(O0, 32, G1); // Shift bits into high G1
1110 1110 __ srl (O1, 0, O1); // Zero extend O1
1111 1111 __ or3 (O1, G1, G1); // OR 64 bits into G1
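  // A sketch of the packing above: G1 = ((julong)O0 << 32) | (juint)O1,
  // so the long result is available both where the interpreter leaves it
  // (O0/O1) and where C2 expects it (G1).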
1112 1112
1113 1113 #endif /* COMPILER2 && !_LP64 */
1114 1114
1115 1115 // dispose of return address and remove activation
1116 1116 #ifdef ASSERT
1117 1117 {
1118 1118 Label ok;
1119 1119 __ cmp(I5_savedSP, FP);
1120 1120 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
1121 1121 __ delayed()->nop();
1122 1122 __ stop("bad I5_savedSP value");
1123 1123 __ should_not_reach_here();
1124 1124 __ bind(ok);
1125 1125 }
1126 1126 #endif
1127 1127 if (TraceJumps) {
1128 1128 // Move target to register that is recordable
1129 1129 __ mov(Lscratch, G3_scratch);
1130 1130 __ JMP(G3_scratch, 0);
1131 1131 } else {
1132 1132 __ jmp(Lscratch, 0);
1133 1133 }
1134 1134 __ delayed()->nop();
1135 1135
1136 1136
1137 1137 if (inc_counter) {
1138 1138 // handle invocation counter overflow
1139 1139 __ bind(invocation_counter_overflow);
1140 1140 generate_counter_overflow(Lcontinue);
1141 1141 }
1142 1142
1143 1143
1144 1144
1145 1145 return entry;
1146 1146 }
1147 1147
1148 1148
1149 1149 // Generic method entry to (asm) interpreter
1150 1150 //------------------------------------------------------------------------------------------------------------------------
1151 1151 //
1152 1152 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1153 1153 address entry = __ pc();
1154 1154
1155 1155 bool inc_counter = UseCompiler || CountCompiledCalls;
1156 1156
1157 1157 // the following temporary registers are used during frame creation
1158 1158 const Register Gtmp1 = G3_scratch ;
1159 1159 const Register Gtmp2 = G1_scratch;
1160 1160
1161 1161 // make sure registers are different!
1162 1162 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
1163 1163
1164 1164 const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
1165 1165 const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
1166 1166 // Seems like G5_method is live at the point this is used. So we could make this look consistent
1167 1167   // and use it in the asserts.
1168 1168 const Address access_flags (Lmethod, methodOopDesc::access_flags_offset());
1169 1169
1170 1170 __ verify_oop(G5_method);
1171 1171
1172 1172 const Register Glocals_size = G3;
1173 1173 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
1174 1174
1175 1175 // make sure method is not native & not abstract
1176 1176 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
1177 1177 #ifdef ASSERT
1178 1178 __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
1179 1179 {
1180 1180 Label L;
1181 1181 __ btst(JVM_ACC_NATIVE, Gtmp1);
1182 1182 __ br(Assembler::zero, false, Assembler::pt, L);
1183 1183 __ delayed()->nop();
1184 1184 __ stop("tried to execute native method as non-native");
1185 1185 __ bind(L);
1186 1186 }
1187 1187 { Label L;
1188 1188 __ btst(JVM_ACC_ABSTRACT, Gtmp1);
1189 1189 __ br(Assembler::zero, false, Assembler::pt, L);
1190 1190 __ delayed()->nop();
1191 1191 __ stop("tried to execute abstract method as non-abstract");
1192 1192 __ bind(L);
1193 1193 }
1194 1194 #endif // ASSERT
1195 1195
1196 1196 // generate the code to allocate the interpreter stack frame
1197 1197
1198 1198 generate_fixed_frame(false);
1199 1199
1200 1200 #ifdef FAST_DISPATCH
1201 1201 __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
1202 1202 // set bytecode dispatch table base
1203 1203 #endif
1204 1204
1205 1205 //
1206 1206 // Code to initialize the extra (i.e. non-parm) locals
1207 1207 //
1208 1208 Register init_value = noreg; // will be G0 if we must clear locals
1209 1209   // The way the code was set up before, zerolocals was always true for vanilla java entries.
1210 1210   // It could only be false for the specialized entries like accessor or empty, which have
1211 1211   // no extra locals, so the testing was a waste of time and the extra locals were always
1212 1212   // initialized. We removed this extra complication from already over-complicated code.
1213 1213
1214 1214 init_value = G0;
1215 1215 Label clear_loop;
1216 1216
1217 1217 // NOTE: If you change the frame layout, this code will need to
1218 1218 // be updated!
1219 1219 __ lduh( size_of_locals, O2 );
1220 1220 __ lduh( size_of_parameters, O1 );
1221 1221 __ sll( O2, Interpreter::logStackElementSize(), O2);
1222 1222 __ sll( O1, Interpreter::logStackElementSize(), O1 );
1223 1223 __ sub( Llocals, O2, O2 );
1224 1224 __ sub( Llocals, O1, O1 );
1225 1225
1226 1226 __ bind( clear_loop );
1227 1227 __ inc( O2, wordSize );
1228 1228
1229 1229 __ cmp( O2, O1 );
1230 1230 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
1231 1231 __ delayed()->st_ptr( init_value, O2, 0 );
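  // A sketch of the clearing loop above (locals grow toward lower addresses,
  // so the non-parameter slots sit below the parameter slots; this assumes
  // one stack element per word):
  //   for (intptr_t* p = Llocals - size_of_locals + 1;
  //        p <= Llocals - size_of_parameters; p++)
  //     *p = 0;  // NULL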
1232 1232
1233 1233 const Address do_not_unlock_if_synchronized(G2_thread,
1234 1234 JavaThread::do_not_unlock_if_synchronized_offset());
1235 1235   // Since at this point in the method invocation the exception handler
1236 1236   // would try to exit the monitor of a synchronized method that has not
1237 1237   // been entered yet, we set the thread-local variable
1238 1238   // _do_not_unlock_if_synchronized to true. If any exception is thrown by
1239 1239   // the runtime, the exception handler (i.e. unlock_if_synchronized_method)
1240 1240   // will check this thread-local flag.
1241 1241 __ movbool(true, G3_scratch);
1242 1242 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
1243 1243
1244 1244 // increment invocation counter and check for overflow
1245 1245 //
1246 1246 // Note: checking for negative value instead of overflow
1247 1247 // so we have a 'sticky' overflow test (may be of
1248 1248 // importance as soon as we have true MT/MP)
1249 1249 Label invocation_counter_overflow;
1250 1250 Label profile_method;
1251 1251 Label profile_method_continue;
1252 1252 Label Lcontinue;
1253 1253 if (inc_counter) {
1254 1254 generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1255 1255 if (ProfileInterpreter) {
1256 1256 __ bind(profile_method_continue);
1257 1257 }
1258 1258 }
1259 1259 __ bind(Lcontinue);
1260 1260
1261 1261 bang_stack_shadow_pages(false);
1262 1262
1263 1263 // reset the _do_not_unlock_if_synchronized flag
1264 1264 __ stbool(G0, do_not_unlock_if_synchronized);
1265 1265
1266 1266 // check for synchronized methods
1267 1267 // Must happen AFTER invocation_counter check and stack overflow check,
1268 1268   // so the method is not locked if the counter overflows.
1269 1269
1270 1270 if (synchronized) {
1271 1271 lock_method();
1272 1272 } else {
1273 1273 #ifdef ASSERT
1274 1274 { Label ok;
1275 1275 __ ld(access_flags, O0);
1276 1276 __ btst(JVM_ACC_SYNCHRONIZED, O0);
1277 1277 __ br( Assembler::zero, false, Assembler::pt, ok);
1278 1278 __ delayed()->nop();
1279 1279 __ stop("method needs synchronization");
1280 1280 __ bind(ok);
1281 1281 }
1282 1282 #endif // ASSERT
1283 1283 }
1284 1284
1285 1285 // start execution
1286 1286
1287 1287 __ verify_thread();
1288 1288
1289 1289 // jvmti support
1290 1290 __ notify_method_entry();
1291 1291
1292 1292 // start executing instructions
1293 1293 __ dispatch_next(vtos);
1294 1294
1295 1295
1296 1296 if (inc_counter) {
1297 1297 if (ProfileInterpreter) {
1298 1298 // We have decided to profile this method in the interpreter
1299 1299 __ bind(profile_method);
1300 1300
1301 1301 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);
1302 1302
1303 1303 #ifdef ASSERT
1304 1304 __ tst(O0);
1305 1305 __ breakpoint_trap(Assembler::notEqual);
1306 1306 #endif
1307 1307
1308 1308 __ set_method_data_pointer();
1309 1309
1310 1310 __ ba(false, profile_method_continue);
1311 1311 __ delayed()->nop();
1312 1312 }
1313 1313
1314 1314 // handle invocation counter overflow
1315 1315 __ bind(invocation_counter_overflow);
1316 1316 generate_counter_overflow(Lcontinue);
1317 1317 }
1318 1318
1319 1319
1320 1320 return entry;
1321 1321 }
1322 1322
1323 1323
1324 1324 //----------------------------------------------------------------------------------------------------
1325 1325 // Entry points & stack frame layout
1326 1326 //
1327 1327 // Here we generate the various kinds of entries into the interpreter.
1328 1328 // The two main entry types are generic bytecode methods and native call methods.
1329 1329 // These both come in synchronized and non-synchronized versions but the
1330 1330 // frame layout they create is very similar. The other method entry
1331 1331 // types are really just special-purpose entries that combine entry
1332 1332 // and interpretation all in one. These are for trivial methods like
1333 1333 // accessor, empty, or special math methods.
1334 1334 //
1335 1335 // When control flow reaches any of the entry types for the interpreter
1336 1336 // the following holds ->
1337 1337 //
1338 1338 // C2 Calling Conventions:
1339 1339 //
1340 1340 // The entry code below assumes that the following registers are set
1341 1341 // when coming in:
1342 1342 // G5_method: holds the methodOop of the method to call
1343 1343 //      Lesp:      points to the TOS of the caller's expression stack
1344 1344 // after having pushed all the parameters
1345 1345 //
1346 1346 // The entry code does the following to set up an interpreter frame:
1347 1347 // pop parameters from the callers stack by adjusting Lesp
1348 1348 // set O0 to Lesp
1349 1349 // compute X = (max_locals - num_parameters)
1350 1350 //    bump SP up by X to accommodate the extra locals
1351 1351 // compute X = max_expression_stack
1352 1352 // + vm_local_words
1353 1353 // + 16 words of register save area
1354 1354 // save frame doing a save sp, -X, sp growing towards lower addresses
1355 1355 // set Lbcp, Lmethod, LcpoolCache
1356 1356 // set Llocals to i0
1357 1357 // set Lmonitors to FP - rounded_vm_local_words
1358 1358 // set Lesp to Lmonitors - 4
1359 1359 //
1360 1360 // The frame has now been set up to do the rest of the entry code
1361 1361
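// Editor's sketch (not from the original source): the sizing arithmetic the
// pseudocode above describes, restated as plain C++. The constants are
// illustrative assumptions; in the VM they come from the methodOop and the
// frame:: layout constants, and each piece is additionally rounded to
// WordsPerLong.
//
//   struct FrameSizes { int caller_sp_bump_words; int new_frame_words; };
//
//   FrameSizes sketch_entry_sizes(int max_locals, int num_parameters,
//                                 int max_expression_stack) {
//     const int vm_local_words = 8;   // assumed stand-in for frame::interpreter_frame_vm_local_words
//     const int reg_save_words = 16;  // SPARC register window save area
//     FrameSizes s;
//     s.caller_sp_bump_words = max_locals - num_parameters;   // first "X" above
//     s.new_frame_words      = max_expression_stack           // second "X" above
//                            + vm_local_words + reg_save_words;
//     return s;  // the entry code then issues: save sp, -X*wordSize, sp
//   }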
1362 1362 // Try this optimization: Most method entries could live in a
1363 1363 // "one size fits all" stack frame without all the dynamic size
1364 1364 // calculations. It might be profitable to do all this calculation
1365 1365 // statically and approximately for "small enough" methods.
1366 1366
1367 1367 //-----------------------------------------------------------------------------------------------
1368 1368
1369 1369 // C1 Calling conventions
1370 1370 //
1371 1371 // Upon method entry, the following registers are setup:
1372 1372 //
1373 1373 // g2 G2_thread: current thread
1374 1374 // g5 G5_method: method to activate
1375 1375 // g4 Gargs : pointer to last argument
1376 1376 //
1377 1377 //
1378 1378 // Stack:
1379 1379 //
1380 1380 // +---------------+ <--- sp
1381 1381 // | |
1382 1382 // : reg save area :
1383 1383 // | |
1384 1384 // +---------------+ <--- sp + 0x40
1385 1385 // | |
1386 1386 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
1387 1387 // | |
1388 1388 // +---------------+ <--- sp + 0x5c
1389 1389 // | |
1390 1390 // : free :
1391 1391 // | |
1392 1392 // +---------------+ <--- Gargs
1393 1393 // | |
1394 1394 // : arguments :
1395 1395 // | |
1396 1396 // +---------------+
1397 1397 // | |
1398 1398 //
1399 1399 //
1400 1400 //
1401 1401 // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
1402 1402 //
1403 1403 // +---------------+ <--- sp
1404 1404 // | |
1405 1405 // : reg save area :
1406 1406 // | |
1407 1407 // +---------------+ <--- sp + 0x40
1408 1408 // | |
1409 1409 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
1410 1410 // | |
1411 1411 // +---------------+ <--- sp + 0x5c
1412 1412 // | |
1413 1413 // : :
1414 1414 // | | <--- Lesp
1415 1415 // +---------------+ <--- Lmonitors (fp - 0x18)
1416 1416 // | VM locals |
1417 1417 // +---------------+ <--- fp
1418 1418 // | |
1419 1419 // : reg save area :
1420 1420 // | |
1421 1421 // +---------------+ <--- fp + 0x40
1422 1422 // | |
1423 1423 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
1424 1424 // | |
1425 1425 // +---------------+ <--- fp + 0x5c
1426 1426 // | |
1427 1427 // : free :
1428 1428 // | |
1429 1429 // +---------------+
1430 1430 // | |
1431 1431 // : nonarg locals :
1432 1432 // | |
1433 1433 // +---------------+
1434 1434 // | |
1435 1435 // : arguments :
1436 1436 // | | <--- Llocals
1437 1437 // +---------------+ <--- Gargs
1438 1438 // | |
1439 1439
1440 1440 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
1441 1441
1442 1442 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
1443 1443 // expression stack, the callee will have callee_extra_locals (so we can account for
1444 1444 // frame extension) and monitor_size for monitors. Basically we need to calculate
1445 1445 // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
1446 1446 //
1447 1447 //
1448 1448 // The big complication here is that we must ensure that the stack stays properly
1449 1449 // aligned. This would be even uglier if the monitor size weren't a multiple of what
1450 1450 // the stack needs to be aligned to. We are given that the sp (fp) is already aligned by
1451 1451 // the caller, so we must ensure that it is properly aligned for our callee.
1452 1452 //
1453 1453 const int rounded_vm_local_words =
1454 1454 round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1455 1455 // callee_extra_locals and max_stack are counts, not sizes in words.
1456 1456 const int locals_size =
1457 1457 round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
1458 1458 const int max_stack_words = max_stack * Interpreter::stackElementWords();
1459 1459 return (round_to((max_stack_words
1460 1460 //6815692//+ methodOopDesc::extra_stack_words()
1461 1461 + rounded_vm_local_words
1462 1462 + frame::memory_parameter_word_sp_offset), WordsPerLong)
1463 1463 // already rounded
1464 1464 + locals_size + monitor_size);
1465 1465 }
1466 1466
1467 1467 // How much stack a top interpreter activation for a given method needs, in words.
1468 1468 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1469 1469
1470 1470 // See call_stub code
1471 1471 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
1472 1472 WordsPerLong); // 7 + register save area
1473 1473
1474 1474 // Save space for one monitor to get into the interpreted method in case
1475 1475 // the method is synchronized
1476 1476 int monitor_size = method->is_synchronized() ?
1477 1477 1*frame::interpreter_frame_monitor_size() : 0;
1478 1478 return size_activation_helper(method->max_locals(), method->max_stack(),
1479 1479 monitor_size) + call_stub_size;
1480 1480 }
1481 1481
1482 1482 int AbstractInterpreter::layout_activation(methodOop method,
1483 1483 int tempcount,
1484 1484 int popframe_extra_args,
1485 1485 int moncount,
1486 1486 int callee_param_count,
1487 1487 int callee_local_count,
1488 1488 frame* caller,
1489 1489 frame* interpreter_frame,
1490 1490 bool is_top_frame) {
1491 1491 // Note: This calculation must exactly parallel the frame setup
1492 1492 // in InterpreterGenerator::generate_fixed_frame.
1493 1493 // If f!=NULL, set up the following variables:
1494 1494 // - Lmethod
1495 1495 // - Llocals
1496 1496 // - Lmonitors (to the indicated number of monitors)
1497 1497 // - Lesp (to the indicated number of temps)
1498 1498 // The frame f (if not NULL) on entry is a description of the caller of the frame
1499 1499 // we are about to lay out. We are guaranteed that we will be able to fill in a
1500 1500 // new interpreter frame as its callee (i.e. the stack space is allocated and
1501 1501 // the amount was determined by an earlier call to this method with f == NULL).
1502 1502 // On return f (if not NULL) will describe the interpreter frame we just laid out.
1503 1503
1504 1504 int monitor_size = moncount * frame::interpreter_frame_monitor_size();
1505 1505 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1506 1506
1507 1507 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
1508 1508 //
1509 1509 // Note: if you look closely this appears to be doing something much different
1510 1510 // than generate_fixed_frame. What is happening is this. On sparc we have to do
1511 1511 // this dance with interpreter_sp_adjustment because the window save area would
1512 1512 // appear just below the bottom (tos) of the caller's java expression stack. Because
1513 1513 // the interpreter wants the locals completely contiguous, generate_fixed_frame
1514 1514 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
1515 1515 // In generate_fixed_frame the extension of the caller's sp happens in the callee.
1516 1516 // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
1517 1517 // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
1518 1518 // because the oldest frame would have adjusted its caller's frame and yet that frame
1519 1519 // already exists and isn't part of this array of frames we are unpacking. So at first
1520 1520 // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
1521 1521 // after it calculates all of the frames' on_stack_size()'s, will figure out the
1522 1522 // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
1523 1523 // add up. It might seem simpler to account for the adjustment here (and remove the
1524 1524 // callee... parameters). However, this would mean that this routine would have to take
1525 1525 // the caller frame as input so that we could adjust its sp (and set its interpreter_sp_adjustment),
1526 1526 // and run the calling loop in the reverse order. It would also appear to mean making
1527 1527 // this code aware of the interactions when that initial caller frame was an osr or
1528 1528 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
1529 1529 // there is no sense in messing with working code.
1530 1530 //
1531 1531
1532 1532 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1533 1533 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1534 1534
1535 1535 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1536 1536 monitor_size);
1537 1537
1538 1538 if (interpreter_frame != NULL) {
1539 1539 // The skeleton frame must already look like an interpreter frame
1540 1540 // even if not fully filled out.
1541 1541 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1542 1542
1543 1543 intptr_t* fp = interpreter_frame->fp();
1544 1544
1545 1545 JavaThread* thread = JavaThread::current();
1546 1546 RegisterMap map(thread, false);
1547 1547 // More verification that skeleton frame is properly walkable
1548 1548 assert(fp == caller->sp(), "fp must match");
1549 1549
1550 1550 intptr_t* montop = fp - rounded_vm_local_words;
1551 1551
1552 1552 // preallocate monitors (cf. __ add_monitor_to_stack)
1553 1553 intptr_t* monitors = montop - monitor_size;
1554 1554
1555 1555 // preallocate stack space
1556 1556 intptr_t* esp = monitors - 1 -
1557 1557 (tempcount * Interpreter::stackElementWords()) -
1558 1558 popframe_extra_args;
1559 1559
1560 1560 int local_words = method->max_locals() * Interpreter::stackElementWords();
1561 1561 int parm_words = method->size_of_parameters() * Interpreter::stackElementWords();
1562 1562 NEEDS_CLEANUP;
1563 1563 intptr_t* locals;
1564 1564 if (caller->is_interpreted_frame()) {
1565 1565 // Can force the locals area to end up properly overlapping the top of the expression stack.
1566 1566 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1567 1567 // Note that this computation means we replace size_of_parameters() values from the caller
1568 1568 // interpreter frame's expression stack with our argument locals
1569 1569 locals = Lesp_ptr + parm_words;
1570 1570 int delta = local_words - parm_words;
1571 1571 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
1572 1572 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
1573 1573 } else {
1574 1574 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1575 1575 // Don't have Lesp available; lay out locals block in the caller
1576 1576 // adjacent to the register window save area.
1577 1577 //
1578 1578 // Compiled frames do not allocate a varargs area which is why this if
1579 1579 // statement is needed.
1580 1580 //
1581 1581 if (caller->is_compiled_frame()) {
1582 1582 locals = fp + frame::register_save_words + local_words - 1;
1583 1583 } else {
1584 1584 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1585 1585 }
1586 1586 if (!caller->is_entry_frame()) {
1587 1587 // The caller wants its own SP back
1588 1588 int caller_frame_size = caller->cb()->frame_size();
1589 1589 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1590 1590 }
1591 1591 }
1592 1592 if (TraceDeoptimization) {
1593 1593 if (caller->is_entry_frame()) {
1594 1594 // make sure I5_savedSP and the entry frame's notion of saved SP
1595 1595 // agree. This assertion duplicates a check in entry frame code
1596 1596 // but catches the failure earlier.
1597 1597 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
1598 1598 "would change callers SP");
1599 1599 }
1600 1600 if (caller->is_entry_frame()) {
1601 1601 tty->print("entry ");
1602 1602 }
1603 1603 if (caller->is_compiled_frame()) {
1604 1604 tty->print("compiled ");
1605 1605 if (caller->is_deoptimized_frame()) {
1606 1606 tty->print("(deopt) ");
1607 1607 }
1608 1608 }
1609 1609 if (caller->is_interpreted_frame()) {
1610 1610 tty->print("interpreted ");
1611 1611 }
1612 1612 tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
1613 1613 tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
1614 1614 tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
1615 1615 tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
1616 1616 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
1617 1617 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
1618 1618 tty->print_cr("Llocals = 0x%x", locals);
1619 1619 tty->print_cr("Lesp = 0x%x", esp);
1620 1620 tty->print_cr("Lmonitors = 0x%x", monitors);
1621 1621 }
1622 1622
1623 1623 if (method->max_locals() > 0) {
1624 1624 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1625 1625 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1626 1626 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1627 1627 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1628 1628 }
1629 1629 #ifdef _LP64
1630 1630 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1631 1631 #endif
1632 1632
1633 1633 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
1634 1634 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
1635 1635 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
1636 1636 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
1637 1637 // Llast_SP will be same as SP as there is no adapter space
1638 1638 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1639 1639 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1640 1640 #ifdef FAST_DISPATCH
1641 1641 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1642 1642 #endif
1643 1643
1644 1644
1645 1645 #ifdef ASSERT
1646 1646 BasicObjectLock* mp = (BasicObjectLock*)monitors;
1647 1647
1648 1648 assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1649 1649 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
1650 1650 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
1651 1651 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1652 1652 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1653 1653
1654 1654 // check bounds
1655 1655 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1656 1656 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1657 1657 assert(lo < monitors && montop <= hi, "monitors in bounds");
1658 1658 assert(lo <= esp && esp < monitors, "esp in bounds");
1659 1659 #endif // ASSERT
1660 1660 }
1661 1661
1662 1662 return raw_frame_size;
1663 1663 }
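// Editor's sketch (inferred from the comment above, not code in this change):
// deoptimization uses this routine in two passes. The names 'method', 'caller',
// 'skeleton', and the counts are hypothetical stand-ins for the values the
// deoptimization code actually supplies.
//
//   // Pass 1: interpreter_frame == NULL, only the frame size (in words) is computed.
//   int words = AbstractInterpreter::layout_activation(
//       method, tempcount, popframe_extra_args, moncount,
//       callee_param_count, callee_local_count,
//       caller, NULL /* size only */, is_top_frame);
//
//   // ...the deoptimization code allocates 'words' of stack and builds a
//   // skeleton interpreter frame there...
//
//   // Pass 2: same arguments, now filling in the skeleton's saved registers
//   // (Lmethod, Llocals, Lmonitors, Lesp, ...).
//   AbstractInterpreter::layout_activation(
//       method, tempcount, popframe_extra_args, moncount,
//       callee_param_count, callee_local_count,
//       caller, skeleton, is_top_frame);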
1664 1664
1665 1665 //----------------------------------------------------------------------------------------------------
1666 1666 // Exceptions
1667 1667 void TemplateInterpreterGenerator::generate_throw_exception() {
1668 1668
1669 1669 // Entry point in previous activation (i.e., if the caller was interpreted)
1670 1670 Interpreter::_rethrow_exception_entry = __ pc();
1671 1671 // O0: exception
1672 1672
1673 1673 // entry point for exceptions thrown within interpreter code
1674 1674 Interpreter::_throw_exception_entry = __ pc();
1675 1675 __ verify_thread();
1676 1676 // expression stack is undefined here
1677 1677 // O0: exception, i.e. Oexception
1678 1678 // Lbcp: exception bcx
1679 1679 __ verify_oop(Oexception);
1680 1680
1681 1681
1682 1682 // expression stack must be empty before entering the VM in case of an exception
1683 1683 __ empty_expression_stack();
1684 1684 // find exception handler address and preserve exception oop
1685 1685 // call C routine to find handler and jump to it
1686 1686 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1687 1687 __ push_ptr(O1); // push exception for exception handler bytecodes
1688 1688
1689 1689 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1690 1690 __ delayed()->nop();
1691 1691
1692 1692
1693 1693 // if the exception is not handled in the current frame
1694 1694 // the frame is removed and the exception is rethrown
1695 1695 // (i.e. exception continuation is _rethrow_exception)
1696 1696 //
1697 1697 // Note: At this point Lbcp still points at the instruction which caused
1698 1698 // the exception and the expression stack is empty. Thus, for any VM calls
1699 1699 // at this point, GC will find a legal oop map (with empty expression stack).
1700 1700
1701 1701 // in current activation
1702 1702 // tos: exception
1703 1703 // Lbcp: exception bcp
1704 1704
1705 1705 //
1706 1706 // JVMTI PopFrame support
1707 1707 //
1708 1708
1709 1709 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1710 1710 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1711 1711 // Set the popframe_processing bit in popframe_condition indicating that we are
1712 1712 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1713 1713 // popframe handling cycles.
1714 1714
1715 1715 __ ld(popframe_condition_addr, G3_scratch);
1716 1716 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1717 1717 __ stw(G3_scratch, popframe_condition_addr);
1718 1718
1719 1719 // Empty the expression stack, as in normal exception handling
1720 1720 __ empty_expression_stack();
1721 1721 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1722 1722
1723 1723 {
1724 1724 // Check to see whether we are returning to a deoptimized frame.
1725 1725 // (The PopFrame call ensures that the caller of the popped frame is
1726 1726 // either interpreted or compiled and deoptimizes it if compiled.)
1727 1727 // In this case, we can't call dispatch_next() after the frame is
1728 1728 // popped, but instead must save the incoming arguments and restore
1729 1729 // them after deoptimization has occurred.
1730 1730 //
1731 1731 // Note that we don't compare the return PC against the
1732 1732 // deoptimization blob's unpack entry because of the presence of
1733 1733 // adapter frames in C2.
1734 1734 Label caller_not_deoptimized;
1735 1735 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1736 1736 __ tst(O0);
1737 1737 __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
1738 1738 __ delayed()->nop();
1739 1739
1740 1740 const Register Gtmp1 = G3_scratch;
1741 1741 const Register Gtmp2 = G1_scratch;
1742 1742
1743 1743 // Compute size of arguments for saving when returning to deoptimized caller
1744 1744 __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
1745 1745 __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
1746 1746 __ sub(Llocals, Gtmp1, Gtmp2);
1747 1747 __ add(Gtmp2, wordSize, Gtmp2);
1748 1748 // Save these arguments
1749 1749 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1750 1750 // Inform deoptimization that it is responsible for restoring these arguments
1751 1751 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1752 1752 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1753 1753 __ st(Gtmp1, popframe_condition_addr);
1754 1754
1755 1755 // Return from the current method
1756 1756 // The caller's SP was adjusted upon method entry to accommodate
1757 1757 // the callee's non-argument locals. Undo that adjustment.
1758 1758 __ ret();
1759 1759 __ delayed()->restore(I5_savedSP, G0, SP);
1760 1760
1761 1761 __ bind(caller_not_deoptimized);
1762 1762 }
1763 1763
1764 1764 // Clear the popframe condition flag
1765 1765 __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1766 1766
1767 1767 // Get out of the current method (how this is done depends on the particular compiler calling
1768 1768 // convention that the interpreter currently follows)
1769 1769 // The caller's SP was adjusted upon method entry to accommodate
1770 1770 // the callee's non-argument locals. Undo that adjustment.
1771 1771 __ restore(I5_savedSP, G0, SP);
1772 1772 // The method data pointer was incremented already during
1773 1773 // call profiling. We have to restore the mdp for the current bcp.
1774 1774 if (ProfileInterpreter) {
1775 1775 __ set_method_data_pointer_for_bcp();
1776 1776 }
1777 1777 // Resume bytecode interpretation at the current bcp
1778 1778 __ dispatch_next(vtos);
1779 1779 // end of JVMTI PopFrame support
1780 1780
1781 1781 Interpreter::_remove_activation_entry = __ pc();
1782 1782
1783 1783 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1784 1784 __ pop_ptr(Oexception); // get exception
1785 1785
1786 1786 // Intel has the following comment:
1787 1787 //// remove the activation (without doing throws on illegalMonitorExceptions)
1788 1788 // They remove the activation without checking for bad monitor state.
1789 1789 // %%% We should make sure this is the right semantics before implementing.
1790 1790
1791 1791 // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
1792 1792 __ set_vm_result(Oexception);
1793 1793 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1794 1794
1795 1795 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1796 1796
1797 1797 __ get_vm_result(Oexception);
1798 1798 __ verify_oop(Oexception);
1799 1799
1800 1800 const int return_reg_adjustment = frame::pc_return_offset;
1801 1801 Address issuing_pc_addr(I7, return_reg_adjustment);
1802 1802
1803 1803 // We are done with this activation frame; find out where to go next.
1804 1804 // The continuation point will be an exception handler, which expects
1805 1805 // the following registers set up:
1806 1806 //
1807 1807 // Oexception: exception
1808 1808 // Oissuing_pc: the local call that threw exception
1809 1809 // Other On: garbage
1810 1810 // In/Ln: the contents of the caller's register window
1811 1811 //
1812 1812 // We do the required restore at the last possible moment, because we
1813 1813 // need to preserve some state across a runtime call.
1814 1814 // (Remember that the caller activation is unknown--it might not be
1815 1815 // interpreted, so things like Lscratch are useless in the caller.)
1816 1816
1817 1817 // Although the Intel version uses call_C, we can use the more
1818 1818 // compact call_VM. (The only real difference on SPARC is a
1819 1819 // harmlessly ignored [re]set_last_Java_frame, compared with
1820 1820 // the Intel code which lacks this.)
1821 1821 __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore
1822 1822 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
1823 1823 __ super_call_VM_leaf(L7_thread_cache,
1824 1824 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1825 - Oissuing_pc->after_save());
1825 + G2_thread, Oissuing_pc->after_save());
1826 1826
1827 1827 // The caller's SP was adjusted upon method entry to accommodate
1828 1828 // the callee's non-argument locals. Undo that adjustment.
1829 1829 __ JMP(O0, 0); // return exception handler in caller
1830 1830 __ delayed()->restore(I5_savedSP, G0, SP);
1831 1831
1832 1832 // (same old exception object is already in Oexception; see above)
1833 1833 // Note that an "issuing PC" is actually the next PC after the call
1834 1834 }
1835 1835
1836 1836
1837 1837 //
1838 1838 // JVMTI ForceEarlyReturn support
1839 1839 //
1840 1840
1841 1841 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1842 1842 address entry = __ pc();
1843 1843
1844 1844 __ empty_expression_stack();
1845 1845 __ load_earlyret_value(state);
1846 1846
1847 1847 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1848 1848 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1849 1849
1850 1850 // Clear the earlyret state
1851 1851 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1852 1852
1853 1853 __ remove_activation(state,
1854 1854 /* throw_monitor_exception */ false,
1855 1855 /* install_monitor_exception */ false);
1856 1856
1857 1857 // The caller's SP was adjusted upon method entry to accommodate
1858 1858 // the callee's non-argument locals. Undo that adjustment.
1859 1859 __ ret(); // return to caller
1860 1860 __ delayed()->restore(I5_savedSP, G0, SP);
1861 1861
1862 1862 return entry;
1863 1863 } // end of JVMTI ForceEarlyReturn support
1864 1864
1865 1865
1866 1866 //------------------------------------------------------------------------------------------------------------------------
1867 1867 // Helper for vtos entry point generation
1868 1868
1869 1869 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1870 1870 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1871 1871 Label L;
1872 1872 aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
1873 1873 fep = __ pc(); __ push_f(); __ ba(false, L); __ delayed()->nop();
1874 1874 dep = __ pc(); __ push_d(); __ ba(false, L); __ delayed()->nop();
1875 1875 lep = __ pc(); __ push_l(); __ ba(false, L); __ delayed()->nop();
1876 1876 iep = __ pc(); __ push_i();
1877 1877 bep = cep = sep = iep; // there aren't any
1878 1878 vep = __ pc(); __ bind(L); // fall through
1879 1879 generate_and_dispatch(t);
1880 1880 }
1881 1881
1882 1882 // --------------------------------------------------------------------------------
1883 1883
1884 1884
1885 1885 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1886 1886 : TemplateInterpreterGenerator(code) {
1887 1887 generate_all(); // down here so it can be "virtual"
1888 1888 }
1889 1889
1890 1890 // --------------------------------------------------------------------------------
1891 1891
1892 1892 // Non-product code
1893 1893 #ifndef PRODUCT
1894 1894 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1895 1895 address entry = __ pc();
1896 1896
1897 1897 __ push(state);
1898 1898 __ mov(O7, Lscratch); // protect return address within interpreter
1899 1899
1900 1900 // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
1901 1901 __ mov( Otos_l2, G3_scratch );
1902 1902 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1903 1903 __ mov(Lscratch, O7); // restore return address
1904 1904 __ pop(state);
1905 1905 __ retl();
1906 1906 __ delayed()->nop();
1907 1907
1908 1908 return entry;
1909 1909 }
1910 1910
1911 1911
1912 1912 // helpers for generate_and_dispatch
1913 1913
1914 1914 void TemplateInterpreterGenerator::count_bytecode() {
1915 1915 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1916 1916 }
1917 1917
1918 1918
1919 1919 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1920 1920 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1921 1921 }
1922 1922
1923 1923
1924 1924 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1925 1925 AddressLiteral index (&BytecodePairHistogram::_index);
1926 1926 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1927 1927
1928 1928 // get index, shift out old bytecode, bring in new bytecode, and store it
1929 1929 // _index = (_index >> log2_number_of_codes) |
1930 1930 // (bytecode << log2_number_of_codes);
1931 1931
1932 1932 __ load_contents(index, G4_scratch);
1933 1933 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1934 1934 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
1935 1935 __ or3( G3_scratch, G4_scratch, G4_scratch );
1936 1936 __ store_contents(G4_scratch, index, G3_scratch);
1937 1937
1938 1938 // bump bucket contents
1939 1939 // _counters[_index] ++;
1940 1940
1941 1941 __ set(counters, G3_scratch); // loads into G3_scratch
1942 1942 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
1943 1943 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
1944 1944 __ ld (G3_scratch, 0, G4_scratch);
1945 1945 __ inc (G4_scratch);
1946 1946 __ st (G4_scratch, 0, G3_scratch);
1947 1947 }
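// Editor's sketch (plain C++, with an assumed table size) of the update the
// assembly above performs: the index packs the previous and current bytecodes,
// and the matching bucket is incremented.
//
//   enum { log2_codes = 8, codes = 1 << log2_codes };  // assumed; cf. BytecodePairHistogram
//   static unsigned pair_index;                        // packed (current, previous) pair
//   static long     pair_counters[codes * codes];
//
//   void record_bytecode_pair(unsigned bytecode) {
//     // shift out the old bytecode, bring in the new one (the commented
//     // formula above)
//     pair_index = (pair_index >> log2_codes) | (bytecode << log2_codes);
//     pair_counters[pair_index]++;                     // bump bucket contents
//   }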
1948 1948
1949 1949
1950 1950 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1951 1951 // Call a little run-time stub to avoid blow-up for each bytecode.
1952 1952 // The run-time stub saves the right registers, depending on
1953 1953 // the tosca in-state for the given template.
1954 1954 address entry = Interpreter::trace_code(t->tos_in());
1955 1955 guarantee(entry != NULL, "entry must have been generated");
1956 1956 __ call(entry, relocInfo::none);
1957 1957 __ delayed()->nop();
1958 1958 }
1959 1959
1960 1960
1961 1961 void TemplateInterpreterGenerator::stop_interpreter_at() {
1962 1962 AddressLiteral counter(&BytecodeCounter::_counter_value);
1963 1963 __ load_contents(counter, G3_scratch);
1964 1964 AddressLiteral stop_at(&StopInterpreterAt);
1965 1965 __ load_ptr_contents(stop_at, G4_scratch);
1966 1966 __ cmp(G3_scratch, G4_scratch);
1967 1967 __ breakpoint_trap(Assembler::equal);
1968 1968 }
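// Editor's note: StopInterpreterAt is a develop flag, so this helper is only
// active in debug builds of the VM. Illustrative usage (the flag value is
// arbitrary):
//
//   java -XX:StopInterpreterAt=100000 ...
//
// which hits the breakpoint trap above once the global bytecode counter
// reaches 100000.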
1969 1969 #endif // not PRODUCT
1970 1970 #endif // !CC_INTERP