 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
// Note: FAST_DISPATCH is #undef'd immediately after being defined, so the
// FAST_DISPATCH code paths below are currently compiled out.
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  AddressLiteral al(entry);
  jump_to(al, G3_scratch);
  delayed()->nop();
}

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}

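// Worked example (illustrative, assuming 64-bit words so LogBytesPerWord == 3):
// with args_size == 3 words and locals_size == 7 words, delta becomes 4 words;
// round_to(4, WordsPerLong) leaves it at 4, and the shift yields 32 bytes.
// If locals_size <= args_size, the branch above clamps delta to 0 instead.
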
#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch.  The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                         // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  set(tbl, G3_scratch);                                  // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);      // get entry addr

  // ...
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}
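
// Note: Interpreter::dispatch_table(state) returns the currently *active*
// dispatch table, which the VM switches to a safepoint table when needed,
// whereas normal_table(state) always refers to the default table.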


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

// ...
    case ftos:  // fall through
    case dtos:  // fall through
    case vtos:  /* nothing to do */ break;
    default   : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1.  We can't tell whether we're returning
    // to interpreted or compiled code, so to be safe keep the result in both
    // G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            which must be initialized with the object to lock.
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // ...
    }

    // We have the displaced header in displaced_header_reg.  If the lock is
    // still a lightweight lock, we expect to see the stack address of the
    // BasicLock (lock_reg) in the mark word.
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}
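
// The lightweight-unlock fast path above, sketched as pseudo-C++ (illustrative
// only; cas() is shorthand for the atomic compare-and-swap emitted by cas_ptr,
// not a HotSpot API):
//
//   // displaced_header_reg holds the displaced mark word saved at lock time
//   if (cas(obj->mark_addr(), /* expected */ lock_reg, displaced_header_reg)) {
//     // mark word still pointed at our BasicLock: lightweight unlock done;
//     // the delay-slot store clears the BasicObjectLock's obj field
//   } else {
//     InterpreterRuntime::monitorexit(thread, lock);  // inflated: slow path
//   }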

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}
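
// Equivalent logic as pseudo-C++ (illustrative sketch):
//   MethodData* mdo = method->method_data();
//   if (mdo != NULL) {
//     ImethodDataPtr = (address)mdo + in_bytes(MethodData::data_offset());
//   }  // otherwise ImethodDataPtr stays NULL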

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.

  // ...
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base(Register Rdest) {
  add(Lesp, wordSize, Rdest);
}

#endif /* CC_INTERP */

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  Label has_counters;
  Address method_counters(method, in_bytes(Method::method_counters_offset()));
  ld_ptr(method_counters, Rcounters);
  br_notnull_short(Rcounters, Assembler::pt, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::build_method_counters), method);
  ld_ptr(method_counters, Rcounters);
  br_null(Rcounters, false, Assembler::pn, skip);  // No MethodCounters, OutOfMemory
  delayed()->nop();
  bind(has_counters);
}
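
// Control flow as pseudo-C++ (illustrative sketch; build_method_counters is
// the runtime entry invoked via call_VM above):
//   MethodCounters* mcs = method->method_counters();
//   if (mcs == NULL) {
//     mcs = build_method_counters(method);  // may fail under memory pressure
//     if (mcs == NULL) goto skip;           // OutOfMemoryError pending
//   }
//   // fall through with mcs in Rcounters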

void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register Rtmp, Register Rtmp2) {
  assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +

  // ...
  int delta = InvocationCounter::count_increment;
  // Load each counter in a register
  ld( be_counter, Rtmp );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // and store the result to memory
  st( Rtmp, be_counter );

  // Add backedge + invocation counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
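
// Net effect of the snippet above, as pseudo-C++ (illustrative):
//   backedge_counter += InvocationCounter::count_increment;  // written back
//   Rtmp = backedge_counter
//          + (invocation_counter & InvocationCounter::count_mask_value);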

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count,
                                                            Register method_counters,
                                                            Register branch_bcp,
                                                            Register Rtmp) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
  ld(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, whose value does not get reset on the call to
  // frequency_counter_overflow().  To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;

    // ...
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  ld(mask_addr, scratch2);
  andcc(scratch1, scratch2, G0);
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}
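
// C-style equivalent (illustrative sketch; <cond> stands for the passed-in
// condition).  The store sits in the branch delay slot with the annul bit
// clear, so the updated counter is written back whether or not the branch
// is taken:
//   int c = *counter_addr + increment;
//   *counter_addr = c;
//   if ((c & *mask_addr) <cond> 0) goto *where;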
#endif /* CC_INTERP */

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);

  // ...
}


// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp
// Java methods have their result stored in the expression stack

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {
  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch.  If this changes then the interpreter_frame_result
    // implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // DTrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  // result potentially in O0/O1: save it across calls
  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  stx(O0, STATE(_native_lresult));
#else
  std(O0, STATE(_native_lresult));
#endif
#else // CC_INTERP
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
#endif // CC_INTERP
}

void InterpreterMacroAssembler::restore_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  ldx(STATE(_native_lresult), O0);
#else
  ldd(STATE(_native_lresult), O0);
#endif
#else // CC_INTERP
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
#endif // CC_INTERP
}
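
// Note: both paths save and reload F0 as a double.  On SPARC a float result
// is returned in F0 and a double in the F0:F1 pair, so storing the full
// double register conservatively covers either case.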