#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}
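
// Sketch (not generated code): the stack adjustment above, written as plain
// C, assuming 'flags' holds the ConstantPoolCacheEntry flags word:
//
//   int params = flags & ConstantPoolCacheEntry::parameter_size_mask;
//   rsp += params * Interpreter::stackElementSize;  // pop callee arguments
//
// e.g. a callee with 3 parameter slots bumps rsp by 3 * 8 bytes on x86_64
// before dispatching to the next bytecode.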


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}
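
// Sketch (not generated code) of what the narrowing result handlers do,
// assuming the raw native return value sits in the low bits of rax:
//
//   T_BOOLEAN: rax = (rax != 0) ? 1 : 0;   // c2bool
//   T_CHAR   : rax = (uint16_t)rax;        // movzwl, zero-extend
//   T_BYTE   : rax = (int8_t)rax;          // sign_extend_byte
//   T_SHORT  : rax = (int16_t)rax;         // sign_extend_short
//
// T_OBJECT is special: the oop was spilled to the frame's oop_temp slot so
// GC could find it, and is reloaded (and verified) from there.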

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// ecx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}
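
// Sketch (not generated code) of the 'sticky' overflow test mentioned above.
// In the non-tiered path the invocation and backedge counts are summed and
// compared unsigned against the limit, so once the sum is large enough to
// look negative as a signed int it looks huge unsigned, and every later
// check also trips:
//
//   uint32_t sum = invocation_count + backedge_count;  // only ever grows
//   if (sum >= (uint32_t)limit) goto *overflow;        // sticky once true
//
// This is a sketch of the invariant, not the emitted instruction sequence.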

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                        (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, r13);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}
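
// Sketch (not generated code) of the bound computed above, with all names
// referring to the values the assembly loads:
//
//   uintptr_t limit = locals_bytes + overhead_size
//                   + (stack_base - stack_size)       // lowest stack address
//                   + max_pages * page_size;          // guard/shadow pages
//   if (rsp <= limit) goto throw_StackOverflowError;  // frame won't fit
//
// i.e. the frame is allowed only if rsp stays strictly above the usable
// bottom of the thread's stack plus the pages we might bang.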

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   rbx: Method*
//   r14: locals
//
// Kills:
//   rax
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ movptr(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}
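
// Sketch (not generated code): each monitor slot carved out of the frame by
// lock_method() is a BasicObjectLock, conceptually
//
//   struct BasicObjectLock {  // see runtime/basicLock.hpp
//     BasicLock _lock;        // displaced object header word
//     oop       _obj;         // the object being locked
//   };
//
// The frame grows by one such entry, _obj receives the receiver (or the
// class mirror for static methods), and the entry's address is handed to
// lock_object() to actually acquire the monitor.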

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//   rax: return address
//   rbx: Method*
//   r14: pointer to locals
//   r13: sender sp
//   rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(r13);            // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(r13, Address(rbx, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(r14); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(r13); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
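
// Sketch (not generated code): after generate_fixed_frame the activation
// looks roughly like this, one word per slot, higher addresses first:
//
//   [ return address     ]
//   [ saved rbp          ] <-- rbp
//   [ sender sp          ]
//   [ last_sp (NULL)     ]
//   [ Method*            ]
//   [ mdp or 0           ]
//   [ cp cache           ]
//   [ locals pointer     ]
//   [ bcp or 0           ]
//   [ expr. stack bottom ] <-- rsp (the slot points at itself initially)
//
// The frame::interpreter_frame_*_offset constants used throughout this file
// name these slots relative to rbp.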

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the receiver is null) so that
  //   we can log the value of the referent field in an SATB buffer.
  // * The G1 code can, however, check the receiver (the instance of
  //   java.lang.ref.Reference) and jump to the slow path if it is null,
  //   letting the regular method entry generate the NPE.
  //
  // rbx: Method*
  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            r15_thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;     // crc
    const Register val = c_rarg0; // source java byte value
    const Register tbl = c_rarg1; // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
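
// Sketch (not generated code): with the pre/post inversion done by the two
// notl instructions, the fast path above computes, in C terms,
//
//   uint32_t crc32_update(uint32_t crc, uint8_t b, const uint32_t* tbl) {
//     crc = ~crc;
//     crc = tbl[(crc ^ b) & 0xFF] ^ (crc >> 8); // one table-driven step
//     return ~crc;
//   }
//
// which is the standard (reflected) CRC-32 byte update that
// java.util.zip.CRC32.update(int crc, int b) specifies.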

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register off = len;     // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 5*wordSize));     // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 4*wordSize));     // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
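
// Sketch (not generated code): for updateBytes(int crc, byte[] b, int off,
// int len) the incoming Java expression stack looks like this relative to
// rsp (return address at rsp + 0, one word per slot):
//
//   rsp + 1*wordSize : len   (pushed last)
//   rsp + 2*wordSize : off
//   rsp + 3*wordSize : b
//   rsp + 4*wordSize : crc   (pushed first)
//
// On the updateByteBuffer path 'buf' is a long and occupies two slots,
// which is why crc is read from 5*wordSize there.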

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();
    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2;
    const Register off = c_rarg3; // offset
    const Register end = len;

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 5 * wordSize));     // Initial CRC
      // Note on 5 * wordSize vs. 4 * wordSize:
      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
      //                                                   4         2,3          1        0
      // end starts at SP + 8
      // The Java(R) Virtual Machine Specification Java SE 7 Edition
      // 4.10.2.3. Values of Types long and double
      //    "When calculating operand stack length, values of type long and double have length two."
    } else {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 4 * wordSize));     // Initial CRC
    }
    __ movl(end, Address(rsp, wordSize)); // end
    __ subl(end, off);                    // end - off
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax
    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }

  return NULL;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // r13: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r11;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpptr(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movptr(r13, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset()));    // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(c_rarg1, monitor); // address of first monitor

      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();     // remove frame anchor
  __ pop(rdi);    // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // r13: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}


// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin   : // fall thru
    case Interpreter::java_lang_math_cos   : // fall thru
    case Interpreter::java_lang_math_tan   : // fall thru
    case Interpreter::java_lang_math_abs   : // fall thru
    case Interpreter::java_lang_math_log   : // fall thru
    case Interpreter::java_lang_math_log10 : // fall thru
    case Interpreter::java_lang_math_sqrt  : // fall thru
    case Interpreter::java_lang_math_pow   : // fall thru
    case Interpreter::java_lang_math_exp   :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
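
// Worked example (hypothetical values): for a method with max_locals() == 4
// and max_stack() == 6, stackElementWords is 1 on x86_64, so
//
//   method_stack = (4 + 6) * 1 = 10 words
//
// and the activation needs overhead_size + 10 + stub_code words in total;
// callers convert words to bytes when sizing the actual stack.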

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase(); // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = r14;

    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
1689 __ bind(L_done);
1690 }
1691 #endif // INCLUDE_JVMTI
1692
1693 __ dispatch_next(vtos);
1694 // end of PopFrame support
1695
1696 Interpreter::_remove_activation_entry = __ pc();
1697
1698 // preserve exception over this code sequence
1699 __ pop_ptr(rax);
1700 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
1701 // remove the activation (without doing throws on illegalMonitorExceptions)
1702 __ remove_activation(vtos, rdx, false, true, false);
1703 // restore exception
1704 __ get_vm_result(rax, r15_thread);
1705
1706 // In between activations - previous activation type unknown yet
1707 // compute continuation point - the continuation point expects the
1708 // following registers set up:
1709 //
1710 // rax: exception
1711 // rdx: return address/pc that threw exception
1712 // rsp: expression stack of caller
1713 // rbp: ebp of caller
1714 __ push(rax); // save exception
1715 __ push(rdx); // save return address
1716 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1717 SharedRuntime::exception_handler_for_return_address),
1718 r15_thread, rdx);
1719 __ mov(rbx, rax); // save exception handler
1720 __ pop(rdx); // restore return address
1721 __ pop(rax); // restore exception
1722 // Note that an "issuing PC" is actually the next PC after the call
1723 __ jmp(rbx); // jump to exception
1724 // handler of caller
1725 }
1726
1727
1728 //
1729 // JVMTI ForceEarlyReturn support
1730 //
1731 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1732 address entry = __ pc();
1733
1734 __ restore_bcp();
1735 __ restore_locals();
1736 __ empty_expression_stack();
1737 __ load_earlyret_value(state);
1738
1739 __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
1740 Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
1741
1742 // Clear the earlyret state
1743 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1744
1745 __ remove_activation(state, rsi,
1746 false, /* throw_monitor_exception */
1747 false, /* install_monitor_exception */
1748 true); /* notify_jvmdi */
1749 __ jmp(rsi);
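// rsi holds the return address that remove_activation loaded, so this
// jump resumes the caller as if the method had returned normally with
// the forced early-return value.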
1750
1751 return entry;
1752 } // end of ForceEarlyReturn support
1753
1754
1755 //-----------------------------------------------------------------------------
1756 // Helper for vtos entry point generation
1757
1758 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1759 address& bep,
1760 address& cep,
1761 address& sep,
1762 address& aep,
1763 address& iep,
1764 address& lep,
1765 address& fep,
1766 address& dep,
1767 address& vep) {
1768 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1769 Label L;
1770 aep = __ pc(); __ push_ptr(); __ jmp(L);
1771 fep = __ pc(); __ push_f(xmm0); __ jmp(L);
1772 dep = __ pc(); __ push_d(xmm0); __ jmp(L);
1773 lep = __ pc(); __ push_l(); __ jmp(L);
1774 bep = cep = sep =
1775 iep = __ pc(); __ push_i();
1776 vep = __ pc();
1777 __ bind(L);
1778 generate_and_dispatch(t);
1779 }
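// Note: each typed entry above spills the tos value from its cache
// register (rax, or xmm0 for floats and doubles) onto the expression
// stack and falls through to the shared vtos entry at L, so a single
// vtos template body serves every incoming tos state.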
1780
1781
1782 //-----------------------------------------------------------------------------
1783 // Generation of individual instructions
1784
1785 // helpers for generate_and_dispatch
1786
1787
1788 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1789 : TemplateInterpreterGenerator(code) {
1790 generate_all(); // down here so it can be "virtual"
1791 }
1792
1793 //-----------------------------------------------------------------------------
1794
1795 // Non-product code
1796 #ifndef PRODUCT
1797 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1798 address entry = __ pc();
1799
1800 __ push(state);
1801 __ push(c_rarg0);
1802 __ push(c_rarg1);
1803 __ push(c_rarg2);
1804 __ push(c_rarg3);
1805 __ mov(c_rarg2, rax); // Pass itos
1806 #ifdef _WIN64
1807 __ movflt(xmm3, xmm0); // Pass ftos
1808 #endif
1809 __ call_VM(noreg,
1810 CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
1811 c_rarg1, c_rarg2, c_rarg3);
1812 __ pop(c_rarg3);
1813 __ pop(c_rarg2);
1814 __ pop(c_rarg1);
1815 __ pop(c_rarg0);
1816 __ pop(state);
1817 __ ret(0); // return from result handler
1818
1819 return entry;
1820 }
1821
1822 void TemplateInterpreterGenerator::count_bytecode() {
1823 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1824 }
1825
1826 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1827 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1828 }
1829
1830 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1831 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1832 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1833 __ orl(rbx,
1834 ((int) t->bytecode()) <<
1835 BytecodePairHistogram::log2_number_of_codes);
1836 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
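// _index now holds (previous bytecode) | (current bytecode << log2),
// so _counters below is a flat table indexed by the bytecode pair.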
1837 __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
1838 __ incrementl(Address(rscratch1, rbx, Address::times_4));
1839 }
1840
1841
1842 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1843 // Call a little run-time stub to avoid blow-up for each bytecode.
1844 // The run-time stub saves the right registers, depending on
1845 // the tosca in-state for the given template.
1846
1847 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1848 "entry must have been generated");
1849 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1850 __ andptr(rsp, -16); // align stack as required by ABI
1851 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1852 __ mov(rsp, r12); // restore sp
1853 __ reinit_heapbase();
1854 }
1855
1856
1857 void TemplateInterpreterGenerator::stop_interpreter_at() {
1858 Label L;
1859 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1860 StopInterpreterAt);
1861 __ jcc(Assembler::notEqual, L);
1862 __ int3(); // trap to the debugger when the counter hits StopInterpreterAt
1863 __ bind(L);
1864 }
1865 #endif // !PRODUCT
1866 #endif // ! CC_INTERP
34 #include "oops/methodData.hpp"
35 #include "oops/method.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/arguments.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "runtime/synchronizer.hpp"
45 #include "runtime/timer.hpp"
46 #include "runtime/vframeArray.hpp"
47 #include "utilities/debug.hpp"
48 #include "utilities/macros.hpp"
49
50 #define __ _masm->
51
52 #ifndef CC_INTERP
53
54 // Global Register Names
55 static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
56 static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
57
58 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
59 const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize;
60 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
61
62 //-----------------------------------------------------------------------------
63
64 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
65 address entry = __ pc();
66
67 #ifdef ASSERT
68 {
69 Label L;
70 __ lea(rax, Address(rbp,
71 frame::interpreter_frame_monitor_block_top_offset *
72 wordSize));
73 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
74 // grows negative)
75 __ jcc(Assembler::aboveEqual, L); // check if frame is complete
76 __ stop ("interpreter frame not set up");
77 __ bind(L);
78 }
79 #endif // ASSERT
80 // Restore bcp under the assumption that the current frame is still
81 // interpreted
82 __ restore_bcp();
83
84 // expression stack must be empty before entering the VM if an
85 // exception happened
86 __ empty_expression_stack();
87 // throw exception
88 __ call_VM(noreg,
89 CAST_FROM_FN_PTR(address,
90 InterpreterRuntime::throw_StackOverflowError));
91 return entry;
92 }
93
94 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
95 const char* name) {
96 address entry = __ pc();
97 // expression stack must be empty before entering the VM if an
98 // exception happened
99 __ empty_expression_stack();
100 // setup parameters
101 // ??? convention: expect aberrant index in register ebx
102 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
103 __ lea(rarg, ExternalAddress((address)name));
104 __ call_VM(noreg,
105 CAST_FROM_FN_PTR(address,
106 InterpreterRuntime::
107 throw_ArrayIndexOutOfBoundsException),
108 rarg, rbx);
109 return entry;
110 }
111
112 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
113 address entry = __ pc();
114
115 // object is at TOS
116 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
117 __ pop(rarg);
118
119 // expression stack must be empty before entering the VM if an
120 // exception happened
121 __ empty_expression_stack();
122
123 __ call_VM(noreg,
124 CAST_FROM_FN_PTR(address,
125 InterpreterRuntime::
126 throw_ClassCastException),
127 rarg);
128 return entry;
129 }
130
131 address TemplateInterpreterGenerator::generate_exception_handler_common(
132 const char* name, const char* message, bool pass_oop) {
133 assert(!pass_oop || message == NULL, "either oop or message but not both");
134 address entry = __ pc();
135
136 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
137 Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);
138
139 if (pass_oop) {
140 // object is at TOS
141 __ pop(rarg2);
142 }
143 // expression stack must be empty before entering the VM if an
144 // exception happened
145 __ empty_expression_stack();
146 // setup parameters
147 __ lea(rarg, ExternalAddress((address)name));
148 if (pass_oop) {
149 __ call_VM(rax, CAST_FROM_FN_PTR(address,
150 InterpreterRuntime::
151 create_klass_exception),
152 rarg, rarg2);
153 } else {
154 // kind of lame ExternalAddress can't take NULL because
155 // external_word_Relocation will assert.
156 if (message != NULL) {
157 __ lea(rarg2, ExternalAddress((address)message));
158 } else {
159 __ movptr(rarg2, NULL_WORD);
160 }
161 __ call_VM(rax,
162 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
163 rarg, rarg2);
164 }
165 // throw exception
166 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
167 return entry;
168 }
169
170
171 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
172 address entry = __ pc();
173 // NULL last_sp until next java call
174 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
175 __ dispatch_next(state);
176 return entry;
177 }
178
179
180 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
181 address entry = __ pc();
182
183 #ifndef _LP64
184 #ifdef COMPILER2
185 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
186 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
187 for (int i = 1; i < 8; i++) {
188 __ ffree(i);
189 }
190 } else if (UseSSE < 2) {
191 __ empty_FPU_stack();
192 }
193 #endif // COMPILER2
194 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
195 __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
196 } else {
197 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
198 }
199
200 if (state == ftos) {
201 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
202 } else if (state == dtos) {
203 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
204 }
205 #endif // _LP64
206
207 // Restore stack bottom in case i2c adjusted stack
208 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
209 // and NULL it as marker that esp is now tos until next java call
210 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
211
212 __ restore_bcp();
213 __ restore_locals();
214
215 if (state == atos) {
216 Register mdp = rbx;
217 Register tmp = rcx;
218 __ profile_return_type(mdp, rax, tmp);
219 }
220
221 const Register cache = rbx;
222 const Register index = rcx;
223 __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
224
225 const Register flags = cache;
226 __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
227 __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
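// flags now holds just the callee's parameter size in stack words;
// step rsp past those words to pop the arguments of the call.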
228 __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
229 __ dispatch_next(state, step);
230
231 return entry;
232 }
233
234
235 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
236 address entry = __ pc();
237
238 #ifndef _LP64
239 if (state == ftos) {
240 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
241 } else if (state == dtos) {
242 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
243 }
244 #endif // _LP64
245
246 // NULL last_sp until next java call
247 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
248 __ restore_bcp();
249 __ restore_locals();
250 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
251 NOT_LP64(__ get_thread(thread);)
252 #if INCLUDE_JVMCI
253 // Check if we need to take lock at entry of synchronized method.
254 if (UseJVMCICompiler) {
255 Label L;
256 __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
257 __ jcc(Assembler::zero, L);
258 // Clear flag.
259 __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
260 // Satisfy calling convention for lock_method().
261 __ get_method(rbx);
262 // Take lock.
263 lock_method();
264 __ bind(L);
265 }
266 #endif
267 // handle exceptions
268 {
269 Label L;
270 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
271 __ jcc(Assembler::zero, L);
272 __ call_VM(noreg,
273 CAST_FROM_FN_PTR(address,
274 InterpreterRuntime::throw_pending_exception));
275 __ should_not_reach_here();
276 __ bind(L);
277 }
278 __ dispatch_next(state, step);
279 return entry;
280 }
281
282 address TemplateInterpreterGenerator::generate_result_handler_for(
283 BasicType type) {
284 address entry = __ pc();
285 switch (type) {
286 case T_BOOLEAN: __ c2bool(rax); break;
287 #ifndef _LP64
288 case T_CHAR : __ andptr(rax, 0xFFFF); break;
289 #else
290 case T_CHAR : __ movzwl(rax, rax); break;
291 #endif // _LP64
292 case T_BYTE : __ sign_extend_byte(rax); break;
293 case T_SHORT : __ sign_extend_short(rax); break;
294 case T_INT : /* nothing to do */ break;
295 case T_LONG : /* nothing to do */ break;
296 case T_VOID : /* nothing to do */ break;
297 #ifndef _LP64
298 case T_DOUBLE :
299 case T_FLOAT :
300 { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
301 __ pop(t); // remove return address first
302 // Must return a result for interpreter or compiler. In SSE
303 // mode, results are returned in xmm0 and the FPU stack must
304 // be empty.
305 if (type == T_FLOAT && UseSSE >= 1) {
306 // Load ST0
307 __ fld_d(Address(rsp, 0));
308 // Store as float and empty fpu stack
309 __ fstp_s(Address(rsp, 0));
310 // and reload
311 __ movflt(xmm0, Address(rsp, 0));
312 } else if (type == T_DOUBLE && UseSSE >= 2 ) {
313 __ movdbl(xmm0, Address(rsp, 0));
314 } else {
315 // restore ST0
316 __ fld_d(Address(rsp, 0));
317 }
318 // and pop the temp
319 __ addptr(rsp, 2 * wordSize);
320 __ push(t); // restore return address
321 }
322 break;
323 #else
324 case T_FLOAT : /* nothing to do */ break;
325 case T_DOUBLE : /* nothing to do */ break;
326 #endif // _LP64
327
328 case T_OBJECT :
329 // retrieve result from frame
330 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
331 // and verify it
332 __ verify_oop(rax);
333 break;
334 default : ShouldNotReachHere();
335 }
336 __ ret(0); // return from result handler
337 return entry;
338 }
339
340 address TemplateInterpreterGenerator::generate_safept_entry_for(
341 TosState state,
342 address runtime_entry) {
343 address entry = __ pc();
344 __ push(state);
345 __ call_VM(noreg, runtime_entry);
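// The push above spills the tos value so the VM call may safepoint;
// afterwards the current bytecode is re-dispatched through the normal
// (non-safepoint) table with the value on the expression stack.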
346 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
347 return entry;
348 }
349
350
351
352 // Helpers for commoning out cases in the various type of method entries.
353 //
354
355
356 // increment invocation count & check for overflow
357 //
358 // Note: checking for negative value instead of overflow
359 // so we have a 'sticky' overflow test
360 //
361 // rbx: method
362 // rcx: invocation counter
363 //
364 void InterpreterGenerator::generate_counter_incr(
365 Label* overflow,
366 Label* profile_method,
367 Label* profile_method_continue) {
368 Label done;
369 // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
370 if (TieredCompilation) {
371 int increment = InvocationCounter::count_increment;
372 Label no_mdo;
373 if (ProfileInterpreter) {
374 // Are we profiling?
375 __ movptr(rax, Address(rbx, Method::method_data_offset()));
376 __ testptr(rax, rax);
377 __ jccb(Assembler::zero, no_mdo);
378 // Increment counter in the MDO
379 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
380 in_bytes(InvocationCounter::counter_offset()));
381 const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
382 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
422 if (ProfileInterpreter && profile_method != NULL) {
423 // Test to see if we should create a method data oop
424 __ movptr(rax, Address(rbx, Method::method_counters_offset()));
425 __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
426 __ jcc(Assembler::less, *profile_method_continue);
427
428 // if no method data exists, go to profile_method
429 __ test_method_data_pointer(rax, *profile_method);
430 }
431
432 __ movptr(rax, Address(rbx, Method::method_counters_offset()));
433 __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
434 __ jcc(Assembler::aboveEqual, *overflow);
435 __ bind(done);
436 }
437 }
438
439 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
440
441 // Asm interpreter on entry
442 // r14/rdi - locals
443 // r13/rsi - bcp
444 // rbx - method
445 // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
446 // rbp - interpreter frame
447
448 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
449 // Everything as it was on entry
450 // rdx is not restored. Doesn't appear to really be set.
451
452 // InterpreterRuntime::frequency_counter_overflow takes two
453 // arguments, the first (thread) is passed by call_VM, the second
454 // indicates if the counter overflow occurs at a backwards branch
455 // (NULL bcp). We pass zero for it. The call returns the address
456 // of the verified entry point for the method or NULL if the
457 // compilation did not complete (either went background or bailed
458 // out).
459 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
460 __ movl(rarg, 0);
461 __ call_VM(noreg,
462 CAST_FROM_FN_PTR(address,
463 InterpreterRuntime::frequency_counter_overflow),
464 rarg);
465
466 __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
467 // Preserve invariant that r13/r14 contain bcp/locals of sender frame
468 // and jump to the interpreted entry.
469 __ jmp(*do_continue, relocInfo::none);
470 }
471
472 // See if we've got enough room on the stack for locals plus overhead.
473 // The expression stack grows down incrementally, so the normal guard
474 // page mechanism will work for that.
475 //
476 // NOTE: The additional locals are also always pushed (this wasn't
477 // obvious in generate_fixed_frame), so the guard should work for
478 // them too.
479 //
480 // Args:
481 // rdx: number of additional locals this frame needs (what we must check)
482 // rbx: Method*
483 //
484 // Kills:
485 //   rax
486 void InterpreterGenerator::generate_stack_overflow_check(void) {
487 
488 // monitor entry size: see picture of stack in frame_x86.hpp
489 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
490 
491 // total overhead size: entry_size + (saved rbp through expr stack
492 // bottom). be sure to change this if you add/subtract anything
493 // to/from the overhead area
494 const int overhead_size =
495 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
496
497 const int page_size = os::vm_page_size();
498
499 Label after_frame_check;
500
501 // see if the frame is greater than one page in size. If so,
502 // then we need to verify there is enough stack space remaining
503 // for the additional locals.
504 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
505 __ jcc(Assembler::belowEqual, after_frame_check);
506
507 // compute rsp as if this were going to be the last frame on
508 // the stack before the red zone
509
510 Label after_frame_check_pop;
511 const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
512 #ifndef _LP64
513 __ push(rsi);
514 __ get_thread(thread);
515 #endif
516
517 const Address stack_base(thread, Thread::stack_base_offset());
518 const Address stack_size(thread, Thread::stack_size_offset());
519
520 // locals + overhead, in bytes
521 __ mov(rax, rdx);
522 __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
523 __ addptr(rax, overhead_size);
524
525 #ifdef ASSERT
526 Label stack_base_okay, stack_size_okay;
527 // verify that thread stack base is non-zero
528 __ cmpptr(stack_base, (int32_t)NULL_WORD);
529 __ jcc(Assembler::notEqual, stack_base_okay);
530 __ stop("stack base is zero");
531 __ bind(stack_base_okay);
532 // verify that thread stack size is non-zero
533 __ cmpptr(stack_size, 0);
534 __ jcc(Assembler::notEqual, stack_size_okay);
535 __ stop("stack size is zero");
536 __ bind(stack_size_okay);
537 #endif
538
539 // Add stack base to locals and subtract stack size
540 __ addptr(rax, stack_base);
541 __ subptr(rax, stack_size);
542
543 // Use the maximum number of pages we might bang.
544 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
545 (StackRedPages+StackYellowPages);
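// Whichever is larger gives a conservative bound: the new frame must
// stay clear of both the shadow-page bang area and the red/yellow
// guard zone.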
546
547 // add in the red and yellow zone sizes
548 __ addptr(rax, max_pages * page_size);
549
550 // check against the current stack bottom
551 __ cmpptr(rsp, rax);
552
553 __ jcc(Assembler::above, after_frame_check_pop);
554 NOT_LP64(__ pop(rsi);) // get saved bcp
555
556 // Restore sender's sp as SP. This is necessary if the sender's
557 // frame is an extended compiled frame (see gen_c2i_adapter())
558 // and safer anyway in case of JSR292 adaptations.
559
560 __ pop(rax); // return address must be moved if SP is changed
561 __ mov(rsp, rbcp);
562 __ push(rax);
563
564 // Note: the restored frame is not necessarily interpreted.
565 // Use the shared runtime version of the StackOverflowError.
566 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
567 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
568 // all done with frame size check
569 __ bind(after_frame_check_pop);
570 NOT_LP64(__ pop(rsi);)
571
572 // all done with frame size check
573 __ bind(after_frame_check);
574 }
575
576 // Allocate monitor and lock method (asm interpreter)
577 //
578 // Args:
579 // rbx: Method*
580 // r14/rdi: locals
581 //
582 // Kills:
583 // rax
584 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
585 // rscratch1, rscratch2 (scratch regs)
586 void TemplateInterpreterGenerator::lock_method() {
587 // synchronize method
588 const Address access_flags(rbx, Method::access_flags_offset());
589 const Address monitor_block_top(
590 rbp,
591 frame::interpreter_frame_monitor_block_top_offset * wordSize);
592 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
593
594 #ifdef ASSERT
595 {
596 Label L;
597 __ movl(rax, access_flags);
598 __ testl(rax, JVM_ACC_SYNCHRONIZED);
599 __ jcc(Assembler::notZero, L);
600 __ stop("method doesn't need synchronization");
601 __ bind(L);
602 }
603 #endif // ASSERT
604
605 // get synchronization object
606 {
607 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
608 Label done;
609 __ movl(rax, access_flags);
610 __ testl(rax, JVM_ACC_STATIC);
611 // get receiver (assume this is frequent case)
612 __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
613 __ jcc(Assembler::zero, done);
614 __ movptr(rax, Address(rbx, Method::const_offset()));
615 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
616 __ movptr(rax, Address(rax,
617 ConstantPool::pool_holder_offset_in_bytes()));
618 __ movptr(rax, Address(rax, mirror_offset));
619
620 #ifdef ASSERT
621 {
622 Label L;
623 __ testptr(rax, rax);
624 __ jcc(Assembler::notZero, L);
625 __ stop("synchronization object is NULL");
626 __ bind(L);
627 }
628 #endif // ASSERT
629
630 __ bind(done);
631 }
632
633 // add space for monitor & lock
634 __ subptr(rsp, entry_size); // add space for a monitor entry
635 __ movptr(monitor_block_top, rsp); // set new monitor block top
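// The BasicObjectLock is carved out of the expression stack area just
// below the frame's fixed part; monitor_block_top records the new
// extent of the monitor block.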
636 // store object
637 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
638 const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
639 __ movptr(lockreg, rsp); // object address
640 __ lock_object(lockreg);
641 }
642
643 // Generate a fixed interpreter frame. This is identical setup for
644 // interpreted methods and for native methods hence the shared code.
645 //
646 // Args:
647 // rax: return address
648 // rbx: Method*
649 // r14/rdi: pointer to locals
650 // r13/rsi: sender sp
651 // rdx: cp cache
652 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
653 // initialize fixed part of activation frame
654 __ push(rax); // save return address
655 __ enter(); // save old & set new rbp
656 __ push(rbcp); // set sender sp
657 __ push((int)NULL_WORD); // leave last_sp as null
658 __ movptr(rbcp, Address(rbx, Method::const_offset())); // get ConstMethod*
659 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
660 __ push(rbx); // save Method*
661 if (ProfileInterpreter) {
662 Label method_data_continue;
663 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
664 __ testptr(rdx, rdx);
665 __ jcc(Assembler::zero, method_data_continue);
666 __ addptr(rdx, in_bytes(MethodData::data_offset()));
667 __ bind(method_data_continue);
668 __ push(rdx); // set the mdp (method data pointer)
669 } else {
670 __ push(0);
671 }
672
673 __ movptr(rdx, Address(rbx, Method::const_offset()));
674 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
675 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
676 __ push(rdx); // set constant pool cache
677 __ push(rlocals); // set locals pointer
678 if (native_call) {
679 __ push(0); // no bcp
680 } else {
681 __ push(rbcp); // set bcp
682 }
683 __ push(0); // reserve word for pointer to expression stack bottom
684 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
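// From rbp downward the frame now holds: sender sp, last_sp, Method*,
// mdp, constant pool cache, locals pointer, bcp (or 0 for natives) and
// the expression stack bottom -- the layout assumed by the
// frame::interpreter_frame_* offsets used throughout this file.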
685 }
686
687 // End of helpers
688
689 // Method entry for java.lang.ref.Reference.get.
690 address InterpreterGenerator::generate_Reference_get_entry(void) {
691 #if INCLUDE_ALL_GCS
692 // Code: _aload_0, _getfield, _areturn
693 // parameter size = 1
694 //
695 // The code that gets generated by this routine is split into 2 parts:
696 // 1. The "intrinsified" code for G1 (or any SATB based GC),
697 // 2. The slow path - which is an expansion of the regular method entry.
698 //
699 // Notes:-
700 // * In the G1 code we do not check whether we need to block for
701 // a safepoint. If G1 is enabled then we must execute the specialized
702 // code for Reference.get (except when the Reference object is null)
703 // so that we can log the value in the referent field with an SATB
704 // update buffer.
705 // If the code for the getfield template is modified so that the
706 // G1 pre-barrier code is executed when the current method is
707 // Reference.get() then going through the normal method entry
708 // will be fine.
709 // * The G1 code can, however, check the receiver object (the instance
710 // of java.lang.Reference) and jump to the slow path if null. If the
711 // Reference object is null then we obviously cannot fetch the referent
712 // and so we don't need to call the G1 pre-barrier. Thus we can use the
713 // regular method entry code to generate the NPE.
714 //
715 // rbx: Method*
716 
717 // rsi/r13: senderSP must preserve for slow path, set SP to it on fast path
718 
719 address entry = __ pc();
720 
721 const int referent_offset = java_lang_ref_Reference::referent_offset;
722 guarantee(referent_offset > 0, "referent offset not initialized");
723
724 if (UseG1GC) {
725 Label slow_path;
726 // rbx: method
727
728 // Check if local 0 != NULL
729 // If the receiver is null then it is OK to jump to the slow path.
730 __ movptr(rax, Address(rsp, wordSize));
731
732 __ testptr(rax, rax);
733 __ jcc(Assembler::zero, slow_path);
734
735 // rax: local 0
736 // rbx: method (but can be used as scratch now)
737 // rdx: scratch
738 // rdi: scratch
739
740 // Preserve the sender sp in case the pre-barrier
741 // calls the runtime
742 NOT_LP64(__ push(rsi);)
743
744 // Generate the G1 pre-barrier code to log the value of
745 // the referent field in an SATB buffer.
746
747 // Load the value of the referent field.
748 const Address field_address(rax, referent_offset);
749 __ load_heap_oop(rax, field_address);
750
751 const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
752 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
753 NOT_LP64(__ get_thread(thread);)
754
755 // Generate the G1 pre-barrier code to log the value of
756 // the referent field in an SATB buffer.
757 __ g1_write_barrier_pre(noreg /* obj */,
758 rax /* pre_val */,
759 thread /* thread */,
760 rbx /* tmp */,
761 true /* tosca_live */,
762 true /* expand_call */);
763
764 // _areturn
765 NOT_LP64(__ pop(rsi);) // get sender sp
766 __ pop(rdi); // get return address
767 __ mov(rsp, sender_sp); // set sp to sender sp
768 __ jmp(rdi);
769 __ ret(0);
770
771 // generate a vanilla interpreter entry as the slow path
772 __ bind(slow_path);
773 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
774 return entry;
775 }
776 #endif // INCLUDE_ALL_GCS
777
778 // If G1 is not enabled then attempt to go through the accessor entry point
779 // Reference.get is an accessor
780 return NULL;
781 }
782
783 // Interpreter stub for calling a native method. (asm interpreter)
784 // This sets up a somewhat different looking stack for calling the
785 // native method than the typical interpreter frame setup.
786 address InterpreterGenerator::generate_native_entry(bool synchronized) {
787 // determine code generation flags
788 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
789
790 // rbx: Method*
791 // rbcp: sender sp
792
793 address entry_point = __ pc();
794
795 const Address constMethod (rbx, Method::const_offset());
796 const Address access_flags (rbx, Method::access_flags_offset());
797 const Address size_of_parameters(rcx, ConstMethod::
798 size_of_parameters_offset());
799
800
801 // get parameter size (always needed)
802 __ movptr(rcx, constMethod);
803 __ load_unsigned_short(rcx, size_of_parameters);
804
805 // native calls don't need the stack size check since they have no
806 // expression stack, the arguments are already on the stack, and we
807 // only add a handful of words to the stack
808
809 // rbx: Method*
810 // rcx: size of parameters
811 // rbcp: sender sp
812 __ pop(rax); // get return address
813
814 // for natives the size of locals is zero
815
816 // compute beginning of parameters (rdi/r14)
817 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
818
819 // add 2 zero-initialized slots for native calls
820 // initialize result_handler slot
821 __ push((int) NULL_WORD);
822 // slot for oop temp
823 // (static native method holder mirror/jni oop result)
824 __ push((int) NULL_WORD);
825
826 // initialize fixed part of activation frame
827 generate_fixed_frame(true);
828
829 // make sure method is native & not abstract
830 #ifdef ASSERT
831 __ movl(rax, access_flags);
832 {
833 Label L;
834 __ testl(rax, JVM_ACC_NATIVE);
835 __ jcc(Assembler::notZero, L);
836 __ stop("tried to execute non-native method as native");
837 __ bind(L);
838 }
839 {
840 Label L;
841 __ testl(rax, JVM_ACC_ABSTRACT);
842 __ jcc(Assembler::zero, L);
843 __ stop("tried to execute abstract method in interpreter");
844 __ bind(L);
845 }
846 #endif
847
848 // Since at this point in the method invocation the exception handler
849 // would try to exit the monitor of synchronized methods which hasn't
850 // been entered yet, we set the thread local variable
851 // _do_not_unlock_if_synchronized to true. The remove_activation will
852 // check this flag.
853
854 const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
855 NOT_LP64(__ get_thread(thread1);)
856 const Address do_not_unlock_if_synchronized(thread1,
857 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
858 __ movbool(do_not_unlock_if_synchronized, true);
859
860 // increment invocation count & check for overflow
861 Label invocation_counter_overflow;
862 if (inc_counter) {
863 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
864 }
865
866 Label continue_after_compile;
867 __ bind(continue_after_compile);
868
869 bang_stack_shadow_pages(true);
870
871 // reset the _do_not_unlock_if_synchronized flag
872 NOT_LP64(__ get_thread(thread1);)
873 __ movbool(do_not_unlock_if_synchronized, false);
874
875 // check for synchronized methods
876 // Must happen AFTER invocation_counter check and stack overflow check,
877 // so method is not locked if overflows.
878 if (synchronized) {
879 lock_method();
880 } else {
881 // no synchronization necessary
882 #ifdef ASSERT
883 {
884 Label L;
885 __ movl(rax, access_flags);
886 __ testl(rax, JVM_ACC_SYNCHRONIZED);
887 __ jcc(Assembler::zero, L);
888 __ stop("method needs synchronization");
889 __ bind(L);
890 }
891 #endif
892 }
893
894 // start execution
895 #ifdef ASSERT
896 {
897 Label L;
898 const Address monitor_block_top(rbp,
899 frame::interpreter_frame_monitor_block_top_offset * wordSize);
900 __ movptr(rax, monitor_block_top);
901 __ cmpptr(rax, rsp);
902 __ jcc(Assembler::equal, L);
903 __ stop("broken stack frame setup in interpreter");
904 __ bind(L);
905 }
906 #endif
907
908 // jvmti support
909 __ notify_method_entry();
910
911 // work registers
912 const Register method = rbx;
913 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
914 const Register t = NOT_LP64(rcx) LP64_ONLY(r11);
915
916 // allocate space for parameters
917 __ get_method(method);
918 __ movptr(t, Address(method, Method::const_offset()));
919 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
920
921 #ifndef _LP64
922 __ shlptr(t, Interpreter::logStackElementSize);
923 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
924 __ subptr(rsp, t);
925 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
926 #else
927 __ shll(t, Interpreter::logStackElementSize);
928
929 __ subptr(rsp, t);
930 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
931 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
932 #endif // _LP64
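// rsp now points at the base of the outgoing argument area that the
// signature handler fills in (its to() is asserted to be rsp below).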
933
934 // get signature handler
935 {
936 Label L;
937 __ movptr(t, Address(method, Method::signature_handler_offset()));
938 __ testptr(t, t);
939 __ jcc(Assembler::notZero, L);
940 __ call_VM(noreg,
941 CAST_FROM_FN_PTR(address,
942 InterpreterRuntime::prepare_native_call),
943 method);
944 __ get_method(method);
945 __ movptr(t, Address(method, Method::signature_handler_offset()));
946 __ bind(L);
947 }
948
949 // call signature handler
950 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
951 "adjust this code");
952 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
953 "adjust this code");
954 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
955 "adjust this code");
956
957 // The generated handlers do not touch RBX (the Method*).
958 // However, large signatures cannot be cached and are generated
959 // each time here. The slow-path generator can do a GC on return,
960 // so we must reload it after the call.
961 __ call(t);
962 __ get_method(method); // slow path can do a GC, reload RBX
963
964
965 // result handler is in rax
966 // set result handler
967 __ movptr(Address(rbp,
968 (frame::interpreter_frame_result_handler_offset) * wordSize),
969 rax);
970
971 // pass mirror handle if static call
972 {
973 Label L;
974 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
975 __ movl(t, Address(method, Method::access_flags_offset()));
976 __ testl(t, JVM_ACC_STATIC);
977 __ jcc(Assembler::zero, L);
978 // get mirror
979 __ movptr(t, Address(method, Method::const_offset()));
980 __ movptr(t, Address(t, ConstMethod::constants_offset()));
981 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
982 __ movptr(t, Address(t, mirror_offset));
983 // copy mirror into activation frame
984 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
985 t);
986 // pass handle to mirror
987 #ifndef _LP64
988 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
989 __ movptr(Address(rsp, wordSize), t);
990 #else
991 __ lea(c_rarg1,
992 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
993 #endif // _LP64
994 __ bind(L);
995 }
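// Note: the mirror is passed as a handle (the address of the oop slot
// in the frame) rather than as a raw oop, since the callee is JNI code
// and a GC during the call could otherwise leave it with a stale oop.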
996
997 // get native function entry point
998 {
999 Label L;
1000 __ movptr(rax, Address(method, Method::native_function_offset()));
1001 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1002 __ cmpptr(rax, unsatisfied.addr());
1003 __ jcc(Assembler::notEqual, L);
1004 __ call_VM(noreg,
1005 CAST_FROM_FN_PTR(address,
1006 InterpreterRuntime::prepare_native_call),
1007 method);
1008 __ get_method(method);
1009 __ movptr(rax, Address(method, Method::native_function_offset()));
1010 __ bind(L);
1011 }
1012
1013 // pass JNIEnv
1014 #ifndef _LP64
1015 __ get_thread(thread);
1016 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1017 __ movptr(Address(rsp, 0), t);
1018
1019 // set_last_Java_frame_before_call
1020 // It is enough that the pc()
1021 // points into the right code segment. It does not have to be the correct return pc.
1022 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1023 #else
1024 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
1025
1026 // It is enough that the pc() points into the right code
1027 // segment. It does not have to be the correct return pc.
1028 __ set_last_Java_frame(rsp, rbp, (address) __ pc());
1029 #endif // _LP64
1030
1031 // change thread state
1032 #ifdef ASSERT
1033 {
1034 Label L;
1035 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1036 __ cmpl(t, _thread_in_Java);
1037 __ jcc(Assembler::equal, L);
1038 __ stop("Wrong thread state in native stub");
1039 __ bind(L);
1040 }
1041 #endif
1042
1043 // Change state to native
1044
1045 __ movl(Address(thread, JavaThread::thread_state_offset()),
1046 _thread_in_native);
1047
1048 // Call the native method.
1049 __ call(rax);
1050 // 32: result potentially in rdx:rax or ST0
1051 // 64: result potentially in rax or xmm0
1052
1053 // Verify or restore cpu control state after JNI call
1054 __ restore_cpu_control_state_after_jni();
1055
1056 // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1057 // in order to extract the result of a method call. If the order of these
1058 // pushes change or anything else is added to the stack then the code in
1059 // interpreter_frame_result must also change.
1060
1061 #ifndef _LP64
1062 // save potential result in ST(0) & rdx:rax
1063 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
1064 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1065 // It is safe to do this push because state is _thread_in_native and return address will be found
1066 // via _last_native_pc and not via _last_java_sp
1067
1068 // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
1069 // If the order changes or anything else is added to the stack the code in
1070 // interpreter_frame_result will have to be changed.
1071
1072 { Label L;
1073 Label push_double;
1074 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1075 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1076 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1077 float_handler.addr());
1078 __ jcc(Assembler::equal, push_double);
1079 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1080 double_handler.addr());
1081 __ jcc(Assembler::notEqual, L);
1082 __ bind(push_double);
1083 __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
1084 __ bind(L);
1085 }
1086 #else
1087 __ push(dtos);
1088 #endif // _LP64
1089
1090 __ push(ltos);
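// The potential native result now lives on the expression stack (ltos
// plus the dtos/FPU words above), where it survives the thread state
// transition and any safepoint below; it is popped again just before
// the result handler is called.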
1091
1092 // change thread state
1093 NOT_LP64(__ get_thread(thread);)
1094 __ movl(Address(thread, JavaThread::thread_state_offset()),
1095 _thread_in_native_trans);
1096
1097 if (os::is_MP()) {
1098 if (UseMembar) {
1099 // Force this write out before the read below
1100 __ membar(Assembler::Membar_mask_bits(
1101 Assembler::LoadLoad | Assembler::LoadStore |
1102 Assembler::StoreLoad | Assembler::StoreStore));
1103 } else {
1104 // Write serialization page so VM thread can do a pseudo remote membar.
1105 // We use the current thread pointer to calculate a thread specific
1106 // offset to write to within the page. This minimizes bus traffic
1107 // due to cache line collision.
1108 __ serialize_memory(thread, rcx);
1109 }
1110 }
1111
1112 #ifndef _LP64
1113 if (AlwaysRestoreFPU) {
1114 // Make sure the control word is correct.
1115 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1116 }
1117 #endif // _LP64
1118
1119 // check for safepoint operation in progress and/or pending suspend requests
1120 {
1121 Label Continue;
1122 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
1123 SafepointSynchronize::_not_synchronized);
1124
1125 Label L;
1126 __ jcc(Assembler::notEqual, L);
1127 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1128 __ jcc(Assembler::equal, Continue);
1129 __ bind(L);
1130
1131 // Don't use call_VM as it will see a possible pending exception
1132 // and forward it and never return here preventing us from
1133 // clearing _last_native_pc down below. Also can't use
1134 // call_VM_leaf either as it will check to see if r13 & r14 are
1135 // preserved and correspond to the bcp/locals pointers. So we do a
1136 // runtime call by hand.
1137 //
1138 #ifndef _LP64
1139 __ push(thread);
1140 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1141 JavaThread::check_special_condition_for_native_trans)));
1142 __ increment(rsp, wordSize);
1143 __ get_thread(thread);
1144 #else
1145 __ mov(c_rarg0, r15_thread);
1146 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1147 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1148 __ andptr(rsp, -16); // align stack as required by ABI
1149 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
1150 __ mov(rsp, r12); // restore sp
1151 __ reinit_heapbase();
1152 #endif // _LP64
1153 __ bind(Continue);
1154 }
1155
1156 // change thread state
1157 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1158
1159 // reset_last_Java_frame
1160 __ reset_last_Java_frame(thread, true, true);
1161
1162 // reset handle block
1163 __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
1164 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
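// Zeroing the top offset abandons all JNI local handles created during
// the native call in one step; the handle block itself is reused.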
1165
1166 // If result is an oop unbox and store it in frame where gc will see it
1167 // and result handler will pick it up
1168
1169 {
1170 Label no_oop, store_result;
1171 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1172 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
1173 __ jcc(Assembler::notEqual, no_oop);
1174 // retrieve result
1175 __ pop(ltos);
1176 __ testptr(rax, rax);
1177 __ jcc(Assembler::zero, store_result);
1178 __ movptr(rax, Address(rax, 0)); // unbox the JNI handle to the raw oop
1179 __ bind(store_result);
1180 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
1181 // keep stack depth as expected by pushing oop which will eventually be discarded
1182 __ push(ltos);
1183 __ bind(no_oop);
1184 }
1185
1186
1187 {
1188 Label no_reguard;
1189 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
1190 JavaThread::stack_guard_yellow_disabled);
1191 __ jcc(Assembler::notEqual, no_reguard);
1192
1193 __ pusha(); // XXX only save smashed registers
1194 #ifndef _LP64
1195 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1196 __ popa();
1197 #else
1198 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1199 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1200 __ andptr(rsp, -16); // align stack as required by ABI
1201 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1202 __ mov(rsp, r12); // restore sp
1203 __ popa(); // XXX only restore smashed registers
1204 __ reinit_heapbase();
1205 #endif // _LP64
1206
1207 __ bind(no_reguard);
1208 }
1209
1210
1211 // The method register is junk from after the thread_in_native transition
1212 // until here. Also can't call_VM until the bcp has been
1213 // restored. Need bcp for throwing exception below so get it now.
1214 __ get_method(method);
1215
1216 // restore rsi/r13 to have legal interpreter frame, i.e., bci == 0 <=>
1217 // r13 == code_base()
1218 __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod*
1219 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
1220
1221 // handle exceptions (exception handling will handle unlocking!)
1222 {
1223 Label L;
1224 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
1225 __ jcc(Assembler::zero, L);
1226 // Note: At some point we may want to unify this with the code
1227 // used in call_VM_base(); i.e., we should use the
1228 // StubRoutines::forward_exception code. For now this doesn't work
1229 // here because the rsp is not correctly set at this point.
1230 __ MacroAssembler::call_VM(noreg,
1231 CAST_FROM_FN_PTR(address,
1232 InterpreterRuntime::throw_pending_exception));
1233 __ should_not_reach_here();
1234 __ bind(L);
1235 }
1236
1237 // do unlocking if necessary
1238 {
1239 Label L;
1240 __ movl(t, Address(method, Method::access_flags_offset()));
1241 __ testl(t, JVM_ACC_SYNCHRONIZED);
1242 __ jcc(Assembler::zero, L);
1243 // the code below should be shared with interpreter macro
1244 // assembler implementation
1245 {
1246 Label unlock;
1247 // BasicObjectLock will be first in list, since this is a
1248 // synchronized method. However, need to check that the object
1249 // has not been unlocked by an explicit monitorexit bytecode.
1250 const Address monitor(rbp,
1251 (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1252 wordSize - (int)sizeof(BasicObjectLock)));
1253
1254 const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1255
1256 // monitor is expected in c_rarg1 (rdx on 32-bit) for the slow unlock path
1257 __ lea(regmon, monitor); // address of first monitor
1258
1259 __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
1260 __ testptr(t, t);
1261 __ jcc(Assembler::notZero, unlock);
1262
1263 // Entry already unlocked, need to throw exception
1264 __ MacroAssembler::call_VM(noreg,
1265 CAST_FROM_FN_PTR(address,
1266 InterpreterRuntime::throw_illegal_monitor_state_exception));
1267 __ should_not_reach_here();
1268
1269 __ bind(unlock);
1270 __ unlock_object(regmon);
1271 }
1272 __ bind(L);
1273 }
1274
1275 // jvmti support
1276 // Note: This must happen _after_ handling/throwing any exceptions since
1277 // the exception handler code notifies the runtime of method exits
1278 // too. If this happens before, method entry/exit notifications are
1279 // not properly paired (was bug - gri 11/22/99).
1280 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1281
1282 // restore potential result in edx:eax, call result handler to
1283 // restore potential result in ST0 & handle result
1284
1285 __ pop(ltos);
1286 LP64_ONLY( __ pop(dtos);)
1287
1288 __ movptr(t, Address(rbp,
1289 (frame::interpreter_frame_result_handler_offset) * wordSize));
1290 __ call(t);
1291
1292 // remove activation
1293 __ movptr(t, Address(rbp,
1294 frame::interpreter_frame_sender_sp_offset *
1295 wordSize)); // get sender sp
1296 __ leave(); // remove frame anchor
1297 __ pop(rdi); // get return address
1298 __ mov(rsp, t); // set sp to sender sp
1299 __ jmp(rdi);
1300
1301 if (inc_counter) {
1302 // Handle overflow of counter and compile method
1303 __ bind(invocation_counter_overflow);
1304 generate_counter_overflow(&continue_after_compile);
1305 }
1306
1307 return entry_point;
1308 }
1309
1310 //
1311 // Generic interpreted method entry to (asm) interpreter
1312 //
1313 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1314 // determine code generation flags
1315 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1316
1317 // ebx: Method*
1318 // rbcp: sender sp
1319 address entry_point = __ pc();
1320
1321 const Address constMethod(rbx, Method::const_offset());
1322 const Address access_flags(rbx, Method::access_flags_offset());
1323 const Address size_of_parameters(rdx,
1324 ConstMethod::size_of_parameters_offset());
1325 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1326
1327
1328 // get parameter size (always needed)
1329 __ movptr(rdx, constMethod);
1330 __ load_unsigned_short(rcx, size_of_parameters);
1331
1332 // rbx: Method*
1333 // rcx: size of parameters
1334 // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)
1335
1336 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1337 __ subl(rdx, rcx); // rdx = no. of additional locals
1338
1339 // YYY
1340 // __ incrementl(rdx);
1341 // __ andl(rdx, -2);
1342
1343 // see if we've got enough room on the stack for locals plus overhead.
1344 generate_stack_overflow_check();
1345
1346 // get return address
1347 __ pop(rax);
1348
1349 // compute beginning of parameters (r14)
1350 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1351
1352 // rdx - # of additional locals
1353 // allocate space for locals
1354 // explicitly initialize locals
1355 {
1356 Label exit, loop;
1357 __ testl(rdx, rdx);
1358 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1359 __ bind(loop);
1360 __ push((int) NULL_WORD); // initialize local variables
1361 __ decrementl(rdx); // until everything initialized
1362 __ jcc(Assembler::greater, loop);
1363 __ bind(exit);
1364 }
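// Locals are zeroed eagerly so that GC never sees a stale stack word
// where the oop maps claim a valid oop.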
1365
1366 // initialize fixed part of activation frame
1367 generate_fixed_frame(false);
1368
1369 // make sure method is not native & not abstract
1370 #ifdef ASSERT
1371 __ movl(rax, access_flags);
1372 {
1373 Label L;
1374 __ testl(rax, JVM_ACC_NATIVE);
1375 __ jcc(Assembler::zero, L);
1376 __ stop("tried to execute native method as non-native");
1377 __ bind(L);
1378 }
1379 {
1380 Label L;
1381 __ testl(rax, JVM_ACC_ABSTRACT);
1382 __ jcc(Assembler::zero, L);
1383 __ stop("tried to execute abstract method in interpreter");
1384 __ bind(L);
1385 }
1386 #endif
1387
1388 // Since at this point in the method invocation the exception
1389 // handler would try to exit the monitor of synchronized methods
1390 // which hasn't been entered yet, we set the thread local variable
1391 // _do_not_unlock_if_synchronized to true. The remove_activation
1392 // will check this flag.
1393
1394 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1395 NOT_LP64(__ get_thread(thread);)
1396 const Address do_not_unlock_if_synchronized(thread,
1397 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1398 __ movbool(do_not_unlock_if_synchronized, true);
1399
1400 __ profile_parameters_type(rax, rcx, rdx);
1401 // increment invocation count & check for overflow
1402 Label invocation_counter_overflow;
1403 Label profile_method;
1404 Label profile_method_continue;
1405 if (inc_counter) {
1406 generate_counter_incr(&invocation_counter_overflow,
1407 &profile_method,
1408 &profile_method_continue);
1409 if (ProfileInterpreter) {
1410 __ bind(profile_method_continue);
1411 }
1412 }
1413
1414 Label continue_after_compile;
1415 __ bind(continue_after_compile);
1416
1417 // bang the stack shadow pages
1418 bang_stack_shadow_pages(false);
1419
1420 // reset the _do_not_unlock_if_synchronized flag
1421 NOT_LP64(__ get_thread(thread);)
1422 __ movbool(do_not_unlock_if_synchronized, false);
1423
1424 // check for synchronized methods
1425 // Must happen AFTER invocation_counter check and stack overflow check,
1426 // so method is not locked if overflows.
1427 if (synchronized) {
1428 // Allocate monitor and lock method
1429 lock_method();
1430 } else {
1431 // no synchronization necessary
1432 #ifdef ASSERT
1433 {
1434 Label L;
1435 __ movl(rax, access_flags);
1436 __ testl(rax, JVM_ACC_SYNCHRONIZED);
1437 __ jcc(Assembler::zero, L);
1438 __ stop("method needs synchronization");
1439 __ bind(L);
1440 }
1441 #endif
1442 }
1443 
1444 // start execution
1445 #ifdef ASSERT
1446 {
1447 Label L;
1448 const Address monitor_block_top (rbp,
1449 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1450 __ movptr(rax, monitor_block_top);
1451 __ cmpptr(rax, rsp);
1452 __ jcc(Assembler::equal, L);
1453 __ stop("broken stack frame setup in interpreter");
1454 __ bind(L);
1455 }
1456 #endif
1457 
1458 // jvmti support
1459 __ notify_method_entry();
1460 
1461 __ dispatch_next(vtos);
1462
1463 // invocation counter overflow
1464 if (inc_counter) {
1465 if (ProfileInterpreter) {
1466 // We have decided to profile this method in the interpreter
1467 __ bind(profile_method);
1468 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1469 __ set_method_data_pointer_for_bcp();
1470 __ get_method(rbx);
1471 __ jmp(profile_method_continue);
1472 }
1473 // Handle overflow of counter and compile method
1474 __ bind(invocation_counter_overflow);
1475 generate_counter_overflow(&continue_after_compile);
1476 }
1477
1478 return entry_point;
1479 }
1480
1481 //-----------------------------------------------------------------------------
1482 // Exceptions
1483
1484 void TemplateInterpreterGenerator::generate_throw_exception() {
1485 // Entry point in previous activation (i.e., if the caller was
1486 // interpreted)
1487 Interpreter::_rethrow_exception_entry = __ pc();
1488 // Restore sp to interpreter_frame_last_sp even though we are going
1489 // to empty the expression stack for the exception processing.
1490 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
1491 // rax: exception
1492 // rdx: return address/pc that threw exception
1493 __ restore_bcp(); // r13/rsi points to call/send
1494 __ restore_locals();
1495 LP64_ONLY(__ reinit_heapbase();) // restore r12 as heapbase.
1496 // Entry point for exceptions thrown within interpreter code
1497 Interpreter::_throw_exception_entry = __ pc();
1498 // expression stack is undefined here
1499 // rax: exception
1500 // r13/rsi: exception bcp
1501 __ verify_oop(rax);
1502 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
1503 LP64_ONLY(__ mov(c_rarg1, rax);)
1504
1505 // expression stack must be empty before entering the VM in case of
1506 // an exception
1507 __ empty_expression_stack();
1508 // find exception handler address and preserve exception oop
1509 __ call_VM(rdx,
1510 CAST_FROM_FN_PTR(address,
1511 InterpreterRuntime::exception_handler_for_exception),
1512 rarg);
1513 // rax: exception handler entry point
1514 // rdx: preserved exception oop
1515 // r13/rsi: bcp for exception handler
1516 __ push_ptr(rdx); // push exception which is now the only value on the stack
1517 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1518
1519 // If the exception is not handled in the current frame the frame is
1520 // removed and the exception is rethrown (i.e. exception
1521 // continuation is _rethrow_exception).
1522 //
1523 // Note: At this point the bci is still the bci for the instruction
1524 // which caused the exception and the expression stack is
1525 // empty. Thus, for any VM calls at this point, GC will find a legal
1526 // oop map (with empty expression stack).
1527
1528 // In current activation
1529 // tos: exception
1530 // esi: exception bcp
1531
1532 //
1533 // JVMTI PopFrame support
1534 //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread);)
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                        size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
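    // Illustrative note: rlocals pointed at local[0], the highest-addressed
    // slot, and locals grow toward lower addresses; after the two
    // adjustments rlocals == &local[n-1], the lowest-addressed of the n
    // parameter words, from which popframe_preserve_args saves rax bytes.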
    // Save these arguments
    NOT_LP64(__ get_thread(thread);)
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread);)
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
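  // The fixup routine (InterpreterRuntime::popframe_move_outgoing_args)
  // receives (current sp, saved last_sp) and copies the invoke's argument
  // words from the current stack top to the location recorded in last_sp,
  // making the frame look like an ordinary interpreted call again.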
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
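  // A non-NULL last_sp means "call in progress"; reloading rsp from it and
  // then clearing it returns the frame to the no-pending-call state that
  // stack walkers expect.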

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread);)
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread);)
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread);)
  __ get_vm_result(rax, thread);
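  // The exception oop was parked in JavaThread::_vm_result (a GC root) so
  // that it survives any safepoint during activation removal;
  // get_vm_result reloads it into rax and clears the field.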

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);  // save exception
  __ push(rdx);  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);  // save exception handler
  __ pop(rdx);       // restore return address
  __ pop(rax);       // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);       // jump to exception handler of caller
}
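
// generate_throw_exception installs four entry points:
//   _rethrow_exception_entry                 - rethrow after an activation
//                                              has been removed
//   _throw_exception_entry                   - throws from interpreter code
//   _remove_activation_preserving_args_entry - JVMTI PopFrame
//   _remove_activation_entry                 - unwind one frame and rethrow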


//
// JVMTI ForceEarlyReturn support
//
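// ForceEarlyReturn makes the topmost frame return immediately with a value
// supplied through JVMTI; load_earlyret_value fetches that value from the
// thread's JvmtiThreadState into the tosca registers for the given state.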
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32-bit returns long values in rdx:rax, so don't reuse rdx

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread);)
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);  // rsi: caller's return address, loaded by remove_activation

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

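// A template whose entry state is vtos still needs an entry point for every
// tosca state: each non-vtos entry below pushes the cached top-of-stack
// value onto the expression stack and falls through to the shared vtos
// body. btos/ctos/stos reuse the itos entry because subword values are
// int-sized on the operand stack.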
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l(); __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);    // pop return address so expression stack is 'pure'
  __ push(state); // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
  __ pop(state);    // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0); // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

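// _index packs a bytecode pair as (current << log2_number_of_codes) | previous:
// the shift discards the older half and the OR installs the new bytecode.
// Illustrative example, assuming log2_number_of_codes == 8: the pair
// (iload, iadd) yields (0x60 << 8) | 0x15, which indexes _counters.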
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


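// Debugging aid for the develop flag -XX:StopInterpreterAt=<n>: once the
// global bytecode counter reaches <n>, execution traps into the debugger
// via int3.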
void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP