396 //
397
398 // increment invocation count & check for overflow
399 //
400 // Note: checking for negative value instead of overflow
401 // so we have a 'sticky' overflow test
402 //
403 // Lmethod: method
404 // ??: invocation counter
405 //
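//
// A minimal sketch of the 'sticky' idea (illustrative C, not the emitted
// SPARC code): the counter is compared unsigned against the limit, so once
// it passes the limit the test keeps firing on every later call, even if
// the counter eventually wraps to a negative signed value (which reads as
// a huge unsigned value):
//
//   uint32_t counter = ...;                     // invocation counter value
//   counter += 1;                               // simplified increment step
//   if (counter >= InterpreterInvocationLimit)  // unsigned compare
//     goto overflow;                            // taken from now on
//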
406 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
407 Label done;
408 const Register Rcounters = G3_scratch;
409
410 __ ld_ptr(STATE(_method), G5_method);
411 __ get_method_counters(G5_method, Rcounters, done);
412
413 // Update standard invocation counters
414 __ increment_invocation_counter(Rcounters, O0, G4_scratch);
415 if (ProfileInterpreter) {
416 Address interpreter_invocation_counter(Rcounters,
417 in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
418 __ ld(interpreter_invocation_counter, G4_scratch);
419 __ inc(G4_scratch);
420 __ st(G4_scratch, interpreter_invocation_counter);
421 }
422
423 AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
424 __ load_contents(invocation_limit, G3_scratch);
425 __ cmp(O0, G3_scratch);
426 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
427 __ delayed()->nop();
428 __ bind(done);
429 }
430
431 address InterpreterGenerator::generate_empty_entry(void) {
432
433 // A method that does nothing but return...
434
435 address entry = __ pc();
436 Label slow_path;
437
438 // do nothing for empty methods (do not even increment invocation counter)
439 if (UseFastEmptyMethods) {
440 // If we need a safepoint check, generate full interpreter entry.
441 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
442 __ load_contents(sync_state, G3_scratch);
443 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
444 __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
445 __ delayed()->nop();
446
447 // Code: _return
448 __ retl();
449 __ delayed()->mov(O5_savedSP, SP);
450 return entry;
451 }
452 return NULL;
453 }
454
455 // Call an accessor method (assuming it is resolved; otherwise drop into
456 // the vanilla (slow path) entry).
457
458 // Generates code to elide accessor methods
459 // Uses G3_scratch and G1_scratch as scratch
460 address InterpreterGenerator::generate_accessor_entry(void) {
461
462 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
463 // parameter size = 1
464 // Note: We can only use this code if the getfield has been resolved
465 // and if we don't have a null-pointer exception => check for
466 // these conditions first and use slow path if necessary.
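// For reference, a typical accessor compiles to exactly this shape
// (illustrative Java source and its javac output):
//
//   int getX() { return x; }
//     0: aload_0
//     1: getfield #<cp index>   // index bytes at bcp offsets 2 and 3
//     4: ireturn
//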
467 address entry = __ pc();
468 Label slow_path;
469
470 if (UseFastAccessorMethods) {
471 // Check if we need to reach a safepoint and generate full interpreter
472 // frame if so.
473 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
474 __ load_contents(sync_state, G3_scratch);
475 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
476 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
477 __ delayed()->nop();
478
479 // Check if local 0 != NULL
480 __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
481 __ tst(Otos_i); // check if local 0 == NULL and go to the slow path
482 __ brx(Assembler::zero, false, Assembler::pn, slow_path);
483 __ delayed()->nop();
484
485
486 // read first instruction word and extract bytecode @ 1 and index @ 2
487 // get first 4 bytes of the bytecodes (big endian!)
488 __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
489 __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
490
491 // move index @ 2 far left then to the right most two bytes.
492 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
493 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
494 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
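// Worked example of the shift pair above, assuming a 4-word
// ConstantPoolCacheEntry on a 64-bit build (32 bytes, exact_log2 == 5):
// sll by 16 discards the two bytecode bytes and keeps the 16-bit index in
// the upper half; srl by (16 - 5) == 11 then yields index * 32, a
// ready-made byte offset into the constant pool cache.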
495
496 // get constant pool cache
497 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
498 __ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
499 __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
500
501 // get specific constant pool cache entry
502 __ add(G3_scratch, G1_scratch, G3_scratch);
503
504 // Check the constant pool cache entry to see if it has been resolved.
505 // If not, need the slow path.
506 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
507 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
508 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
509 __ and3(G1_scratch, 0xFF, G1_scratch);
572 #endif // INCLUDE_ALL_GCS
573
574 // If G1 is not enabled then attempt to go through the accessor entry point
575 // Reference.get is an accessor
576 return generate_accessor_entry();
577 }
578
579 //
580 // Interpreter stub for calling a native method. (C++ interpreter)
581 // This sets up a somewhat different looking stack for calling the native method
582 // than the typical interpreter frame setup.
583 //
584
585 address InterpreterGenerator::generate_native_entry(bool synchronized) {
586 address entry = __ pc();
587
588 // the following temporary registers are used during frame creation
589 const Register Gtmp1 = G3_scratch;
590 const Register Gtmp2 = G1_scratch;
591 const Register RconstMethod = Gtmp1;
592 const Address constMethod(G5_method, in_bytes(Method::const_offset()));
593 const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
594
595 bool inc_counter = UseCompiler || CountCompiledCalls;
596
597 // make sure registers are different!
598 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
599
600 const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
601
602 Label Lentry;
603 __ bind(Lentry);
604
605 const Register Glocals_size = G3;
606 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
607
608 // make sure method is native & not abstract
609 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
610 #ifdef ASSERT
611 __ ld(access_flags, Gtmp1);
612 {
613 Label L;
614 __ btst(JVM_ACC_NATIVE, Gtmp1);
615 __ br(Assembler::notZero, false, Assembler::pt, L);
616 __ delayed()->nop();
617 __ stop("tried to execute non-native method as native");
618 __ bind(L);
619 }
620 { Label L;
625 __ bind(L);
626 }
627 #endif // ASSERT
628
629 __ ld_ptr(constMethod, RconstMethod);
630 __ lduh(size_of_parameters, Gtmp1);
631 __ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes
632 __ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord
633 // NEW
634 __ add(Gargs, -wordSize, Gargs); // points to first local[0]
635 // generate the code to allocate the interpreter stack frame
636 // NEW FRAME ALLOCATED HERE
637 // save caller's original sp
638 // __ mov(SP, I5_savedSP->after_restore());
639
640 generate_compute_interpreter_state(Lstate, G0, true);
641
642 // At this point Lstate points to new interpreter state
643 //
644
645 const Address do_not_unlock_if_synchronized(G2_thread,
646 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
647 // Since at this point in the method invocation the exception handler
648 // would try to exit the monitor of a synchronized method which hasn't
649 // been entered yet, we set the thread-local variable
650 // _do_not_unlock_if_synchronized to true. If any exception is thrown by
651 // the runtime, the exception handler (i.e. unlock_if_synchronized_method)
652 // will check this thread-local flag.
653 // The effect is to force an unwind in the topmost interpreter frame
654 // without performing an unlock while doing so.
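// Conceptually (a sketch in terms of the JavaThread accessor, not the code
// emitted here):
//
//   thread->set_do_not_unlock_if_synchronized(true);
//   ... runtime calls that may throw (e.g. counter overflow) ...
//   thread->set_do_not_unlock_if_synchronized(false);  // once the monitor is really entered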
655
656 __ movbool(true, G3_scratch);
657 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
658
659
660 // increment invocation counter and check for overflow
661 //
662 // Note: checking for negative value instead of overflow
663 // so we have a 'sticky' overflow test (may be of
664 // importance as soon as we have true MT/MP)
665 Label invocation_counter_overflow;
699
700 // __ verify_thread(); // kills L1, L2; can't use at the moment
701
702 // jvmti/jvmpi support
703 __ notify_method_entry();
704
705 // native call
706
707 // (note that O0 is never an oop--at most it is a handle)
708 // It is important not to smash any handles created by this call,
709 // until any oop handle in O0 is dereferenced.
710
711 // (note that the space for outgoing params is preallocated)
712
713 // get signature handler
714
715 Label pending_exception_present;
716
717 { Label L;
718 __ ld_ptr(STATE(_method), G5_method);
719 __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
720 __ tst(G3_scratch);
721 __ brx(Assembler::notZero, false, Assembler::pt, L);
722 __ delayed()->nop();
723 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
724 __ ld_ptr(STATE(_method), G5_method);
725
726 Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
727 __ ld_ptr(exception_addr, G3_scratch);
728 __ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
729 __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
730 __ bind(L);
731 }
732
733 // Push a new frame so that the args will really be stored in it.
734 // Copy a few locals across so the new frame has the variables
735 // we need; these values will be dead at the JNI call and
736 // therefore not GC-volatile like the values in the current
737 // frame (Lstate in particular).
738
739 // Flush the state pointer to the register save area
740 // Which is the only register we need for a stack walk.
741 __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
742
743 __ mov(Lstate, O1); // Need to pass the state pointer across the frame
744
745 // Calculate current frame size
746 __ sub(SP, FP, O3); // Calculate negative of current frame size
747 __ save(SP, O3, SP); // Allocate an identical sized frame
748
749 __ mov(I1, Lstate); // In the "natural" register.
753 // below (and fix I7 so the stack trace doesn't have a meaningless frame
754 // in it).
755
756
757 // call signature handler
758 __ ld_ptr(STATE(_method), Lmethod);
759 __ ld_ptr(STATE(_locals), Llocals);
760
761 __ callr(G3_scratch, 0);
762 __ delayed()->nop();
763 __ ld_ptr(STATE(_thread), G2_thread); // restore thread (shouldn't be needed)
764
765 { Label not_static;
766
767 __ ld_ptr(STATE(_method), G5_method);
768 __ ld(access_flags, O0);
769 __ btst(JVM_ACC_STATIC, O0);
770 __ br( Assembler::zero, false, Assembler::pt, not_static);
771 __ delayed()->
772 // get native function entry point (O0 is a good temp until the very end)
773 ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
774 // for static methods insert the mirror argument
775 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
776
777 __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), O1);
778 __ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
779 __ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
780 __ ld_ptr(O1, mirror_offset, O1);
781 // where the mirror handle body is allocated:
782 #ifdef ASSERT
783 if (!PrintSignatureHandlers) // do not dirty the output with this
784 { Label L;
785 __ tst(O1);
786 __ brx(Assembler::notZero, false, Assembler::pt, L);
787 __ delayed()->nop();
788 __ stop("mirror is missing");
789 __ bind(L);
790 }
791 #endif // ASSERT
792 __ st_ptr(O1, STATE(_oop_temp));
793 __ add(STATE(_oop_temp), O1); // this is really an LEA not an add
794 __ bind(not_static);
795 }
796
797 // At this point, arguments have been copied off of the stack into
798 // their JNI positions, which are O1..O5 and SP[68..].
799 // Oops are boxed in-place on the stack, with handles copied to arguments.
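// Handle-boxing sketch (illustrative, not the emitted code): an oop
// argument is stored into a stack slot and the JNI argument becomes the
// address of that slot, with NULL passed through unboxed:
//
//   *slot = obj;                                    // box the oop in place
//   arg   = (obj == NULL) ? NULL : (jobject)slot;   // pass a handle, not the oop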
813 // setup the java frame anchor
814 //
815 // The scavenge function only needs to know that the PC of this frame is
816 // in the interpreter method entry code, it doesn't need to know the exact
817 // PC and hence we can use O7 which points to the return address from the
818 // previous call in the code stream (signature handler function)
819 //
820 // The other trick is we set last_Java_sp to FP instead of the usual SP because
821 // we have pushed the extra frame in order to protect the volatile register(s)
822 // in that frame when we return from the jni call
823 //
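// In effect (sketch of the anchor values chosen below):
//
//   last_Java_pc = O7   // some PC inside this entry stub; exact value unimportant
//   last_Java_sp = FP   // the interpreter frame, skipping the protection frame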
824
825
826 __ set_last_Java_frame(FP, O7);
827 __ mov(O7, I7); // make dummy interpreter frame look like one above,
828 // not meaningless information that'll confuse me.
829
830 // flush the windows now. We don't care about the current (protection) frame
831 // only the outer frames
832
833 __ flushw();
834
835 // mark windows as flushed
836 Address flags(G2_thread,
837 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
838 __ set(JavaFrameAnchor::flushed, G3_scratch);
839 __ st(G3_scratch, flags);
840
841 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
842
843 Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
844 #ifdef ASSERT
845 { Label L;
846 __ ld(thread_state, G3_scratch);
847 __ cmp(G3_scratch, _thread_in_Java);
848 __ br(Assembler::equal, false, Assembler::pt, L);
849 __ delayed()->nop();
850 __ stop("Wrong thread state in native stub");
851 __ bind(L);
852 }
853 #endif // ASSERT
854 __ set(_thread_in_native, G3_scratch);
855 __ st(G3_scratch, thread_state);
856
857 // Call the jni method, using the delay slot to set the JNIEnv* argument.
858 __ callr(O0, 0);
859 __ delayed()->
860 add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
861 __ ld_ptr(STATE(_thread), G2_thread); // restore thread
862
863 // must we block?
864
865 // Block, if necessary, before resuming in _thread_in_Java state.
866 // In order for GC to work, don't clear the last_Java_sp until after blocking.
867 { Label no_block;
868 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
869
870 // Switch thread to "native transition" state before reading the synchronization state.
871 // This additional state is necessary because reading and testing the synchronization
872 // state is not atomic w.r.t. GC, as this scenario demonstrates:
873 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
874 // VM thread changes sync state to synchronizing and suspends threads for GC.
875 // Thread A is resumed to finish this native method, but doesn't block here since it
876 // didn't see any synchronization in progress, and escapes.
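// Transition protocol sketch (what the code below, together with the
// elided continuation, implements):
//
//   thread->state = _thread_in_native_trans;      // store the new state first
//   serialize memory (on MP);                     // make the store visible
//   if (SafepointSynchronize::state != _not_synchronized ||
//       thread->suspend_flags != 0)
//     block();                                    // leaf call into the VM
//   thread->state = _thread_in_Java;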
877 __ set(_thread_in_native_trans, G3_scratch);
878 __ st(G3_scratch, thread_state);
879 if (os::is_MP()) {
880 // Write serialization page so VM thread can do a pseudo remote membar.
881 // We use the current thread pointer to calculate a thread specific
882 // offset to write to within the page. This minimizes bus traffic
883 // due to cache line collision.
884 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
885 }
886 __ load_contents(sync_state, G3_scratch);
887 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
888
889
890 Label L;
891 Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
892 __ br(Assembler::notEqual, false, Assembler::pn, L);
893 __ delayed()->
894 ld(suspend_state, G3_scratch);
895 __ cmp(G3_scratch, 0);
896 __ br(Assembler::equal, false, Assembler::pt, no_block);
897 __ delayed()->nop();
898 __ bind(L);
899
900 // Block. Save any potential method result value before the operation and
901 // use a leaf call to leave the last_Java_frame setup undisturbed.
902 save_native_result();
903 __ call_VM_leaf(noreg,
904 CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans),
905 G2_thread);
906 __ ld_ptr(STATE(_thread), G2_thread); // restore thread
907 // Restore any method result value
908 restore_native_result();
909 __ bind(no_block);
910 }
911
946 __ addcc(G0, O0, O0);
947 __ brx(Assembler::notZero, true, Assembler::pt, store_result); // if result is not NULL:
948 __ delayed()->ld_ptr(O0, 0, O0); // unbox it
949 __ mov(G0, O0);
950
951 __ bind(store_result);
952 // Store it where gc will look for it and result handler expects it.
953 __ st_ptr(O0, STATE(_oop_temp));
954
955 __ bind(no_oop);
956
957 }
958
959 // reset handle block
960 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
961 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
962
963
964 // handle exceptions (exception handling will handle unlocking!)
965 { Label L;
966 Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
967
968 __ ld_ptr(exception_addr, Gtemp);
969 __ tst(Gtemp);
970 __ brx(Assembler::equal, false, Assembler::pt, L);
971 __ delayed()->nop();
972 __ bind(pending_exception_present);
973 // With the C++ interpreter we just leave it pending; the caller will do the
974 // correct thing. However, like x86 we ignore the result of the native call
975 // and leave the method locked; it seems wrong to leave things locked.
976
977 __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
978 __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame
979
980 __ bind(L);
981 }
982
983 // jvmdi/jvmpi support (preserves thread register)
984 __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
985
986 if (synchronized) {
1036 return entry;
1037 }
1038
1039 void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
1040 const Register prev_state,
1041 bool native) {
1042
1043 // On entry
1044 // G5_method - caller's method
1045 // Gargs - points to initial parameters (i.e. locals[0])
1046 // G2_thread - valid? (C1 only??)
1047 // "prev_state" - contains any previous frame manager state which we must save a link
1048 //
1049 // On return
1050 // "state" is a pointer to the newly allocated state object. We must allocate and initialize
1051 // a new interpreterState object and the method expression stack.
1052
1053 assert_different_registers(state, prev_state);
1054 assert_different_registers(prev_state, G3_scratch);
1055 const Register Gtmp = G3_scratch;
1056 const Address constMethod (G5_method, in_bytes(Method::const_offset()));
1057 const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
1058
1059 // The slop factor is two extra slots on the expression stack so that
1060 // we always have room to store a result when returning from a call with
1061 // no parameters that nevertheless returns a result.
1062
1063 const int slop_factor = 2*wordSize;
1064
1065 const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
1066 Method::extra_stack_entries() + // extra stack for jsr 292
1067 frame::memory_parameter_word_sp_offset + // register save area + param window
1068 (native ? frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class
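// Worked example, assuming a 64-bit build (wordSize == 8, LogBytesPerWord == 3):
// slop_factor is 16 bytes, i.e. the two extra expression-stack slots, and
// (sizeof(BytecodeInterpreter) + 16) >> 3 converts that portion to words, so
// fixed_size is a word count to which the variable stack/parameter space
// computed below is added before the conversion to a byte-sized frame.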
1069
1070 // XXX G5_method valid
1071
1072 // Now compute new frame size
1073
1074 if (native) {
1075 const Register RconstMethod = Gtmp;
1076 const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
1077 __ ld_ptr(constMethod, RconstMethod);
1078 __ lduh( size_of_parameters, Gtmp );
1079 __ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
1080 } else {
1081 // Full size expression stack
1082 __ ld_ptr(constMethod, Gtmp);
1083 __ lduh(Gtmp, in_bytes(ConstMethod::max_stack_offset()), Gtmp);
1084 }
1085 __ add(Gtmp, fixed_size, Gtmp); // plus the fixed portion
1086
1087 __ neg(Gtmp); // negative space for stack/parameters in words
1088 __ and3(Gtmp, -WordsPerLong, Gtmp); // make multiple of 2 (SP must be 2-word aligned)
1089 __ sll(Gtmp, LogBytesPerWord, Gtmp); // negative space for frame in bytes
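// Rounding sketch: for a 13-word frame, -13 & -2 == -14, and (assuming
// LogBytesPerWord == 3) -14 << 3 == -112, the negative byte size of a
// doubleword-aligned frame suitable for a SPARC save instruction.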
1090
1091 // Need to do stack size check here before we fault on large frames
1092
1093 Label stack_ok;
1094
1095 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
1096 (StackRedPages+StackYellowPages);
1227
1228 if (!native) {
1229 //
1230 // Code to initialize locals
1231 //
1232 Register init_value = noreg; // will be G0 if we must clear locals
1233 // Now zero locals
1234 if (true /* zerolocals */ || ClearInterpreterLocals) {
1235 // explicitly initialize locals
1236 init_value = G0;
1237 } else {
1238 #ifdef ASSERT
1239 // initialize locals to a garbage pattern for better debugging
1240 init_value = O3;
1241 __ set( 0x0F0F0F0F, init_value );
1242 #endif // ASSERT
1243 }
1244 if (init_value != noreg) {
1245 Label clear_loop;
1246 const Register RconstMethod = O1;
1247 const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
1248 const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
1249
1250 // NOTE: If you change the frame layout, this code will need to
1251 // be updated!
1252 __ ld_ptr( constMethod, RconstMethod );
1253 __ lduh( size_of_locals, O2 );
1254 __ lduh( size_of_parameters, O1 );
1255 __ sll( O2, LogBytesPerWord, O2);
1256 __ sll( O1, LogBytesPerWord, O1 );
1257 __ ld_ptr(XXX_STATE(_locals), L2_scratch);
1258 __ sub( L2_scratch, O2, O2 );
1259 __ sub( L2_scratch, O1, O1 );
1260
1261 __ bind( clear_loop );
1262 __ inc( O2, wordSize );
1263
1264 __ cmp( O2, O1 );
1265 __ br( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
1266 __ delayed()->st_ptr( init_value, O2, 0 );
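// C sketch of the loop above: local slot i lives at locals - i*wordSize,
// so this initializes exactly the non-parameter local slots:
//
//   for (char* p = locals_base - locals_bytes + wordSize;   // &local[nlocals-1]
//        p <= locals_base - param_bytes;                    // &local[nparams]
//        p += wordSize)
//     *(intptr_t*)p = init_value;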
1267 }
1268 }
1477 __ delayed()->nop(); \
1478 __ breakpoint_trap(); \
1479 __ emit_int32(marker); \
1480 __ bind(skip); \
1481 }
1482 #else
1483 #define VALIDATE_STATE(scratch, marker)
1484 #endif /* ASSERT */
1485
1486 void CppInterpreterGenerator::adjust_callers_stack(Register args) {
1487 //
1488 // Adjust caller's stack so that all the locals can be contiguous with
1489 // the parameters.
1490 // Worries about stack overflow make this a pain.
1491 //
1492 // Destroys args and G3_scratch
1493 // In/Out O5_savedSP (sender's original SP)
1494 //
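// Adjustment sketch (rough arithmetic; the alignment and rounding are done
// by compute_extra_locals_size_in_bytes below): the caller pushed nparams
// argument words, the method needs nlocals >= nparams contiguous words, so
// the caller's SP is moved down by about (nlocals - nparams) * wordSize
// bytes so the extra locals sit directly below the parameters.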
1495 // assert_different_registers(state, prev_state);
1496 const Register Gtmp = G3_scratch;
1497 const Register RconstMethod = G3_scratch;
1498 const Register tmp = O2;
1499 const Address constMethod(G5_method, in_bytes(Method::const_offset()));
1500 const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
1501 const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
1502
1503 __ ld_ptr(constMethod, RconstMethod);
1504 __ lduh(size_of_parameters, tmp);
1505 __ sll(tmp, LogBytesPerWord, Gargs); // parameter size in bytes
1506 __ add(args, Gargs, Gargs); // points to first local + BytesPerWord
1507 // NEW
1508 __ add(Gargs, -wordSize, Gargs); // points to first local[0]
1509 // determine extra space for non-argument locals & adjust caller's SP
1510 // tmp: parameter size in words
1511 __ lduh(size_of_locals, Gtmp);
1512 __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp);
1513
1514 #if 1
1515 // c2i adapters place the final interpreter argument in the register save area for O0/I0
1516 // the call_stub will place the final interpreter argument at
1517 // frame::memory_parameter_word_sp_offset. This is mostly not noticeable for either asm
1518 // or c++ interpreter. However with the c++ interpreter when we do a recursive call
1519 // and try to make it look good in the debugger we will store the argument to
1520 // RecursiveInterpreterActivation in the register argument save area. Without allocating
1521 // extra space for the compiler this will overwrite locals in the local array of the
1536 // G5_method: Method*
1537 // G2_thread: thread (unused)
1538 // Gargs: bottom of args (sender_sp)
1539 // O5: sender's sp
1540
1541 // A single frame manager is plenty as we don't specialize for synchronized. We could,
1542 // and the code is pretty much ready. We would need to change the test below and, for good
1543 // measure, modify generate_interpreter_state to only do the (pre) sync stuff for
1544 // synchronized routines. Not clear this is worth it yet.
1545
1546 if (interpreter_frame_manager) {
1547 return interpreter_frame_manager;
1548 }
1549
1550 __ bind(frame_manager_entry);
1551
1552 // the following temporary registers are used during frame creation
1553 const Register Gtmp1 = G3_scratch;
1554 // const Register Lmirror = L1; // native mirror (native calls only)
1555
1556 const Address constMethod (G5_method, in_bytes(Method::const_offset()));
1557 const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
1558
1559 address entry_point = __ pc();
1560 __ mov(G0, prevState); // no current activation
1561
1562
1563 Label re_dispatch;
1564
1565 __ bind(re_dispatch);
1566
1567 // The interpreter needs to have the locals completely contiguous. In order
1568 // to do that we must adjust the caller's stack pointer for any locals beyond
1569 // just the parameters.
1570 adjust_callers_stack(Gargs);
1571
1572 // O5_savedSP still contains sender's sp
1573
1574 // NEW FRAME
1575
1576 generate_compute_interpreter_state(Lstate, prevState, false);
1577
1690 __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
1691 __ ba(call_interpreter);
1692 __ delayed()->st(L1_scratch, STATE(_msg));
1693
1694 // The current frame has caught an exception we need to dispatch to the
1695 // handler. We can get here because a native interpreter frame caught
1696 // an exception, in which case there is no handler and we must rethrow.
1697 // If it is a vanilla interpreted frame then we simply drop into the
1698 // interpreter and let it do the lookup.
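// Dispatch sketch for the code below:
//
//   thread->pending_exception = O0;        // record the exception first
//   if (method->is_native())  unwind this frame and forward to the caller;
//   else                      goto return_with_exception;  // interpreter finds the handler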
1699
1700 Interpreter::_rethrow_exception_entry = __ pc();
1701
1702 Label return_with_exception;
1703 Label unwind_and_forward;
1704
1705 // O0: exception
1706 // O7: throwing pc
1707
1708 // We want exception in the thread no matter what we ultimately decide about frame type.
1709
1710 Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
1711 __ verify_thread();
1712 __ st_ptr(O0, exception_addr);
1713
1714 // get the Method*
1715 __ ld_ptr(STATE(_method), G5_method);
1716
1717 // is this current frame vanilla or native?
1718
1719 __ ld(access_flags, Gtmp1);
1720 __ btst(JVM_ACC_NATIVE, Gtmp1);
1721 __ br(Assembler::zero, false, Assembler::pt, return_with_exception); // vanilla interpreted frame; handle directly
1722 __ delayed()->nop();
1723
1723 // We drop through to unwind a native interpreted frame with a pending exception.
1724 // We jump here for the initial interpreter frame with exception pending.
1725 // We unwind the current activation and forward it to our caller.
1727
1728 __ bind(unwind_and_forward);
1729
1730 // Unwind the frame and jump to forward exception. Unwinding will place the throwing pc in O7.