static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r1, decorators);
}

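// n.b. do_oop_load above routes the load through the BarrierSetAssembler of
// the currently selected GC: the decorators (IN_HEAP, IS_ARRAY, ...) pick the
// appropriate load barrier, and r10/r1 are handed to load_heap_oop as
// temporaries for that barrier code, so they must not alias dst.
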
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_qputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
      // ...
void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  if (EnableValhalla && ValueArrayFlatten) {
    Label is_flat_array, done;

    __ test_flat_array_oop(r0, r10 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
}

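// n.b. in aaload above, a flat value array holds its elements inline, so
// there is no oop to load: that path calls into the runtime
// (InterpreterRuntime::value_array_load), which buffers the element on the
// heap and returns the buffered oop in r0. The non-flat path is the usual
// oop load through the GC barrier.
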
void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // ...
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));

  index_check(r3, r2);     // kills r1

  // Fold the array base offset into the scaled index: element_address then
  // resolves to array + base + index*oopSize. This frees r2.
  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

  // do array store check - check for NULL value first
  __ cbz(r0, is_null);

  // Load the array klass into r1; if this is a flat array, branch off to
  // the flat-array store path below.
  Label is_flat_array;
  if (ValueArrayFlatten) {
    __ load_klass(r1, r3);
    __ test_flat_array_klass(r1, r10 /*temp*/, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  if (EnableValhalla) {
    Label is_null_into_value_array_npe, store_null;

    __ load_klass(r0, r3);
    // It is not possible to store null in a flat array.
    __ test_flat_array_klass(r0, r1, is_null_into_value_array_npe);

    // Storing null into an objArray whose element klass is a value type
    // (one that could not be flattened) must have the same semantics as a
    // flat array, i.e. throw NPE.
    __ ldr(r0, Address(r0, ObjArrayKlass::element_klass_offset()));
    __ test_klass_is_value(r0, r1, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (EnableValhalla) {
    // r0 - value, r2 - index, r3 - array.  r1 - loaded array klass
    // store non-null value
    __ bind(is_flat_array);

    // Simplistic type check...
    Label is_type_ok;

    // Profile the not-null value's klass.
    // Load value class
    __ load_klass(r10, r0);
    __ profile_typecheck(r2, r1, r0); // blows r2, and r0

    // A flat value array needs an exact type match: check that r10 == r0,
    // i.e. the value's klass equals the array's element klass.

    // Move element klass into r0
    __ ldr(r0, Address(r1, ArrayKlass::element_klass_offset()));
    __ cmp(r0, r10);
    __ br(Assembler::EQ, is_type_ok);

    __ profile_typecheck_failed(r2);
    __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
    __ bind(is_type_ok);

    // Reload everything from TOS to be safe, because profile_typecheck
    // blows r2 and r0.
    __ ldr(r1, at_tos());    // value
    __ mov(r2, r3);          // array
    __ ldr(r3, at_tos_p1()); // index
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), r1, r2, r3);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

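// n.b. aastore above thus has three outcomes for a value array: a non-null
// value of exactly the element klass is stored through the runtime
// (InterpreterRuntime::value_array_store), a mismatched klass throws
// ArrayStoreException, and a null throws NullPointerException; ordinary
// object arrays keep the usual subtype-check-plus-barrier path.
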
void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
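  // (continuation elided) In the full file the layout helper's boolean
  // diff bit (Klass::layout_helper_boolean_diffbit()) decides whether this
  // is a T_BOOLEAN array, in which case the value is first masked down to
  // its lowest bit before the store.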
  // ...
}

void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_acmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label taken, not_taken;
  __ pop_ptr(r1);
  __ cmpoop(r1, r0);

  if (EnableValhalla) {
    __ br(Assembler::NE, (cc == not_equal) ? taken : not_taken);
    __ cbz(r1, (cc == equal) ? taken : not_taken);
    // The operands are identical and non-null; check whether the operand
    // is a value type, i.e. its mark word carries the always-locked pattern.
    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
    __ andr(r2, r2, markOopDesc::always_locked_pattern);
    __ cmp(r2, (u1) markOopDesc::always_locked_pattern);
    cc = (cc == equal) ? not_equal : equal;
  }

  __ br(j_not(cc), not_taken);
  __ bind(taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

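// n.b. under Valhalla, acmp in if_acmp above cannot be plain reference
// equality: two equal, non-null references still compare as "not equal"
// when they refer to a value type instance, which the code detects by
// testing the always-locked pattern in the mark word and then inverting
// the condition.
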
void TemplateTable::ret() {
  transition(vtos, vtos);
  // We might be moving to a safepoint.  The thread which calls
  // Interpreter::notice_safepoints() will effectively flush its cache
  // when it makes a system call, but we need to do something to
  // ensure that we see the changed dispatch table.
  __ membar(MacroAssembler::LoadLoad);

  locals_index(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
  // ...

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!UseBarriersForVolatile) {
    Label notVolatile;
    __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  const Address field(obj, off);

  Label Done, notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  // x86 uses a shift and mask, or just a shift with an assert that the
  // mask is not needed; aarch64 simply uses a bitfield extract.
  __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(flags, notByte);

  // Don't rewrite getstatic, only getfield
  if (is_static) rc = may_not_rewrite;

  // btos
  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notByte);
  __ cmp(flags, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notBool);
  __ cmp(flags, (u1)atos);
  __ br(Assembler::NE, notObj);
  // atos
  if (!EnableValhalla) {
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ push(atos);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
    }
    __ b(Done);
  } else { // Valhalla
    if (is_static) {
      __ load_heap_oop(r0, field);
      Label isFlattenable, isUninitialized;
      // A flattenable static field must never be observed as null: if it
      // reads as null it simply has not been initialized yet, and the VM
      // call below supplies the default value.
      __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
      // Not flattenable case
      __ push(atos);
      __ b(Done);
      // Flattenable case, must not return null even if uninitialized
      __ bind(isFlattenable);
      __ cbz(r0, isUninitialized);
      __ push(atos);
      __ b(Done);
      __ bind(isUninitialized);
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field), obj, raw_flags);
      __ verify_oop(r0);
      __ push(atos);
      __ b(Done);
    } else {
      Label isFlattened, isInitialized, isFlattenable, rewriteFlattenable;
      __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
      // Non-flattenable field case, also covers the object case
      __ load_heap_oop(r0, field);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
      }
      __ b(Done);
      __ bind(isFlattenable);
      __ test_field_is_flattened(raw_flags, r10, isFlattened);
      // Non-flattened field case
      __ load_heap_oop(r0, field);
      __ cbnz(r0, isInitialized);
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), obj, raw_flags);
      __ bind(isInitialized);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewriteFlattenable);
      __ bind(isFlattened);
      __ ldr(r10, Address(cache, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), obj, raw_flags, r10);
      __ verify_oop(r0);
      __ push(atos);
      __ bind(rewriteFlattenable);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_qgetfield, bc, r1);
      }
      __ b(Done);
    }
  }

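  // n.b. the instance-field path above distinguishes three cases: a
  // non-flattenable field is a plain oop load; a flattenable but
  // non-flattened field is an oop load plus a runtime call to materialize
  // the default value when the read is still null; a flattened field is
  // read entirely in the runtime (InterpreterRuntime::read_flattened_field),
  // which buffers the inlined payload as a heap object.  The latter two
  // rewrite to _fast_qgetfield, the first keeps _fast_agetfield.
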
  __ bind(notObj);
  __ cmp(flags, (u1)itos);
  __ br(Assembler::NE, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notInt);
  __ cmp(flags, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
  // ...
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache  = r2;
  const Register index  = r3;
  const Register obj    = r2;
  const Register off    = r19;
  const Register flags  = r0;
  const Register flags2 = r6;
  const Register bc     = r4;

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  Label Done;
  __ mov(r5, flags);

  {
    Label notVolatile;
    __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  // field address
  const Address field(obj, off);

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ mov(flags2, flags);

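  // n.b. flags is aliased to r0, which the pops below clobber, and the
  // bitfield extract that follows keeps only the tos state bits; flags2
  // preserves an intact copy so the Valhalla paths can still test the
  // flattenable/flattened bits after the value has been popped.
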
  // x86 uses a shift and mask, or just a shift with an assert that the
  // mask is not needed; aarch64 simply uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(flags, notByte);

  // Don't rewrite putstatic, only putfield
  if (is_static) rc = may_not_rewrite;

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notByte);
  __ cmp(flags, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notBool);
  __ cmp(flags, (u1)atos);
  __ br(Assembler::NE, notObj);

  // atos
  {
    if (!EnableValhalla) {
      __ pop(atos);
      if (!is_static) pop_and_check_object(obj);
      // Store into the field
      do_oop_store(_masm, field, r0, IN_HEAP);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
      }
      __ b(Done);
    } else { // Valhalla
      __ pop(atos);
      if (is_static) {
        Label notFlattenable;
        __ test_field_is_not_flattenable(flags2, r10, notFlattenable);
        __ null_check(r0);
        __ bind(notFlattenable);
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(Done);
      } else {
        Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
        __ test_field_is_flattenable(flags2, r10, isFlattenable);
        // Not flattenable case: covers non-flattenable values as well as objects
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ bind(rewriteNotFlattenable);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
        // Implementation of the flattenable semantic
        __ bind(isFlattenable);
        __ null_check(r0);
        __ test_field_is_flattened(flags2, r10, isFlattened);
        // Not flattened case
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(rewriteFlattenable);
        __ bind(isFlattened);
        pop_and_check_object(obj);
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, off, obj);
        __ bind(rewriteFlattenable);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_qputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
      }
    } // Valhalla
  }

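  // n.b. the atos block above mirrors the getfield logic: a flattenable
  // field rejects a null value (null_check) before the object is popped,
  // and a flattened field is written by the runtime
  // (InterpreterRuntime::write_flattened_value), which copies the value's
  // payload into the holder; both flattenable paths rewrite to
  // _fast_qputfield.
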
  __ bind(notObj);
  __ cmp(flags, (u1)itos);
  __ br(Assembler::NE, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notInt);
  __ cmp(flags, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ...
void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod()
{
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(c_rarg3, Address(rscratch1));
    __ cbzw(c_rarg3, L2);
    __ pop_ptr(r19);  // copy the object pointer from tos
    __ verify_oop(r19);
    __ push_ptr(r19); // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {  // load values into the jvalue object
    case Bytecodes::_fast_qputfield: // fall through
    case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(r0); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(r0); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, esp);  // points to jvalue on the stack
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
    __ verify_oop(r19);
    // r19: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               r19, c_rarg2, c_rarg3);

    switch (bytecode()) {  // restore tos values
    case Bytecodes::_fast_qputfield: // fall through
    case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
    default: break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state)
{
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();
  // ...

  // replace index with field offset from cache entry
  __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

  {
    Label notVolatile;
    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  Label notVolatile;

  // Get object from stack
  pop_and_check_object(r2);

  // field address
  const Address field(r2, r1);

  // access field
  switch (bytecode()) {
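  // n.b. _fast_qputfield below is the quickened form of a putfield on a
  // flattenable value-type field: a null value always throws NPE, a
  // flattened field is written through the runtime, and only a buffered
  // (non-flattened) field is a plain oop store.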
  case Bytecodes::_fast_qputfield:
    {
      Label isFlattened, done;
      __ null_check(r0);
      __ test_field_is_flattened(r3, r10, isFlattened);
      // Not flattened case
      do_oop_store(_masm, field, r0, IN_HEAP);
      __ b(done);
      __ bind(isFlattened);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, r1, r2);
      __ bind(done);
    }
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, r0, IN_HEAP);
    break;
  case Bytecodes::_fast_lputfield:
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_iputfield:
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_zputfield:
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_bputfield:
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_sputfield:
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_cputfield:
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
  // ...

  // r0: object
  __ verify_oop(r0);
  __ null_check(r0);
  const Address field(r0, r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!UseBarriersForVolatile) {
    Label notVolatile;
    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // access field
  switch (bytecode()) {
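  // n.b. _fast_qgetfield below mirrors the slow-path getfield logic for a
  // flattenable field: a null read of a buffered field fetches the default
  // value from the runtime, and a flattened field is read via
  // InterpreterRuntime::read_flattened_field.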
  case Bytecodes::_fast_qgetfield:
    {
      Label isFlattened, isInitialized, Done;
      // Note: the cache entry flags are reloaded each time they are needed
      // below; a single load would do, but this stays close to the
      // original (x86) code.
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ test_field_is_flattened(r10, r10, isFlattened);
      // Non-flattened field case
      __ mov(r10, r0);
      __ load_heap_oop(r0, field);
      __ cbnz(r0, isInitialized);
      __ mov(r0, r10);
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), r0, r10);
      __ bind(isInitialized);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(isFlattened);
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
      __ ldr(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), r0, r10, r3);
      __ verify_oop(r0);
      __ bind(Done);
    }
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case Bytecodes::_fast_lgetfield:
    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
    // ...
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
      __ pop(atos); // restore the return value
    }
    __ b(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::defaultvalue() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
          c_rarg1, c_rarg2);
  __ verify_oop(r0);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::withfield() {
  transition(vtos, atos);
  resolve_cache_and_index(f2_byte, c_rarg1 /*cache*/, c_rarg2 /*index*/, sizeof(u2));

  // n.b. unlike x86, the cache entry is addressed as rcpool plus the
  // indexed offset, so we pass rcpool to meet shared code expectations.

  call_VM(r1, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), rcpool);
  __ verify_oop(r1);
  __ add(esp, esp, r0);
  __ mov(r0, r1);
}

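// n.b. withfield above relies on the runtime returning two things: the new
// buffered value, delivered through the usual vm_result path into r1, and,
// as the native return value left in r0, the number of bytes by which the
// operand stack must shrink; hence the add to esp before the result is
// moved into r0.
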
void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// ...

  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check.  Blows r2, r5.  Object in r3.
  // Superklass in r0.  Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3

  __ b(done);
  __ bind(is_null);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ profile_null_seen(r2);
  }

  if (EnableValhalla) {
    // Get cpool & tags index
    __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
    __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
    // See if bytecode has already been quickened
    __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
    __ lea(r1, Address(rscratch1, r19));
    __ ldarb(r1, r1);
    // See if CP entry is a Q-descriptor
    __ andr(r1, r1, JVM_CONSTANT_QDESC_BIT);
    __ cmp(r1, (u1) JVM_CONSTANT_QDESC_BIT);
    __ br(Assembler::NE, done);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
  }

  __ bind(done);
}

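// n.b. the EnableValhalla block above is the Valhalla twist on checkcast's
// null handling: null passes a checkcast against an ordinary class
// descriptor as always, but a cast to a Q-descriptor (a null-free value
// type) must throw NullPointerException, which the tags-array test decides.
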
void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quickened
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // ...