
  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.
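  // Background: PPC64 is not multi-copy atomic, so two readers may observe
  // two independent writes in different orders (the IRIW litmus test). With
  // support_IRIW_for_not_multiple_copy_atomic_cpu set, HotSpot therefore
  // emits a full sync (hwsync) before each volatile load instead of after
  // each volatile store.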

  // Check field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
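  // Both rldicl uses above are unsigned bit-field extracts (rotate left,
  // then clear high bits); in C terms, roughly:
  //   is_volatile = (flags >> is_volatile_shift) & 1;
  //   tos_state   = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);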

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();
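  // Dispatch works as follows: branch_table[tos_state] holds the address of
  // the non-volatile entry point, and the single instruction directly in
  // front of it is the barrier forming the volatile entry point. The target
  // computed above is therefore, in effect:
  //   target = branch_table[tos_state] - (is_volatile ? BytesPerInstWord : 0);
  // and the indirect branch goes through the count register (mtctr/bctr).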

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);
#endif

  if (!is_static && rc == may_not_rewrite) {
    // We reuse the code from is_static. It's jumped to via the table above.
    return;
  }

#ifdef ASSERT
  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
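  // This assert checks exactly the invariant the table dispatch relies on:
  // the volatile entry point is one instruction (BytesPerInstWord) in front
  // of the non-volatile entry point.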
// ...

  jvmti_post_field_mod(Rcache, Rscratch, is_static);
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.

  // Check the field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
  }
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();
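  // Same branch-table trick as on the load side, but the leading barrier at
  // each volatile entry is release() (lwsync) rather than fence(), since a
  // store needs a release in front of it. With
  // support_IRIW_for_not_multiple_copy_atomic_cpu, no trailing fence is
  // needed after the store because volatile loads start with a full fence;
  // otherwise CR_is_vol is tested after the store (see Lvolatile below).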

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

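  // Each type-state entry below has the same shape: release(), then the
  // non-volatile entry point, pop of the value, optional object pop/check,
  // the store, optional bytecode rewriting, and the conditional branch to
  // the trailing fence. The align(32, 28, 28) requests presumably keep the
  // release() and its entry point within one fetch block; treat that
  // rationale as an assumption, not documented behavior.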
  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  }
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lztos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ztos] == 0, "can't compute twice");
  branch_table[ztos] = __ pc(); // non-volatile_entry point
  __ pop(ztos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ andi(R17_tos, R17_tos, 0x1); // ztos: a boolean is stored as 0 or 1; keep only the LSB.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
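  // Note: do_oop_store above dispatches through the barrier-set assembler,
  // so it also emits whatever GC write barriers and compressed-oops encoding
  // the active collector requires; that is why it takes three scratch
  // registers.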
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
  }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(Lvolatile);
    __ fence();
  }
  // fallthru: __ b(Lexit);
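  // Net ordering for a volatile put on CPUs without IRIW support:
  //   release(); store; fence();
  // i.e. lwsync in front of the store and hwsync behind it. All type-state
  // paths above share the single trailing fence by branching to Lvolatile.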

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "put initialization");
    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
// ...

  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
                 Rclass_or_obj = R31,      // Needs to survive C call.
                 Roffset       = R22_tmp2, // Needs to survive C call.
                 Rflags        = R3_ARG1,
                 Rscratch      = R11_scratch1,
                 Rscratch2     = R12_scratch2,
                 Rscratch3     = R4_ARG2;
  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

  // Constant pool already resolved => Load flags and offset of field.
  __ get_cache_and_index_at_bcp(Rcache, 1);
  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // Get the obj and the final store addr.
  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  {
    Label LnotVolatile;
    __ beq(CCR0, LnotVolatile);
    __ release();
    __ align(32, 12);
    __ bind(LnotVolatile);
  }
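  // Mirror of putfield_or_static: for a volatile fast_Xputfield, release()
  // (lwsync) is emitted in front of the store; the matching trailing fence()
  // follows the switch below, again only on CPUs without IRIW support.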

  // Do the store and fencing.
  switch(bytecode()) {
    case Bytecodes::_fast_aputfield:
      // Store into the field.
      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
      break;

    case Bytecodes::_fast_iputfield:
      __ stwx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_lputfield:
      __ stdx(R17_tos, Rclass_or_obj, Roffset);
      break;

    // The excerpt has a gap here; the ztos case below is reconstructed on the
    // assumption that it mirrors the zputfield rewrite above: mask the boolean
    // to its LSB, then fall through to the byte store.
    case Bytecodes::_fast_zputfield:
      __ andi(R17_tos, R17_tos, 0x1);
      // fall through
    case Bytecodes::_fast_bputfield:
      __ stbx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_sputfield:
      __ sthx(R17_tos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_fputfield:
      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    case Bytecodes::_fast_dputfield:
      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
      break;

    default: ShouldNotReachHere();
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    Label LVolatile;
    __ beq(CR_is_vol, LVolatile);
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ align(32, 12);
    __ bind(LVolatile);
    __ fence();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 // ...
  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, false, true);

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);
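  // Two code paths per type below: the fall-through path handles the common
  // non-volatile case and dispatches immediately; LisVolatile re-does the
  // load with acquire semantics. The acquire is the usual PPC64
  // load/dependent-branch/isync idiom: twi_0 (or fcmpu plus a never-taken
  // branch for FP values) makes the isync depend on the loaded value, which
  // orders all later loads behind this one more cheaply than a full sync.
  // On IRIW-sensitive CPUs, a leading fence() is added as well.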

  switch(bytecode()) {
    case Bytecodes::_fast_agetfield:
    {
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_igetfield:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_lgetfield:
    {
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ ldx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_bgetfield:
    {
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ extsb(R17_tos, R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ extsb(R17_tos, R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_cgetfield:
    {
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_sgetfield:
    {
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lhax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case Bytecodes::_fast_fgetfield:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    case Bytecodes::_fast_dgetfield:
    {
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Label LisVolatile;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R17_tos,
                 Roffset       = R22_tmp2,
                 Rflags        = R23_tmp3,
                 // ...
  // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.

  // Needed to report exception at the correct bcp.
  __ addi(R14_bcp, R14_bcp, 1);
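  // fast_xaccess implements the fused access bytecodes (an aload_0 combined
  // with a following fast getfield, e.g. _fast_iaccess_0), hence the bcp
  // adjustment: bumping bcp makes a null-check exception report against the
  // embedded field access, and the addi(-1) at the end of the method undoes
  // it before normal dispatch.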

  // Get the load address.
  __ null_check_throw(Rclass_or_obj, -1, Rscratch);

  // Get volatile flag.
  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  __ bne(CCR0, LisVolatile);

  switch(state) {
    case atos:
    {
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
      __ verify_oop(R17_tos);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case itos:
    {
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lwax(R17_tos, Rclass_or_obj, Roffset);
      __ twi_0(R17_tos);
      __ isync();
      break;
    }
    case ftos:
    {
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.

      __ bind(LisVolatile);
      Label Ldummy;
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
      __ bne_predict_not_taken(CCR0, Ldummy);
      __ bind(Ldummy);
      __ isync();
      break;
    }
    default: ShouldNotReachHere();
  }
  __ addi(R14_bcp, R14_bcp, -1);
}

// ============================================================================
// Calls

// Common code for invoke
//
// Input:
//   - byte_no
//