< prev index next >

src/cpu/aarch64/vm/templateTable_aarch64.cpp

Print this page
rev 13551 : imported patch gcinterface-aarch64-5.patch


   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"

  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.inline.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 
  42 #define __ _masm->
  43 
  44 // Platform-dependent initialization
  45 
  46 void TemplateTable::pd_initialize() {
  47   // No aarch64 specific initialization


 124 
 125 // Condition conversion
 126 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 127   switch (cc) {
 128   case TemplateTable::equal        : return Assembler::NE;
 129   case TemplateTable::not_equal    : return Assembler::EQ;
 130   case TemplateTable::less         : return Assembler::GE;
 131   case TemplateTable::less_equal   : return Assembler::GT;
 132   case TemplateTable::greater      : return Assembler::LE;
 133   case TemplateTable::greater_equal: return Assembler::LT;
 134   }
 135   ShouldNotReachHere();
 136   return Assembler::EQ;
 137 }
 138 
 139 
 140 // Miscellaneous helper routines
 141 // Store an oop (or NULL) at the Address described by obj.
 142 // If val == noreg this means store a NULL
 143 static void do_oop_store(InterpreterMacroAssembler* _masm,
 144                          Address obj,
 145                          Register val,
 146                          BarrierSet::Name barrier,
 147                          bool precise) {
 148   assert(val == noreg || val == r0, "parameter is just for looks");
 149   switch (barrier) {
 150 #if INCLUDE_ALL_GCS
 151     case BarrierSet::G1BarrierSet:
 152       {
 153         // flatten object address if needed
 154         if (obj.index() == noreg && obj.offset() == 0) {
 155           if (obj.base() != r3) {
 156             __ mov(r3, obj.base());
 157           }
 158         } else {
 159           __ lea(r3, obj);
 160         }
 161         __ g1_write_barrier_pre(r3 /* obj */,
 162                                 r1 /* pre_val */,
 163                                 rthread /* thread */,
 164                                 r10  /* tmp */,
 165                                 val != noreg /* tosca_live */,
 166                                 false /* expand_call */);
 167         if (val == noreg) {
 168           __ store_heap_oop_null(Address(r3, 0));
 169         } else {
 170           // G1 barrier needs uncompressed oop for region cross check.
 171           Register new_val = val;
 172           if (UseCompressedOops) {
 173             new_val = rscratch2;
 174             __ mov(new_val, val);
 175           }
 176           __ store_heap_oop(Address(r3, 0), val);
 177           __ g1_write_barrier_post(r3 /* store_adr */,
 178                                    new_val /* new_val */,
 179                                    rthread /* thread */,
 180                                    r10 /* tmp */,
 181                                    r1 /* tmp2 */);
 182         }
 183 
 184       }
 185       break;
 186 #endif // INCLUDE_ALL_GCS
 187     case BarrierSet::CardTableModRef:
 188       {
 189         if (val == noreg) {
 190           __ store_heap_oop_null(obj);
 191         } else {
 192           __ store_heap_oop(obj, val);
 193           // flatten object address if needed
 194           if (!precise || (obj.index() == noreg && obj.offset() == 0)) {
 195             __ store_check(obj.base());
 196           } else {
 197             __ lea(r3, obj);
 198             __ store_check(r3);
 199           }
 200         }
 201       }
 202       break;
 203     case BarrierSet::ModRef:
 204       if (val == noreg) {
 205         __ store_heap_oop_null(obj);
 206       } else {
 207         __ store_heap_oop(obj, val);
 208       }
 209       break;
 210     default      :
 211       ShouldNotReachHere();
 212 
 213   }





 214 }
 215 
 216 Address TemplateTable::at_bcp(int offset) {
 217   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 218   return Address(rbcp, offset);
 219 }
 220 
 221 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 222                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 223                                    int byte_no)
 224 {
 225   if (!RewriteBytecodes)  return;
 226   Label L_patch_done;
 227 
 228   switch (bc) {
 229   case Bytecodes::_fast_aputfield:
 230   case Bytecodes::_fast_bputfield:
 231   case Bytecodes::_fast_zputfield:
 232   case Bytecodes::_fast_cputfield:
 233   case Bytecodes::_fast_dputfield:


 710   transition(itos, dtos);
 711   __ mov(r1, r0);
 712   __ pop_ptr(r0);
 713   // r0: array
 714   // r1: index
 715   index_check(r0, r1); // leaves index in r1, kills rscratch1
 716   __ lea(r1,  Address(r0, r1, Address::uxtw(3)));
 717   __ ldrd(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 718 }
 719 
 720 void TemplateTable::aaload()
 721 {
 722   transition(itos, atos);
 723   __ mov(r1, r0);
 724   __ pop_ptr(r0);
 725   // r0: array
 726   // r1: index
 727   index_check(r0, r1); // leaves index in r1, kills rscratch1
 728   int s = (UseCompressedOops ? 2 : 3);
 729   __ lea(r1, Address(r0, r1, Address::uxtw(s)));
 730   __ load_heap_oop(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));



 731 }
 732 
 733 void TemplateTable::baload()
 734 {
 735   transition(itos, itos);
 736   __ mov(r1, r0);
 737   __ pop_ptr(r0);
 738   // r0: array
 739   // r1: index
 740   index_check(r0, r1); // leaves index in r1, kills rscratch1
 741   __ lea(r1,  Address(r0, r1, Address::uxtw(0)));
 742   __ load_signed_byte(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_BYTE)));
 743 }
 744 
 745 void TemplateTable::caload()
 746 {
 747   transition(itos, itos);
 748   __ mov(r1, r0);
 749   __ pop_ptr(r0);
 750   // r0: array


1038   // Move superklass into r0
1039   __ load_klass(r0, r3);
1040   __ ldr(r0, Address(r0,
1041                      ObjArrayKlass::element_klass_offset()));
1042   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
1043 
1044   // Generate subtype check.  Blows r2, r5
1045   // Superklass in r0.  Subklass in r1.
1046   __ gen_subtype_check(r1, ok_is_subtype);
1047 
1048   // Come here on failure
1049   // object is at TOS
1050   __ b(Interpreter::_throw_ArrayStoreException_entry);
1051 
1052   // Come here on success
1053   __ bind(ok_is_subtype);
1054 
1055   // Get the value we will store
1056   __ ldr(r0, at_tos());
1057   // Now store using the appropriate barrier
1058   do_oop_store(_masm, element_address, r0, _bs->kind(), true);
1059   __ b(done);
1060 
1061   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
1062   __ bind(is_null);
1063   __ profile_null_seen(r2);
1064 
1065   // Store a NULL
1066   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1067 
1068   // Pop stack arguments
1069   __ bind(done);
1070   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1071 }
1072 
1073 void TemplateTable::bastore()
1074 {
1075   transition(itos, vtos);
1076   __ pop_i(r1);
1077   __ pop_ptr(r3);
1078   // r0: value
1079   // r1: index
1080   // r3: array
1081   index_check(r3, r1); // prefer index in r1
1082 
1083   // Need to check whether array is boolean or byte
1084   // since both types share the bastore bytecode.
1085   __ load_klass(r2, r3);
1086   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));


2438   __ b(Done);
2439 
2440   __ bind(notByte);
2441   __ cmp(flags, ztos);
2442   __ br(Assembler::NE, notBool);
2443 
2444   // ztos (same code as btos)
2445   __ ldrsb(r0, field);
2446   __ push(ztos);
2447   // Rewrite bytecode to be faster
2448   if (rc == may_rewrite) {
2449     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2450     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2451   }
2452   __ b(Done);
2453 
2454   __ bind(notBool);
2455   __ cmp(flags, atos);
2456   __ br(Assembler::NE, notObj);
2457   // atos
2458   __ load_heap_oop(r0, field);
2459   __ push(atos);
2460   if (rc == may_rewrite) {
2461     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2462   }
2463   __ b(Done);
2464 
2465   __ bind(notObj);
2466   __ cmp(flags, itos);
2467   __ br(Assembler::NE, notInt);
2468   // itos
2469   __ ldrw(r0, field);
2470   __ push(itos);
2471   // Rewrite bytecode to be faster
2472   if (rc == may_rewrite) {
2473     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2474   }
2475   __ b(Done);
2476 
2477   __ bind(notInt);
2478   __ cmp(flags, ctos);


2681   {
2682     __ pop(ztos);
2683     if (!is_static) pop_and_check_object(obj);
2684     __ andw(r0, r0, 0x1);
2685     __ strb(r0, field);
2686     if (rc == may_rewrite) {
2687       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2688     }
2689     __ b(Done);
2690   }
2691 
2692   __ bind(notBool);
2693   __ cmp(flags, atos);
2694   __ br(Assembler::NE, notObj);
2695 
2696   // atos
2697   {
2698     __ pop(atos);
2699     if (!is_static) pop_and_check_object(obj);
2700     // Store into the field
2701     do_oop_store(_masm, field, r0, _bs->kind(), false);
2702     if (rc == may_rewrite) {
2703       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2704     }
2705     __ b(Done);
2706   }
2707 
2708   __ bind(notObj);
2709   __ cmp(flags, itos);
2710   __ br(Assembler::NE, notInt);
2711 
2712   // itos
2713   {
2714     __ pop(itos);
2715     if (!is_static) pop_and_check_object(obj);
2716     __ strw(r0, field);
2717     if (rc == may_rewrite) {
2718       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2719     }
2720     __ b(Done);
2721   }


2901   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2902 
2903   {
2904     Label notVolatile;
2905     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2906     __ membar(MacroAssembler::StoreStore);
2907     __ bind(notVolatile);
2908   }
2909 
2910   Label notVolatile;
2911 
2912   // Get object from stack
2913   pop_and_check_object(r2);
2914 
2915   // field address
2916   const Address field(r2, r1);
2917 
2918   // access field
2919   switch (bytecode()) {
2920   case Bytecodes::_fast_aputfield:
2921     do_oop_store(_masm, field, r0, _bs->kind(), false);
2922     break;
2923   case Bytecodes::_fast_lputfield:
2924     __ str(r0, field);
2925     break;
2926   case Bytecodes::_fast_iputfield:
2927     __ strw(r0, field);
2928     break;
2929   case Bytecodes::_fast_zputfield:
2930     __ andw(r0, r0, 0x1);  // boolean is true if LSB is 1
2931     // fall through to bputfield
2932   case Bytecodes::_fast_bputfield:
2933     __ strb(r0, field);
2934     break;
2935   case Bytecodes::_fast_sputfield:
2936     // fall through
2937   case Bytecodes::_fast_cputfield:
2938     __ strh(r0, field);
2939     break;
2940   case Bytecodes::_fast_fputfield:
2941     __ strs(v0, field);


2993   __ verify_oop(r0);
2994   __ null_check(r0);
2995   const Address field(r0, r1);
2996 
2997   // 8179954: We need to make sure that the code generated for
2998   // volatile accesses forms a sequentially-consistent set of
2999   // operations when combined with STLR and LDAR.  Without a leading
3000   // membar it's possible for a simple Dekker test to fail if loads
3001   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3002   // the stores in one method and we interpret the loads in another.
3003   if (! UseBarriersForVolatile) {
3004     Label notVolatile;
3005     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3006     __ membar(MacroAssembler::AnyAny);
3007     __ bind(notVolatile);
3008   }
3009 
3010   // access field
3011   switch (bytecode()) {
3012   case Bytecodes::_fast_agetfield:
3013     __ load_heap_oop(r0, field);
3014     __ verify_oop(r0);
3015     break;
3016   case Bytecodes::_fast_lgetfield:
3017     __ ldr(r0, field);
3018     break;
3019   case Bytecodes::_fast_igetfield:
3020     __ ldrw(r0, field);
3021     break;
3022   case Bytecodes::_fast_bgetfield:
3023     __ load_signed_byte(r0, field);
3024     break;
3025   case Bytecodes::_fast_sgetfield:
3026     __ load_signed_short(r0, field);
3027     break;
3028   case Bytecodes::_fast_cgetfield:
3029     __ load_unsigned_short(r0, field);
3030     break;
3031   case Bytecodes::_fast_fgetfield:
3032     __ ldrs(v0, field);
3033     break;


3063   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3064   // the stores in one method and we interpret the loads in another.
3065   if (! UseBarriersForVolatile) {
3066     Label notVolatile;
3067     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3068                                      ConstantPoolCacheEntry::flags_offset())));
3069     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3070     __ membar(MacroAssembler::AnyAny);
3071     __ bind(notVolatile);
3072   }
3073 
3074   // make sure exception is reported in correct bcp range (getfield is
3075   // next instruction)
3076   __ increment(rbcp);
3077   __ null_check(r0);
3078   switch (state) {
3079   case itos:
3080     __ ldrw(r0, Address(r0, r1, Address::lsl(0)));
3081     break;
3082   case atos:
3083     __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0)));
3084     __ verify_oop(r0);
3085     break;
3086   case ftos:
3087     __ ldrs(v0, Address(r0, r1, Address::lsl(0)));
3088     break;
3089   default:
3090     ShouldNotReachHere();
3091   }
3092 
3093   {
3094     Label notVolatile;
3095     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3096                                      ConstantPoolCacheEntry::flags_offset())));
3097     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3098     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3099     __ bind(notVolatile);
3100   }
3101 
3102   __ decrement(rbcp);
3103 }




   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "gc/shared/barrierSetCodeGen.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/interp_masm.hpp"
  32 #include "interpreter/templateTable.hpp"
  33 #include "memory/universe.inline.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/method.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "prims/methodHandles.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 // Platform-dependent initialization
  46 
  47 void TemplateTable::pd_initialize() {
  48   // No aarch64 specific initialization


 125 
 126 // Condition conversion
 127 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 128   switch (cc) {
 129   case TemplateTable::equal        : return Assembler::NE;
 130   case TemplateTable::not_equal    : return Assembler::EQ;
 131   case TemplateTable::less         : return Assembler::GE;
 132   case TemplateTable::less_equal   : return Assembler::GT;
 133   case TemplateTable::greater      : return Assembler::LE;
 134   case TemplateTable::greater_equal: return Assembler::LT;
 135   }
 136   ShouldNotReachHere();
 137   return Assembler::EQ;
 138 }
 139 
 140 
 141 // Miscellaneous helper routines
 142 // Store an oop (or NULL) at the Address described by obj.
 143 // If val == noreg this means store a NULL
 144 static void do_oop_store(InterpreterMacroAssembler* _masm,
 145                          Address dst,
 146                          Register val,
 147                          DecoratorSet decorators) {

 148   assert(val == noreg || val == r0, "parameter is just for looks");
 149   BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
 150   code_gen->store_at(_masm, decorators, T_OBJECT, dst, val, /*tmp1*/ r10, /*tmp2*/ r1);
 151 }




























































 152 
 153 static void do_oop_load(InterpreterMacroAssembler* _masm,
 154                         Address src,
 155                         Register dst,
 156                         DecoratorSet decorators) {
 157   BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
 158   code_gen->load_at(_masm, decorators, T_OBJECT, dst, src, /*tmp1*/ r10, /*tmp_thread*/ r1);
 159 }
 160 
 161 Address TemplateTable::at_bcp(int offset) {
 162   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 163   return Address(rbcp, offset);
 164 }
 165 
 166 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 167                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 168                                    int byte_no)
 169 {
 170   if (!RewriteBytecodes)  return;
 171   Label L_patch_done;
 172 
 173   switch (bc) {
 174   case Bytecodes::_fast_aputfield:
 175   case Bytecodes::_fast_bputfield:
 176   case Bytecodes::_fast_zputfield:
 177   case Bytecodes::_fast_cputfield:
 178   case Bytecodes::_fast_dputfield:


 655   transition(itos, dtos);
 656   __ mov(r1, r0);
 657   __ pop_ptr(r0);
 658   // r0: array
 659   // r1: index
 660   index_check(r0, r1); // leaves index in r1, kills rscratch1
 661   __ lea(r1,  Address(r0, r1, Address::uxtw(3)));
 662   __ ldrd(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 663 }
 664 
 665 void TemplateTable::aaload()
 666 {
 667   transition(itos, atos);
 668   __ mov(r1, r0);
 669   __ pop_ptr(r0);
 670   // r0: array
 671   // r1: index
 672   index_check(r0, r1); // leaves index in r1, kills rscratch1
 673   int s = (UseCompressedOops ? 2 : 3);
 674   __ lea(r1, Address(r0, r1, Address::uxtw(s)));
 675   do_oop_load(_masm,
 676               Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 677               r0,
 678               ACCESS_IN_HEAP | ACCESS_IN_HEAP_ARRAY);
 679 }
 680 
 681 void TemplateTable::baload()
 682 {
 683   transition(itos, itos);
 684   __ mov(r1, r0);
 685   __ pop_ptr(r0);
 686   // r0: array
 687   // r1: index
 688   index_check(r0, r1); // leaves index in r1, kills rscratch1
 689   __ lea(r1,  Address(r0, r1, Address::uxtw(0)));
 690   __ load_signed_byte(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_BYTE)));
 691 }
 692 
 693 void TemplateTable::caload()
 694 {
 695   transition(itos, itos);
 696   __ mov(r1, r0);
 697   __ pop_ptr(r0);
 698   // r0: array


 986   // Move superklass into r0
 987   __ load_klass(r0, r3);
 988   __ ldr(r0, Address(r0,
 989                      ObjArrayKlass::element_klass_offset()));
 990   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
 991 
 992   // Generate subtype check.  Blows r2, r5
 993   // Superklass in r0.  Subklass in r1.
 994   __ gen_subtype_check(r1, ok_is_subtype);
 995 
 996   // Come here on failure
 997   // object is at TOS
 998   __ b(Interpreter::_throw_ArrayStoreException_entry);
 999 
1000   // Come here on success
1001   __ bind(ok_is_subtype);
1002 
1003   // Get the value we will store
1004   __ ldr(r0, at_tos());
1005   // Now store using the appropriate barrier
1006   do_oop_store(_masm, element_address, r0, ACCESS_IN_HEAP | ACCESS_IN_HEAP_ARRAY);
1007   __ b(done);
1008 
1009   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
1010   __ bind(is_null);
1011   __ profile_null_seen(r2);
1012 
1013   // Store a NULL
1014   do_oop_store(_masm, element_address, noreg, ACCESS_IN_HEAP | ACCESS_IN_HEAP_ARRAY);
1015 
1016   // Pop stack arguments
1017   __ bind(done);
1018   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1019 }
1020 
1021 void TemplateTable::bastore()
1022 {
1023   transition(itos, vtos);
1024   __ pop_i(r1);
1025   __ pop_ptr(r3);
1026   // r0: value
1027   // r1: index
1028   // r3: array
1029   index_check(r3, r1); // prefer index in r1
1030 
1031   // Need to check whether array is boolean or byte
1032   // since both types share the bastore bytecode.
1033   __ load_klass(r2, r3);
1034   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));


2386   __ b(Done);
2387 
2388   __ bind(notByte);
2389   __ cmp(flags, ztos);
2390   __ br(Assembler::NE, notBool);
2391 
2392   // ztos (same code as btos)
2393   __ ldrsb(r0, field);
2394   __ push(ztos);
2395   // Rewrite bytecode to be faster
2396   if (rc == may_rewrite) {
2397     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2398     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2399   }
2400   __ b(Done);
2401 
2402   __ bind(notBool);
2403   __ cmp(flags, atos);
2404   __ br(Assembler::NE, notObj);
2405   // atos
2406   do_oop_load(_masm, field, r0, ACCESS_IN_HEAP);
2407   __ push(atos);
2408   if (rc == may_rewrite) {
2409     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2410   }
2411   __ b(Done);
2412 
2413   __ bind(notObj);
2414   __ cmp(flags, itos);
2415   __ br(Assembler::NE, notInt);
2416   // itos
2417   __ ldrw(r0, field);
2418   __ push(itos);
2419   // Rewrite bytecode to be faster
2420   if (rc == may_rewrite) {
2421     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2422   }
2423   __ b(Done);
2424 
2425   __ bind(notInt);
2426   __ cmp(flags, ctos);


2629   {
2630     __ pop(ztos);
2631     if (!is_static) pop_and_check_object(obj);
2632     __ andw(r0, r0, 0x1);
2633     __ strb(r0, field);
2634     if (rc == may_rewrite) {
2635       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2636     }
2637     __ b(Done);
2638   }
2639 
2640   __ bind(notBool);
2641   __ cmp(flags, atos);
2642   __ br(Assembler::NE, notObj);
2643 
2644   // atos
2645   {
2646     __ pop(atos);
2647     if (!is_static) pop_and_check_object(obj);
2648     // Store into the field
2649     do_oop_store(_masm, field, r0, ACCESS_IN_HEAP);
2650     if (rc == may_rewrite) {
2651       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2652     }
2653     __ b(Done);
2654   }
2655 
2656   __ bind(notObj);
2657   __ cmp(flags, itos);
2658   __ br(Assembler::NE, notInt);
2659 
2660   // itos
2661   {
2662     __ pop(itos);
2663     if (!is_static) pop_and_check_object(obj);
2664     __ strw(r0, field);
2665     if (rc == may_rewrite) {
2666       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2667     }
2668     __ b(Done);
2669   }


2849   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2850 
2851   {
2852     Label notVolatile;
2853     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2854     __ membar(MacroAssembler::StoreStore);
2855     __ bind(notVolatile);
2856   }
2857 
2858   Label notVolatile;
2859 
2860   // Get object from stack
2861   pop_and_check_object(r2);
2862 
2863   // field address
2864   const Address field(r2, r1);
2865 
2866   // access field
2867   switch (bytecode()) {
2868   case Bytecodes::_fast_aputfield:
2869     do_oop_store(_masm, field, r0, ACCESS_IN_HEAP);
2870     break;
2871   case Bytecodes::_fast_lputfield:
2872     __ str(r0, field);
2873     break;
2874   case Bytecodes::_fast_iputfield:
2875     __ strw(r0, field);
2876     break;
2877   case Bytecodes::_fast_zputfield:
2878     __ andw(r0, r0, 0x1);  // boolean is true if LSB is 1
2879     // fall through to bputfield
2880   case Bytecodes::_fast_bputfield:
2881     __ strb(r0, field);
2882     break;
2883   case Bytecodes::_fast_sputfield:
2884     // fall through
2885   case Bytecodes::_fast_cputfield:
2886     __ strh(r0, field);
2887     break;
2888   case Bytecodes::_fast_fputfield:
2889     __ strs(v0, field);


2941   __ verify_oop(r0);
2942   __ null_check(r0);
2943   const Address field(r0, r1);
2944 
2945   // 8179954: We need to make sure that the code generated for
2946   // volatile accesses forms a sequentially-consistent set of
2947   // operations when combined with STLR and LDAR.  Without a leading
2948   // membar it's possible for a simple Dekker test to fail if loads
2949   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2950   // the stores in one method and we interpret the loads in another.
2951   if (! UseBarriersForVolatile) {
2952     Label notVolatile;
2953     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2954     __ membar(MacroAssembler::AnyAny);
2955     __ bind(notVolatile);
2956   }
2957 
2958   // access field
2959   switch (bytecode()) {
2960   case Bytecodes::_fast_agetfield:
2961     do_oop_load(_masm, field, r0, ACCESS_IN_HEAP);
2962     __ verify_oop(r0);
2963     break;
2964   case Bytecodes::_fast_lgetfield:
2965     __ ldr(r0, field);
2966     break;
2967   case Bytecodes::_fast_igetfield:
2968     __ ldrw(r0, field);
2969     break;
2970   case Bytecodes::_fast_bgetfield:
2971     __ load_signed_byte(r0, field);
2972     break;
2973   case Bytecodes::_fast_sgetfield:
2974     __ load_signed_short(r0, field);
2975     break;
2976   case Bytecodes::_fast_cgetfield:
2977     __ load_unsigned_short(r0, field);
2978     break;
2979   case Bytecodes::_fast_fgetfield:
2980     __ ldrs(v0, field);
2981     break;


3011   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3012   // the stores in one method and we interpret the loads in another.
3013   if (! UseBarriersForVolatile) {
3014     Label notVolatile;
3015     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3016                                      ConstantPoolCacheEntry::flags_offset())));
3017     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3018     __ membar(MacroAssembler::AnyAny);
3019     __ bind(notVolatile);
3020   }
3021 
3022   // make sure exception is reported in correct bcp range (getfield is
3023   // next instruction)
3024   __ increment(rbcp);
3025   __ null_check(r0);
3026   switch (state) {
3027   case itos:
3028     __ ldrw(r0, Address(r0, r1, Address::lsl(0)));
3029     break;
3030   case atos:
3031     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, ACCESS_IN_HEAP);
3032     __ verify_oop(r0);
3033     break;
3034   case ftos:
3035     __ ldrs(v0, Address(r0, r1, Address::lsl(0)));
3036     break;
3037   default:
3038     ShouldNotReachHere();
3039   }
3040 
3041   {
3042     Label notVolatile;
3043     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3044                                      ConstantPoolCacheEntry::flags_offset())));
3045     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3046     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3047     __ bind(notVolatile);
3048   }
3049 
3050   __ decrement(rbcp);
3051 }


< prev index next >