
src/cpu/aarch64/vm/templateTable_aarch64.cpp

rev 10997 : 8154957: AArch64: Better byte behavior
Summary:  The fix for 8132051 is needed for AArch64.
Reviewed-by: roland


 212       ShouldNotReachHere();
 213 
 214   }
 215 }
 216 
 217 Address TemplateTable::at_bcp(int offset) {
 218   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 219   return Address(rbcp, offset);
 220 }
 221 
 222 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 223                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 224                                    int byte_no)
 225 {
 226   if (!RewriteBytecodes)  return;
 227   Label L_patch_done;
 228 
 229   switch (bc) {
 230   case Bytecodes::_fast_aputfield:
 231   case Bytecodes::_fast_bputfield:
 232   case Bytecodes::_fast_cputfield:
 233   case Bytecodes::_fast_dputfield:
 234   case Bytecodes::_fast_fputfield:
 235   case Bytecodes::_fast_iputfield:
 236   case Bytecodes::_fast_lputfield:
 237   case Bytecodes::_fast_sputfield:
 238     {
 239       // We skip bytecode quickening for putfield instructions when
 240       // the put_code written to the constant pool cache is zero.
 241       // This is required so that every execution of this instruction
 242       // calls out to InterpreterRuntime::resolve_get_put to do
 243       // additional, required work.
 244       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 245       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 246       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 247       __ movw(bc_reg, bc);
 248       __ cmpw(temp_reg, (unsigned) 0);
 249       __ br(Assembler::EQ, L_patch_done);  // don't patch
 250     }
 251     break;
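
The effect of the guard above: patching is deferred until the constant pool
cache entry has been resolved, because until then the put_code byte read by
get_cache_and_index_and_bytecode_at_bcp is zero. A minimal sketch of that
logic with hypothetical names (not HotSpot code):

    #include <cstdint>

    // Quickening is skipped while the cp-cache entry is unresolved, so
    // every execution keeps calling InterpreterRuntime::resolve_get_put.
    static void maybe_quicken(uint8_t put_code, uint8_t* bcp, uint8_t fast_bc) {
      if (put_code == 0) return;  // unresolved: stay on the slow path
      *bcp = fast_bc;             // resolved: rewrite to the _fast_* bytecode
    }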


1065   __ bind(is_null);
1066   __ profile_null_seen(r2);
1067 
1068   // Store a NULL
1069   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1070 
1071   // Pop stack arguments
1072   __ bind(done);
1073   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1074 }
1075 
1076 void TemplateTable::bastore()
1077 {
1078   transition(itos, vtos);
1079   __ pop_i(r1);
1080   __ pop_ptr(r3);
1081   // r0: value
1082   // r1: index
1083   // r3: array
1084   index_check(r3, r1); // prefer index in r1
1085   __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
1086   __ strb(r0, Address(rscratch1,
1087                       arrayOopDesc::base_offset_in_bytes(T_BYTE)));
1088 }
1089 
1090 void TemplateTable::castore()
1091 {
1092   transition(itos, vtos);
1093   __ pop_i(r1);
1094   __ pop_ptr(r3);
1095   // r0: value
1096   // r1: index
1097   // r3: array
1098   index_check(r3, r1); // prefer index in r1
1099   __ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
1100   __ strh(r0, Address(rscratch1,
1101                       arrayOopDesc::base_offset_in_bytes(T_CHAR)));
1102 }
1103 
1104 void TemplateTable::sastore()


2176     assert(state == vtos, "only valid state");
2177 
2178     __ ldr(c_rarg1, aaddress(0));
2179     __ load_klass(r3, c_rarg1);
2180     __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2181     __ tst(r3, JVM_ACC_HAS_FINALIZER);
2182     Label skip_register_finalizer;
2183     __ br(Assembler::EQ, skip_register_finalizer);
2184 
2185     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2186 
2187     __ bind(skip_register_finalizer);
2188   }
2189 
2190   // Issue a StoreStore barrier after all stores but before return
2191   // from any constructor for any class with a final field.  We don't
2192   // know if this return is from such a constructor, so we always do so.
2193   if (_desc->bytecode() == Bytecodes::_return)
2194     __ membar(MacroAssembler::StoreStore);
2195 
2196   __ remove_activation(state);
2197   __ ret(lr);
2198 }
2199 
2200 // ----------------------------------------------------------------------------
2201 // Volatile variables demand their effects be made known to all CPUs
2202 // in order.  Store buffers on most chips allow reads & writes to
2203 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2204 // without some kind of memory barrier (i.e., it's not sufficient that
2205 // the interpreter does not reorder volatile references, the hardware
2206 // also must not reorder them).
2207 //
2208 // According to the new Java Memory Model (JMM):
2209 // (1) All volatiles are serialized wrt each other.  ALSO reads &
2210 //     writes act as acquire & release, so:
2211 // (2) A read cannot let unrelated NON-volatile memory refs that
2212 //     happen after the read float up to before the read.  It's OK for
2213 //     non-volatile memory refs that happen before the volatile read to
2214 //     float down below it.
2215 // (3) Similarly, a volatile write cannot let unrelated NON-volatile


2369 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2370 {
2371   const Register cache = r2;
2372   const Register index = r3;
2373   const Register obj   = r4;
2374   const Register off   = r19;
2375   const Register flags = r0;
2376   const Register bc    = r4; // uses same reg as obj, so don't mix them
2377 
2378   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2379   jvmti_post_field_access(cache, index, is_static, false);
2380   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2381 
2382   if (!is_static) {
2383     // obj is on the stack
2384     pop_and_check_object(obj);
2385   }
2386 
2387   const Address field(obj, off);
2388 
2389   Label Done, notByte, notInt, notShort, notChar,
2390               notLong, notFloat, notObj, notDouble;
2391 
2392   // x86 uses a shift and mask, or wings it with just a shift and an
2393   // assert that the mask is not needed. aarch64 just uses a bitfield extract.
2394   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2395 
2396   assert(btos == 0, "change code, btos != 0");
2397   __ cbnz(flags, notByte);
2398 
2399   // Don't rewrite getstatic, only getfield
2400   if (is_static) rc = may_not_rewrite;
2401 
2402   // btos
2403   __ load_signed_byte(r0, field);
2404   __ push(btos);
2405   // Rewrite bytecode to be faster
2406   if (rc == may_rewrite) {
2407     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2408   }
2409   __ b(Done);
2410 
2411   __ bind(notByte);
2412   __ cmp(flags, atos);
2413   __ br(Assembler::NE, notObj);
2414   // atos
2415   __ load_heap_oop(r0, field);
2416   __ push(atos);
2417   if (rc == may_rewrite) {
2418     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2419   }
2420   __ b(Done);
2421 
2422   __ bind(notObj);
2423   __ cmp(flags, itos);
2424   __ br(Assembler::NE, notInt);
2425   // itos
2426   __ ldrw(r0, field);
2427   __ push(itos);
2428   // Rewrite bytecode to be faster
2429   if (rc == may_rewrite) {
2430     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2431   }


2587   const Register flags = r0;
2588   const Register bc    = r4;
2589 
2590   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2591   jvmti_post_field_mod(cache, index, is_static);
2592   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2593 
2594   Label Done;
2595   __ mov(r5, flags);
2596 
2597   {
2598     Label notVolatile;
2599     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2600     __ membar(MacroAssembler::StoreStore);
2601     __ bind(notVolatile);
2602   }
2603 
2604   // field address
2605   const Address field(obj, off);
2606 
2607   Label notByte, notInt, notShort, notChar,
2608         notLong, notFloat, notObj, notDouble;
2609 
2610   // x86 uses a shift and mask, or wings it with just a shift and an
2611   // assert that the mask is not needed. aarch64 just uses a bitfield extract.
2612   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2613 
2614   assert(btos == 0, "change code, btos != 0");
2615   __ cbnz(flags, notByte);
2616 
2617   // Don't rewrite putstatic, only putfield
2618   if (is_static) rc = may_not_rewrite;
2619 
2620   // btos
2621   {
2622     __ pop(btos);
2623     if (!is_static) pop_and_check_object(obj);
2624     __ strb(r0, field);
2625     if (rc == may_rewrite) {
2626       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2627     }
2628     __ b(Done);
2629   }
2630 
2631   __ bind(notByte);
2632   __ cmp(flags, atos);
2633   __ br(Assembler::NE, notObj);
2634 
2635   // atos
2636   {
2637     __ pop(atos);
2638     if (!is_static) pop_and_check_object(obj);
2639     // Store into the field
2640     do_oop_store(_masm, field, r0, _bs->kind(), false);
2641     if (rc == may_rewrite) {
2642       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2643     }
2644     __ b(Done);
2645   }
2646 
2647   __ bind(notObj);
2648   __ cmp(flags, itos);
2649   __ br(Assembler::NE, notInt);
2650 
2651   // itos


2766 }
2767 
2768 void TemplateTable::jvmti_post_fast_field_mod()
2769 {
2770   if (JvmtiExport::can_post_field_modification()) {
2771     // Check to see if a field modification watch has been set before
2772     // we take the time to call into the VM.
2773     Label L2;
2774     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2775     __ ldrw(c_rarg3, Address(rscratch1));
2776     __ cbzw(c_rarg3, L2);
2777     __ pop_ptr(r19);                  // copy the object pointer from tos
2778     __ verify_oop(r19);
2779     __ push_ptr(r19);                 // put the object pointer back on tos
2780     // Save tos values before call_VM() clobbers them. Since we have
2781     // to do it for every data type, we use the saved values as the
2782     // jvalue object.
2783     switch (bytecode()) {          // load values into the jvalue object
2784     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2785     case Bytecodes::_fast_bputfield: // fall through
2786     case Bytecodes::_fast_sputfield: // fall through
2787     case Bytecodes::_fast_cputfield: // fall through
2788     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2789     case Bytecodes::_fast_dputfield: __ push_d(); break;
2790     case Bytecodes::_fast_fputfield: __ push_f(); break;
2791     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2792 
2793     default:
2794       ShouldNotReachHere();
2795     }
2796     __ mov(c_rarg3, esp);             // points to jvalue on the stack
2797     // access constant pool cache entry
2798     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2799     __ verify_oop(r19);
2800     // r19: object pointer copied above
2801     // c_rarg2: cache entry pointer
2802     // c_rarg3: jvalue object on the stack
2803     __ call_VM(noreg,
2804                CAST_FROM_FN_PTR(address,
2805                                 InterpreterRuntime::post_field_modification),
2806                r19, c_rarg2, c_rarg3);
2807 
2808     switch (bytecode()) {             // restore tos values
2809     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2810     case Bytecodes::_fast_bputfield: // fall through
2811     case Bytecodes::_fast_sputfield: // fall through
2812     case Bytecodes::_fast_cputfield: // fall through
2813     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2814     case Bytecodes::_fast_dputfield: __ pop_d(); break;
2815     case Bytecodes::_fast_fputfield: __ pop_f(); break;
2816     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2817     }
2818     __ bind(L2);
2819   }
2820 }
2821 
2822 void TemplateTable::fast_storefield(TosState state)
2823 {
2824   transition(state, vtos);
2825 
2826   ByteSize base = ConstantPoolCache::base_offset();
2827 
2828   jvmti_post_fast_field_mod();
2829 
2830   // access constant pool cache


2846 
2847   Label notVolatile;
2848 
2849   // Get object from stack
2850   pop_and_check_object(r2);
2851 
2852   // field address
2853   const Address field(r2, r1);
2854 
2855   // access field
2856   switch (bytecode()) {
2857   case Bytecodes::_fast_aputfield:
2858     do_oop_store(_masm, field, r0, _bs->kind(), false);
2859     break;
2860   case Bytecodes::_fast_lputfield:
2861     __ str(r0, field);
2862     break;
2863   case Bytecodes::_fast_iputfield:
2864     __ strw(r0, field);
2865     break;
2866   case Bytecodes::_fast_bputfield:
2867     __ strb(r0, field);
2868     break;
2869   case Bytecodes::_fast_sputfield:
2870     // fall through
2871   case Bytecodes::_fast_cputfield:
2872     __ strh(r0, field);
2873     break;
2874   case Bytecodes::_fast_fputfield:
2875     __ strs(v0, field);
2876     break;
2877   case Bytecodes::_fast_dputfield:
2878     __ strd(v0, field);
2879     break;
2880   default:
2881     ShouldNotReachHere();
2882   }
2883 
2884   {
2885     Label notVolatile;




 212       ShouldNotReachHere();
 213 
 214   }
 215 }
 216 
 217 Address TemplateTable::at_bcp(int offset) {
 218   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 219   return Address(rbcp, offset);
 220 }
 221 
 222 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 223                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 224                                    int byte_no)
 225 {
 226   if (!RewriteBytecodes)  return;
 227   Label L_patch_done;
 228 
 229   switch (bc) {
 230   case Bytecodes::_fast_aputfield:
 231   case Bytecodes::_fast_bputfield:
 232   case Bytecodes::_fast_zputfield:
 233   case Bytecodes::_fast_cputfield:
 234   case Bytecodes::_fast_dputfield:
 235   case Bytecodes::_fast_fputfield:
 236   case Bytecodes::_fast_iputfield:
 237   case Bytecodes::_fast_lputfield:
 238   case Bytecodes::_fast_sputfield:
 239     {
 240       // We skip bytecode quickening for putfield instructions when
 241       // the put_code written to the constant pool cache is zero.
 242       // This is required so that every execution of this instruction
 243       // calls out to InterpreterRuntime::resolve_get_put to do
 244       // additional, required work.
 245       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 246       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 247       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 248       __ movw(bc_reg, bc);
 249       __ cmpw(temp_reg, (unsigned) 0);
 250       __ br(Assembler::EQ, L_patch_done);  // don't patch
 251     }
 252     break;


1066   __ bind(is_null);
1067   __ profile_null_seen(r2);
1068 
1069   // Store a NULL
1070   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1071 
1072   // Pop stack arguments
1073   __ bind(done);
1074   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1075 }
1076 
1077 void TemplateTable::bastore()
1078 {
1079   transition(itos, vtos);
1080   __ pop_i(r1);
1081   __ pop_ptr(r3);
1082   // r0: value
1083   // r1: index
1084   // r3: array
1085   index_check(r3, r1); // prefer index in r1
1086 
1087   // Need to check whether array is boolean or byte
1088   // since both types share the bastore bytecode.
1089   __ load_klass(r2, r3);
1090   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
1091   int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
1092   Label L_skip;
1093   __ tbz(r2, diffbit_index, L_skip);
1094   __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1095   __ bind(L_skip);
1096 
1097   __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
1098   __ strb(r0, Address(rscratch1,
1099                       arrayOopDesc::base_offset_in_bytes(T_BYTE)));
1100 }
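
The new klass load and tbz are needed because boolean[] and byte[] share the
bastore bytecode: their array klasses differ in exactly one layout-helper bit
(what Klass::layout_helper_boolean_diffbit() reports), so a single test-bit
instruction distinguishes them. Roughly, in plain C++ (the diffbit position
below is invented for illustration):

    #include <cstdint>

    static const uint32_t kBooleanDiffbit = 1u << 25;  // hypothetical position

    static uint8_t value_to_store(uint32_t layout_helper, int value) {
      if (layout_helper & kBooleanDiffbit)
        return (uint8_t)(value & 1);  // T_BOOLEAN array: keep only the t/f bit
      return (uint8_t)value;          // T_BYTE array: store the low 8 bits
    }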
1101 
1102 void TemplateTable::castore()
1103 {
1104   transition(itos, vtos);
1105   __ pop_i(r1);
1106   __ pop_ptr(r3);
1107   // r0: value
1108   // r1: index
1109   // r3: array
1110   index_check(r3, r1); // prefer index in r1
1111   __ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
1112   __ strh(r0, Address(rscratch1,
1113                       arrayOopDesc::base_offset_in_bytes(T_CHAR)));
1114 }
1115 
1116 void TemplateTable::sastore()


2188     assert(state == vtos, "only valid state");
2189 
2190     __ ldr(c_rarg1, aaddress(0));
2191     __ load_klass(r3, c_rarg1);
2192     __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2193     __ tst(r3, JVM_ACC_HAS_FINALIZER);
2194     Label skip_register_finalizer;
2195     __ br(Assembler::EQ, skip_register_finalizer);
2196 
2197     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2198 
2199     __ bind(skip_register_finalizer);
2200   }
2201 
2202   // Issue a StoreStore barrier after all stores but before return
2203   // from any constructor for any class with a final field.  We don't
2204   // know if this return is from such a constructor, so we always do so.
2205   if (_desc->bytecode() == Bytecodes::_return)
2206     __ membar(MacroAssembler::StoreStore);
2207 
2208   // Narrow result if state is itos but result type is smaller.
2209   // Need to narrow in the return bytecode rather than in generate_return_entry
2210   // since compiled code callers expect the result to already be narrowed.
2211   if (state == itos) {
2212     __ narrow(r0);
2213   }
2214 
2215   __ remove_activation(state);
2216   __ ret(lr);
2217 }
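
The narrow step added above is the heart of this fix: compiled callers expect
a boolean/byte/char/short result to arrive in r0 already truncated, so the
interpreter must narrow in the return bytecode rather than in
generate_return_entry. What narrow(r0) has to achieve, sketched per return
type (illustrative, not the MacroAssembler source):

    #include <cstdint>

    static int32_t narrow_result(int32_t raw, char sig) {
      switch (sig) {
        case 'Z': return raw & 1;        // boolean: keep only the t/f bit
        case 'B': return (int8_t)raw;    // byte: sign-extend bits 0..7
        case 'C': return (uint16_t)raw;  // char: zero-extend bits 0..15
        case 'S': return (int16_t)raw;   // short: sign-extend bits 0..15
        default:  return raw;            // int: no narrowing needed
      }
    }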
2218 
2219 // ----------------------------------------------------------------------------
2220 // Volatile variables demand their effects be made known to all CPUs
2221 // in order.  Store buffers on most chips allow reads & writes to
2222 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2223 // without some kind of memory barrier (i.e., it's not sufficient that
2224 // the interpreter does not reorder volatile references, the hardware
2225 // also must not reorder them).
2226 //
2227 // According to the new Java Memory Model (JMM):
2228 // (1) All volatiles are serialized wrt each other.  ALSO reads &
2229 //     writes act as acquire & release, so:
2230 // (2) A read cannot let unrelated NON-volatile memory refs that
2231 //     happen after the read float up to before the read.  It's OK for
2232 //     non-volatile memory refs that happen before the volatile read to
2233 //     float down below it.
2234 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
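
Rules (1)-(3) are acquire/release ordering; the same constraints can be
written in portable C++ atomics (an analogy for the barrier placement, not
interpreter code):

    #include <atomic>

    std::atomic<int> guard{0};  // plays the role of the volatile field
    int payload = 0;            // unrelated non-volatile data

    void writer() {
      payload = 42;                               // must not sink below...
      guard.store(1, std::memory_order_release);  // ...the volatile write (3)
    }

    int reader() {
      if (guard.load(std::memory_order_acquire) == 1)  // volatile read (2)
        return payload;  // later refs cannot float above the read
      return -1;
    }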


2388 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2389 {
2390   const Register cache = r2;
2391   const Register index = r3;
2392   const Register obj   = r4;
2393   const Register off   = r19;
2394   const Register flags = r0;
2395   const Register bc    = r4; // uses same reg as obj, so don't mix them
2396 
2397   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2398   jvmti_post_field_access(cache, index, is_static, false);
2399   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2400 
2401   if (!is_static) {
2402     // obj is on the stack
2403     pop_and_check_object(obj);
2404   }
2405 
2406   const Address field(obj, off);
2407 
2408   Label Done, notByte, notBool, notInt, notShort, notChar,
2409               notLong, notFloat, notObj, notDouble;
2410 
2411   // x86 uses a shift and mask, or wings it with just a shift and an
2412   // assert that the mask is not needed. aarch64 just uses a bitfield extract.
2413   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2414 
2415   assert(btos == 0, "change code, btos != 0");
2416   __ cbnz(flags, notByte);
2417 
2418   // Don't rewrite getstatic, only getfield
2419   if (is_static) rc = may_not_rewrite;
2420 
2421   // btos
2422   __ load_signed_byte(r0, field);
2423   __ push(btos);
2424   // Rewrite bytecode to be faster
2425   if (rc == may_rewrite) {
2426     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2427   }
2428   __ b(Done);
2429 
2430   __ bind(notByte);
2431   __ cmp(flags, ztos);
2432   __ br(Assembler::NE, notBool);
2433 
2434   // ztos (same code as btos)
2435   __ ldrsb(r0, field);
2436   __ push(ztos);
2437   // Rewrite bytecode to be faster
2438   if (!is_static) {
2439     // use btos rewriting; no truncation to the t/f bit is needed for getfield.
2440     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2441   }
2442   __ b(Done);
2443 
2444   __ bind(notBool);
2445   __ cmp(flags, atos);
2446   __ br(Assembler::NE, notObj);
2447   // atos
2448   __ load_heap_oop(r0, field);
2449   __ push(atos);
2450   if (rc == may_rewrite) {
2451     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2452   }
2453   __ b(Done);
2454 
2455   __ bind(notObj);
2456   __ cmp(flags, itos);
2457   __ br(Assembler::NE, notInt);
2458   // itos
2459   __ ldrw(r0, field);
2460   __ push(itos);
2461   // Rewrite bytecode to be faster
2462   if (rc == may_rewrite) {
2463     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2464   }
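
On the tos_state extraction used in getfield above (and again in putfield
below): ubfxw(flags, flags, shift, bits) is an unsigned bitfield extract,
i.e. the x86 shift-and-mask folded into one instruction:

    #include <cstdint>

    // Equivalent of ubfxw; shift and width come from ConstantPoolCacheEntry.
    static uint32_t extract_bits(uint32_t src, unsigned shift, unsigned bits) {
      return (src >> shift) & ((1u << bits) - 1u);
    }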


2620   const Register flags = r0;
2621   const Register bc    = r4;
2622 
2623   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2624   jvmti_post_field_mod(cache, index, is_static);
2625   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2626 
2627   Label Done;
2628   __ mov(r5, flags);
2629 
2630   {
2631     Label notVolatile;
2632     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2633     __ membar(MacroAssembler::StoreStore);
2634     __ bind(notVolatile);
2635   }
2636 
2637   // field address
2638   const Address field(obj, off);
2639 
2640   Label notByte, notBool, notInt, notShort, notChar,
2641         notLong, notFloat, notObj, notDouble;
2642 
2643   // x86 uses a shift and mask, or wings it with just a shift and an
2644   // assert that the mask is not needed. aarch64 just uses a bitfield extract.
2645   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2646 
2647   assert(btos == 0, "change code, btos != 0");
2648   __ cbnz(flags, notByte);
2649 
2650   // Don't rewrite putstatic, only putfield
2651   if (is_static) rc = may_not_rewrite;
2652 
2653   // btos
2654   {
2655     __ pop(btos);
2656     if (!is_static) pop_and_check_object(obj);
2657     __ strb(r0, field);
2658     if (rc == may_rewrite) {
2659       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2660     }
2661     __ b(Done);
2662   }
2663 
2664   __ bind(notByte);
2665   __ cmp(flags, ztos);
2666   __ br(Assembler::NE, notBool);
2667 
2668   // ztos
2669   {
2670     __ pop(ztos);
2671     if (!is_static) pop_and_check_object(obj);
2672     __ andw(r0, r0, 0x1);
2673     __ strb(r0, field);
2674     if (!is_static) {
2675       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2676     }
2677     __ b(Done);
2678   }
2679 
2680   __ bind(notBool);
2681   __ cmp(flags, atos);
2682   __ br(Assembler::NE, notObj);
2683 
2684   // atos
2685   {
2686     __ pop(atos);
2687     if (!is_static) pop_and_check_object(obj);
2688     // Store into the field
2689     do_oop_store(_masm, field, r0, _bs->kind(), false);
2690     if (rc == may_rewrite) {
2691       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2692     }
2693     __ b(Done);
2694   }
2695 
2696   __ bind(notObj);
2697   __ cmp(flags, itos);
2698   __ br(Assembler::NE, notInt);
2699 
2700   // itos
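
Note the asymmetry between the two ztos paths: putfield masks the operand
(the andw above) because any int can arrive on the stack, while getfield can
reuse the plain btos load since the byte in memory is then always 0 or 1.
As a sketch:

    #include <cstdint>

    static void put_boolean(uint8_t* field, int32_t value) {
      *field = (uint8_t)(value & 1);  // keep only the LSB, like andw/strb above
    }

    static int32_t get_boolean(const uint8_t* field) {
      return *field;  // already normalized, so a plain byte load suffices
    }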


2815 }
2816 
2817 void TemplateTable::jvmti_post_fast_field_mod()
2818 {
2819   if (JvmtiExport::can_post_field_modification()) {
2820     // Check to see if a field modification watch has been set before
2821     // we take the time to call into the VM.
2822     Label L2;
2823     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2824     __ ldrw(c_rarg3, Address(rscratch1));
2825     __ cbzw(c_rarg3, L2);
2826     __ pop_ptr(r19);                  // copy the object pointer from tos
2827     __ verify_oop(r19);
2828     __ push_ptr(r19);                 // put the object pointer back on tos
2829     // Save tos values before call_VM() clobbers them. Since we have
2830     // to do it for every data type, we use the saved values as the
2831     // jvalue object.
2832     switch (bytecode()) {          // load values into the jvalue object
2833     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2834     case Bytecodes::_fast_bputfield: // fall through
2835     case Bytecodes::_fast_zputfield: // fall through
2836     case Bytecodes::_fast_sputfield: // fall through
2837     case Bytecodes::_fast_cputfield: // fall through
2838     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2839     case Bytecodes::_fast_dputfield: __ push_d(); break;
2840     case Bytecodes::_fast_fputfield: __ push_f(); break;
2841     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2842 
2843     default:
2844       ShouldNotReachHere();
2845     }
2846     __ mov(c_rarg3, esp);             // points to jvalue on the stack
2847     // access constant pool cache entry
2848     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2849     __ verify_oop(r19);
2850     // r19: object pointer copied above
2851     // c_rarg2: cache entry pointer
2852     // c_rarg3: jvalue object on the stack
2853     __ call_VM(noreg,
2854                CAST_FROM_FN_PTR(address,
2855                                 InterpreterRuntime::post_field_modification),
2856                r19, c_rarg2, c_rarg3);
2857 
2858     switch (bytecode()) {             // restore tos values
2859     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2860     case Bytecodes::_fast_bputfield: // fall through
2861     case Bytecodes::_fast_zputfield: // fall through
2862     case Bytecodes::_fast_sputfield: // fall through
2863     case Bytecodes::_fast_cputfield: // fall through
2864     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2865     case Bytecodes::_fast_dputfield: __ pop_d(); break;
2866     case Bytecodes::_fast_fputfield: __ pop_f(); break;
2867     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2868     }
2869     __ bind(L2);
2870   }
2871 }
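
The push/pop pairs above rely on a saved tos value on the expression stack
having the same layout as a jvalue, which is why esp can be passed directly
as c_rarg3. The shape being assumed (abbreviated from jni.h's jvalue):

    #include <cstdint>

    union jvalue_sketch {
      int32_t i;  // _fast_{b,z,c,s,i}putfield
      int64_t j;  // _fast_lputfield
      float   f;  // _fast_fputfield
      double  d;  // _fast_dputfield
      void*   l;  // _fast_aputfield (object reference)
    };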
2872 
2873 void TemplateTable::fast_storefield(TosState state)
2874 {
2875   transition(state, vtos);
2876 
2877   ByteSize base = ConstantPoolCache::base_offset();
2878 
2879   jvmti_post_fast_field_mod();
2880 
2881   // access constant pool cache


2897 
2898   Label notVolatile;
2899 
2900   // Get object from stack
2901   pop_and_check_object(r2);
2902 
2903   // field address
2904   const Address field(r2, r1);
2905 
2906   // access field
2907   switch (bytecode()) {
2908   case Bytecodes::_fast_aputfield:
2909     do_oop_store(_masm, field, r0, _bs->kind(), false);
2910     break;
2911   case Bytecodes::_fast_lputfield:
2912     __ str(r0, field);
2913     break;
2914   case Bytecodes::_fast_iputfield:
2915     __ strw(r0, field);
2916     break;
2917   case Bytecodes::_fast_zputfield:
2918     __ andw(r0, r0, 0x1);  // boolean is true if LSB is 1
2919     // fall through to bputfield
2920   case Bytecodes::_fast_bputfield:
2921     __ strb(r0, field);
2922     break;
2923   case Bytecodes::_fast_sputfield:
2924     // fall through
2925   case Bytecodes::_fast_cputfield:
2926     __ strh(r0, field);
2927     break;
2928   case Bytecodes::_fast_fputfield:
2929     __ strs(v0, field);
2930     break;
2931   case Bytecodes::_fast_dputfield:
2932     __ strd(v0, field);
2933     break;
2934   default:
2935     ShouldNotReachHere();
2936   }
2937 
2938   {
2939     Label notVolatile;

