
src/cpu/aarch64/vm/templateTable_aarch64.cpp

rev 8300: 8079507: aarch64: fails to build due to changes to template interpreter
Summary: fix by merging in changes from fix for 8074345
Reviewed-by: duke


 485   __ lea (r2, Address(r1, r0, Address::lsl(3)));
 486   __ ldrd(v0, Address(r2, base_offset));
 487   __ push_d();
 488   __ b(Done);
 489 
 490   __ bind(Long);
 491   // ltos
 492   __ lea(r0, Address(r1, r0, Address::lsl(3)));
 493   __ ldr(r0, Address(r0, base_offset));
 494   __ push_l();
 495 
 496   __ bind(Done);
 497 }
 498 
 499 void TemplateTable::locals_index(Register reg, int offset)
 500 {
 501   __ ldrb(reg, at_bcp(offset));
 502   __ neg(reg, reg);
 503 }
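
For context: the neg above is what makes register-indexed local access
work, because local-variable slots live below rlocals. A minimal sketch
of the resulting effective address, assuming (as the lsl(3) addressing
above suggests) that iaddress(Register) scales the index register by the
8-byte word size:

    // Hypothetical C distillation, not HotSpot source: with reg = -index,
    // an Address(rlocals, reg, Address::lsl(3)) operand resolves to
    // rlocals + (-index << 3), i.e. slot `index` counting down from rlocals.
    intptr_t* local_slot(intptr_t* rlocals, int index) {
      return rlocals - index;   // pointer arithmetic scales by 8 bytes
    }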
 504 
 505 void TemplateTable::iload()
 506 {







 507   transition(vtos, itos);
 508   if (RewriteFrequentPairs) {
 509     // TODO: check x86 code for what to do here
 510     __ call_Unimplemented();
 511   } else {
 512     locals_index(r1);
 513     __ ldr(r0, iaddress(r1));
 514   }
 515 
 516 }
 517 
 518 void TemplateTable::fast_iload2()
 519 {
 520   __ call_Unimplemented();
 521 }
 522 
 523 void TemplateTable::fast_iload()
 524 {
 525   __ call_Unimplemented();
 526 }
 527 
 528 void TemplateTable::lload()


 742 }
 743 
 744 void TemplateTable::fload(int n)
 745 {
 746   transition(vtos, ftos);
 747   __ ldrs(v0, faddress(n));
 748 }
 749 
 750 void TemplateTable::dload(int n)
 751 {
 752   transition(vtos, dtos);
 753   __ ldrd(v0, daddress(n));
 754 }
 755 
 756 void TemplateTable::aload(int n)
 757 {
 758   transition(vtos, atos);
 759   __ ldr(r0, iaddress(n));
 760 }
 761 
 762 void TemplateTable::aload_0()
 763 {







 764   // According to bytecode histograms, the pairs:
 765   //
 766   // _aload_0, _fast_igetfield
 767   // _aload_0, _fast_agetfield
 768   // _aload_0, _fast_fgetfield
 769   //
 770   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 771   // _aload_0 bytecode checks if the next bytecode is either
 772   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 773   // rewrites the current bytecode into a pair bytecode; otherwise it
 774   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 775   // the pair check anymore.
 776   //
 777   // Note: If the next bytecode is _getfield, the rewrite must be
 778   //       delayed, otherwise we may miss an opportunity for a pair.
 779   //
 780   // Also rewrite frequent pairs
 781   //   aload_0, aload_1
 782   //   aload_0, iload_1
 783   // These bytecodes, which need only a small amount of code, are the
 784   // most profitable to rewrite.
 785   if (RewriteFrequentPairs) {
 786     __ call_Unimplemented();
 787   } else {
 788     aload(0);
 789   }
 790 }
 791 
 792 void TemplateTable::istore()
 793 {
 794   transition(itos, vtos);
 795   locals_index(r1);
 796   // FIXME: We're being very pernickety here storing a jint in a
 797   // local with strw, which costs an extra instruction over what we'd
 798   // be able to do with a simple str.  We should just store the whole
 799   // word.
 800   __ lea(rscratch1, iaddress(r1));
 801   __ strw(r0, Address(rscratch1));
 802 }
 803 
 804 void TemplateTable::lstore()
 805 {


2115 //     write.  It's OK for non-volatile memory refs that happen after the
2116 //     volatile write to float up before it.
2117 //
2118 // We only put in barriers around volatile refs (they are expensive),
2119 // not _between_ memory refs (that would require us to track the
2120 // flavor of the previous memory refs).  Requirements (2) and (3)
2121 // require some barriers before volatile stores and after volatile
2122 // loads.  These nearly cover requirement (1) but miss the
2123 // volatile-store-volatile-load case.  This final case is placed after
2124 // volatile-stores although it could just as well go before
2125 // volatile-loads.
2126 
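Concretely, the bracketing described above looks like this in the templates
that follow (a condensed sketch using the same MacroAssembler calls; the
actual emit points are in putfield_or_static and getfield_or_static below):

    // volatile store: StoreStore before, StoreLoad after
    __ membar(MacroAssembler::StoreStore);   // earlier writes stay earlier
    //   ... emit the store ...
    __ membar(MacroAssembler::StoreLoad);    // covers store-then-load

    // slow-path load: acquire-style barrier after
    //   ... emit the load ...
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
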
2127 void TemplateTable::resolve_cache_and_index(int byte_no,
2128                                             Register Rcache,
2129                                             Register index,
2130                                             size_t index_size) {
2131   const Register temp = r19;
2132   assert_different_registers(Rcache, index, temp);
2133 
2134   Label resolved;







2135   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2136   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2137   __ cmp(temp, (int) bytecode());  // have we resolved this bytecode?
2138   __ br(Assembler::EQ, resolved);
2139 
2140   // resolve first time through
2141   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2142   __ mov(temp, (int) bytecode());
2143   __ call_VM(noreg, entry, temp);
2144 
2145   // Update registers with resolved info
2146   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2147   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
 2148   // so all clients of this method must be modified accordingly
2149   __ bind(resolved);
2150 }
2151 
 2152 // The Rcache and index registers must be set before the call
 2153 // n.b. unlike x86, cache already includes the index offset
2154 void TemplateTable::load_field_cp_cache_entry(Register obj,
2155                                               Register cache,
2156                                               Register index,
2157                                               Register off,
2158                                               Register flags,
2159                                               bool is_static = false) {
2160   assert_different_registers(cache, index, flags, off);
2161 
2162   ByteSize cp_base_offset = ConstantPoolCache::base_offset();


2240       __ verify_oop(c_rarg1);
2241     }
2242     // c_rarg1: object pointer or NULL
2243     // c_rarg2: cache entry pointer
2244     // c_rarg3: jvalue object on the stack
2245     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2246                                        InterpreterRuntime::post_field_access),
2247                c_rarg1, c_rarg2, c_rarg3);
2248     __ get_cache_and_index_at_bcp(cache, index, 1);
2249     __ bind(L1);
2250   }
2251 }
2252 
2253 void TemplateTable::pop_and_check_object(Register r)
2254 {
2255   __ pop_ptr(r);
2256   __ null_check(r);  // for field access must check obj.
2257   __ verify_oop(r);
2258 }
2259 
2260 void TemplateTable::getfield_or_static(int byte_no, bool is_static)
2261 {
2262   const Register cache = r2;
2263   const Register index = r3;
2264   const Register obj   = r4;
2265   const Register off   = r19;
2266   const Register flags = r0;
2267   const Register bc    = r4; // uses same reg as obj, so don't mix them
2268 
2269   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2270   jvmti_post_field_access(cache, index, is_static, false);
2271   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2272 
2273   if (!is_static) {
2274     // obj is on the stack
2275     pop_and_check_object(obj);
2276   }
2277 
2278   const Address field(obj, off);
2279 
2280   Label Done, notByte, notInt, notShort, notChar,
2281               notLong, notFloat, notObj, notDouble;
2282 
 2283   // x86 uses a shift and mask, or wings it with a shift plus an assert
 2284   // that the mask is not needed. aarch64 just uses a bitfield extract
2285   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2286 
2287   assert(btos == 0, "change code, btos != 0");
2288   __ cbnz(flags, notByte);
2289 
2290   // btos
2291   __ load_signed_byte(r0, field);
2292   __ push(btos);
2293   // Rewrite bytecode to be faster
2294   if (!is_static) {
2295     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2296   }
2297   __ b(Done);
2298 
2299   __ bind(notByte);
2300   __ cmp(flags, atos);
2301   __ br(Assembler::NE, notObj);
2302   // atos
2303   __ load_heap_oop(r0, field);
2304   __ push(atos);
2305   if (!is_static) {
2306     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2307   }
2308   __ b(Done);
2309 
2310   __ bind(notObj);
2311   __ cmp(flags, itos);
2312   __ br(Assembler::NE, notInt);
2313   // itos
2314   __ ldrw(r0, field);
2315   __ push(itos);
2316   // Rewrite bytecode to be faster
2317   if (!is_static) {
2318     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2319   }
2320   __ b(Done);
2321 
2322   __ bind(notInt);
2323   __ cmp(flags, ctos);
2324   __ br(Assembler::NE, notChar);
2325   // ctos
2326   __ load_unsigned_short(r0, field);
2327   __ push(ctos);
2328   // Rewrite bytecode to be faster
2329   if (!is_static) {
2330     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2331   }
2332   __ b(Done);
2333 
2334   __ bind(notChar);
2335   __ cmp(flags, stos);
2336   __ br(Assembler::NE, notShort);
2337   // stos
2338   __ load_signed_short(r0, field);
2339   __ push(stos);
2340   // Rewrite bytecode to be faster
2341   if (!is_static) {
2342     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2343   }
2344   __ b(Done);
2345 
2346   __ bind(notShort);
2347   __ cmp(flags, ltos);
2348   __ br(Assembler::NE, notLong);
2349   // ltos
2350   __ ldr(r0, field);
2351   __ push(ltos);
2352   // Rewrite bytecode to be faster
2353   if (!is_static) {
2354     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2355   }
2356   __ b(Done);
2357 
2358   __ bind(notLong);
2359   __ cmp(flags, ftos);
2360   __ br(Assembler::NE, notFloat);
2361   // ftos
2362   __ ldrs(v0, field);
2363   __ push(ftos);
2364   // Rewrite bytecode to be faster
2365   if (!is_static) {
2366     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2367   }
2368   __ b(Done);
2369 
2370   __ bind(notFloat);
2371 #ifdef ASSERT
2372   __ cmp(flags, dtos);
2373   __ br(Assembler::NE, notDouble);
2374 #endif
2375   // dtos
2376   __ ldrd(v0, field);
2377   __ push(dtos);
2378   // Rewrite bytecode to be faster
2379   if (!is_static) {
2380     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2381   }
2382 #ifdef ASSERT
2383   __ b(Done);
2384 
2385   __ bind(notDouble);
2386   __ stop("Bad state");
2387 #endif
2388 
2389   __ bind(Done);
2390   // It's really not worth bothering to check whether this field
2391   // really is volatile in the slow case.
2392   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2393 }
2394 
2395 
2396 void TemplateTable::getfield(int byte_no)
2397 {
2398   getfield_or_static(byte_no, false);
2399 }
2400 




2401 void TemplateTable::getstatic(int byte_no)
2402 {
2403   getfield_or_static(byte_no, true);
2404 }
2405 
 2406 // The registers cache and index are expected to be set before the call.
2407 // The function may destroy various registers, just not the cache and index registers.
2408 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2409   transition(vtos, vtos);
2410 
2411   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2412 
2413   if (JvmtiExport::can_post_field_modification()) {
2414     // Check to see if a field modification watch has been set before
2415     // we take the time to call into the VM.
2416     Label L1;
2417     assert_different_registers(cache, index, r0);
2418     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2419     __ ldrw(r0, Address(rscratch1));
2420     __ cbz(r0, L1);


2444       __ bind(ok);
2445       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2446       __ bind(nope2);
2447     }
2448     // cache entry pointer
2449     __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2450     // object (tos)
2451     __ mov(c_rarg3, esp);
2452     // c_rarg1: object pointer set up above (NULL if static)
2453     // c_rarg2: cache entry pointer
2454     // c_rarg3: jvalue object on the stack
2455     __ call_VM(noreg,
2456                CAST_FROM_FN_PTR(address,
2457                                 InterpreterRuntime::post_field_modification),
2458                c_rarg1, c_rarg2, c_rarg3);
2459     __ get_cache_and_index_at_bcp(cache, index, 1);
2460     __ bind(L1);
2461   }
2462 }
2463 
2464 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2465   transition(vtos, vtos);
2466 
2467   const Register cache = r2;
2468   const Register index = r3;
2469   const Register obj   = r2;
2470   const Register off   = r19;
2471   const Register flags = r0;
2472   const Register bc    = r4;
2473 
2474   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2475   jvmti_post_field_mod(cache, index, is_static);
2476   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2477 
2478   Label Done;
2479   __ mov(r5, flags);
2480 
2481   {
2482     Label notVolatile;
2483     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2484     __ membar(MacroAssembler::StoreStore);


2486   }
2487 
2488   // field address
2489   const Address field(obj, off);
2490 
2491   Label notByte, notInt, notShort, notChar,
2492         notLong, notFloat, notObj, notDouble;
2493 
 2494   // x86 uses a shift and mask, or wings it with a shift plus an assert
 2495   // that the mask is not needed. aarch64 just uses a bitfield extract
2496   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2497 
2498   assert(btos == 0, "change code, btos != 0");
2499   __ cbnz(flags, notByte);
2500 
2501   // btos
2502   {
2503     __ pop(btos);
2504     if (!is_static) pop_and_check_object(obj);
2505     __ strb(r0, field);
2506     if (!is_static) {
2507       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2508     }
2509     __ b(Done);
2510   }
2511 
2512   __ bind(notByte);
2513   __ cmp(flags, atos);
2514   __ br(Assembler::NE, notObj);
2515 
2516   // atos
2517   {
2518     __ pop(atos);
2519     if (!is_static) pop_and_check_object(obj);
2520     // Store into the field
2521     do_oop_store(_masm, field, r0, _bs->kind(), false);
2522     if (!is_static) {
2523       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2524     }
2525     __ b(Done);
2526   }
2527 
2528   __ bind(notObj);
2529   __ cmp(flags, itos);
2530   __ br(Assembler::NE, notInt);
2531 
2532   // itos
2533   {
2534     __ pop(itos);
2535     if (!is_static) pop_and_check_object(obj);
2536     __ strw(r0, field);
2537     if (!is_static) {
2538       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2539     }
2540     __ b(Done);
2541   }
2542 
2543   __ bind(notInt);
2544   __ cmp(flags, ctos);
2545   __ br(Assembler::NE, notChar);
2546 
2547   // ctos
2548   {
2549     __ pop(ctos);
2550     if (!is_static) pop_and_check_object(obj);
2551     __ strh(r0, field);
2552     if (!is_static) {
2553       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2554     }
2555     __ b(Done);
2556   }
2557 
2558   __ bind(notChar);
2559   __ cmp(flags, stos);
2560   __ br(Assembler::NE, notShort);
2561 
2562   // stos
2563   {
2564     __ pop(stos);
2565     if (!is_static) pop_and_check_object(obj);
2566     __ strh(r0, field);
2567     if (!is_static) {
2568       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2569     }
2570     __ b(Done);
2571   }
2572 
2573   __ bind(notShort);
2574   __ cmp(flags, ltos);
2575   __ br(Assembler::NE, notLong);
2576 
2577   // ltos
2578   {
2579     __ pop(ltos);
2580     if (!is_static) pop_and_check_object(obj);
2581     __ str(r0, field);
2582     if (!is_static) {
2583       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2584     }
2585     __ b(Done);
2586   }
2587 
2588   __ bind(notLong);
2589   __ cmp(flags, ftos);
2590   __ br(Assembler::NE, notFloat);
2591 
2592   // ftos
2593   {
2594     __ pop(ftos);
2595     if (!is_static) pop_and_check_object(obj);
2596     __ strs(v0, field);
2597     if (!is_static) {
2598       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2599     }
2600     __ b(Done);
2601   }
2602 
2603   __ bind(notFloat);
2604 #ifdef ASSERT
2605   __ cmp(flags, dtos);
2606   __ br(Assembler::NE, notDouble);
2607 #endif
2608 
2609   // dtos
2610   {
2611     __ pop(dtos);
2612     if (!is_static) pop_and_check_object(obj);
2613     __ strd(v0, field);
2614     if (!is_static) {
2615       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2616     }
2617   }
2618 
2619 #ifdef ASSERT
2620   __ b(Done);
2621 
2622   __ bind(notDouble);
2623   __ stop("Bad state");
2624 #endif
2625 
2626   __ bind(Done);
2627 
2628   {
2629     Label notVolatile;
2630     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2631     __ membar(MacroAssembler::StoreLoad);
2632     __ bind(notVolatile);
2633   }
2634 }
2635 
2636 void TemplateTable::putfield(int byte_no)
2637 {
2638   putfield_or_static(byte_no, false);




2639 }
2640 
2641 void TemplateTable::putstatic(int byte_no) {
2642   putfield_or_static(byte_no, true);
2643 }
2644 
2645 void TemplateTable::jvmti_post_fast_field_mod()
2646 {
2647   if (JvmtiExport::can_post_field_modification()) {
2648     // Check to see if a field modification watch has been set before
2649     // we take the time to call into the VM.
2650     Label L2;
2651     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2652     __ ldrw(c_rarg3, Address(rscratch1));
2653     __ cbzw(c_rarg3, L2);
2654     __ pop_ptr(r19);                  // copy the object pointer from tos
2655     __ verify_oop(r19);
2656     __ push_ptr(r19);                 // put the object pointer back on tos
2657     // Save tos values before call_VM() clobbers them. Since we have
2658     // to do it for every data type, we use the saved values as the




 485   __ lea (r2, Address(r1, r0, Address::lsl(3)));
 486   __ ldrd(v0, Address(r2, base_offset));
 487   __ push_d();
 488   __ b(Done);
 489 
 490   __ bind(Long);
 491   // ltos
 492   __ lea(r0, Address(r1, r0, Address::lsl(3)));
 493   __ ldr(r0, Address(r0, base_offset));
 494   __ push_l();
 495 
 496   __ bind(Done);
 497 }
 498 
 499 void TemplateTable::locals_index(Register reg, int offset)
 500 {
 501   __ ldrb(reg, at_bcp(offset));
 502   __ neg(reg, reg);
 503 }
 504 
 505 void TemplateTable::iload() {
 506   iload_internal();
 507 }
 508 
 509 void TemplateTable::nofast_iload() {
 510   iload_internal(may_not_rewrite);
 511 }
 512 
 513 void TemplateTable::iload_internal(RewriteControl rc) {
 514   transition(vtos, itos);
 515   if (RewriteFrequentPairs && rc == may_rewrite) {
 516     // TODO: check x86 code for what to do here
 517     __ call_Unimplemented();
 518   } else {
 519     locals_index(r1);
 520     __ ldr(r0, iaddress(r1));
 521   }
 522 
 523 }
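
The iload/nofast_iload split follows the shared-code pattern merged in from
the 8074345 fix: a _nofast bytecode runs the same template but must never
patch itself (for example when a method's bytecodes live in read-only
memory and cannot be rewritten). The relevant declarations, paraphrased
from the shared templateTable.hpp:

    // paraphrased from src/share/vm/interpreter/templateTable.hpp
    enum RewriteControl { may_rewrite, may_not_rewrite };
    static void iload_internal(RewriteControl rc = may_rewrite);
    static void aload_0_internal(RewriteControl rc = may_rewrite);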
 524 
 525 void TemplateTable::fast_iload2()
 526 {
 527   __ call_Unimplemented();
 528 }
 529 
 530 void TemplateTable::fast_iload()
 531 {
 532   __ call_Unimplemented();
 533 }
 534 
 535 void TemplateTable::lload()


 749 }
 750 
 751 void TemplateTable::fload(int n)
 752 {
 753   transition(vtos, ftos);
 754   __ ldrs(v0, faddress(n));
 755 }
 756 
 757 void TemplateTable::dload(int n)
 758 {
 759   transition(vtos, dtos);
 760   __ ldrd(v0, daddress(n));
 761 }
 762 
 763 void TemplateTable::aload(int n)
 764 {
 765   transition(vtos, atos);
 766   __ ldr(r0, iaddress(n));
 767 }
 768 
 769 void TemplateTable::aload_0() {
 770   aload_0_internal();
 771 }
 772 
 773 void TemplateTable::nofast_aload_0() {
 774   aload_0_internal(may_not_rewrite);
 775 }
 776 
 777 void TemplateTable::aload_0_internal(RewriteControl rc) {
 778   // According to bytecode histograms, the pairs:
 779   //
 780   // _aload_0, _fast_igetfield
 781   // _aload_0, _fast_agetfield
 782   // _aload_0, _fast_fgetfield
 783   //
 784   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 785   // _aload_0 bytecode checks if the next bytecode is either
 786   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 787   // rewrites the current bytecode into a pair bytecode; otherwise it
 788   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 789   // the pair check anymore.
 790   //
 791   // Note: If the next bytecode is _getfield, the rewrite must be
 792   //       delayed, otherwise we may miss an opportunity for a pair.
 793   //
 794   // Also rewrite frequent pairs
 795   //   aload_0, aload_1
 796   //   aload_0, iload_1
 797   // These bytecodes, which need only a small amount of code, are the
 798   // most profitable to rewrite.
 799   if (RewriteFrequentPairs && rc == may_rewrite) {
 800     __ call_Unimplemented();
 801   } else {
 802     aload(0);
 803   }
 804 }
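
For reference, the rewrite the unimplemented branch would need mirrors the
x86 template; paraphrased, that code does the following (this is a sketch
of templateTable_x86, not the aarch64 implementation, which currently traps
via call_Unimplemented):

    // examine the bytecode following this aload_0 (at bcp + 1):
    //   _getfield            -> do not patch yet; wait to form the pair
    //   _fast_igetfield      -> patch current bytecode to _fast_iaccess_0
    //   _fast_agetfield      -> patch current bytecode to _fast_aaccess_0
    //   _fast_fgetfield      -> patch current bytecode to _fast_faccess_0
    //   anything else        -> patch current bytecode to _fast_aload_0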
 805 
 806 void TemplateTable::istore()
 807 {
 808   transition(itos, vtos);
 809   locals_index(r1);
 810   // FIXME: We're being very pernickety here storing a jint in a
 811   // local with strw, which costs an extra instruction over what we'd
 812   // be able to do with a simple str.  We should just store the whole
 813   // word.
 814   __ lea(rscratch1, iaddress(r1));
 815   __ strw(r0, Address(rscratch1));
 816 }
 817 
 818 void TemplateTable::lstore()
 819 {


2129 //     write.  It's OK for non-volatile memory refs that happen after the
2130 //     volatile write to float up before it.
2131 //
2132 // We only put in barriers around volatile refs (they are expensive),
2133 // not _between_ memory refs (that would require us to track the
2134 // flavor of the previous memory refs).  Requirements (2) and (3)
2135 // require some barriers before volatile stores and after volatile
2136 // loads.  These nearly cover requirement (1) but miss the
2137 // volatile-store-volatile-load case.  This final case is placed after
2138 // volatile-stores although it could just as well go before
2139 // volatile-loads.
2140 
2141 void TemplateTable::resolve_cache_and_index(int byte_no,
2142                                             Register Rcache,
2143                                             Register index,
2144                                             size_t index_size) {
2145   const Register temp = r19;
2146   assert_different_registers(Rcache, index, temp);
2147 
2148   Label resolved;
2149 
2150   Bytecodes::Code code = bytecode();
2151   switch (code) {
2152   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2153   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2154   }
2155 
2156   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2157   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2158   __ cmp(temp, (int) code);  // have we resolved this bytecode?
2159   __ br(Assembler::EQ, resolved);
2160 
2161   // resolve first time through
2162   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2163   __ mov(temp, (int) code);
2164   __ call_VM(noreg, entry, temp);
2165 
2166   // Update registers with resolved info
2167   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2168   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
 2169   // so all clients of this method must be modified accordingly
2170   __ bind(resolved);
2171 }
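
The net effect of the switch added above: a _nofast variant resolves
exactly as its standard form, so the check against the cached bytecode
succeeds on re-execution. A hypothetical C++ distillation of the fast
path (helper names invented for illustration):

    Bytecodes::Code code = normalize_nofast(bytecode());     // the switch
    if (cached_bytecode_at_bcp(byte_no) != code) {           // cmp + br
      InterpreterRuntime::resolve_from_cache(thread, code);  // call_VM
    }
    // Rcache/index now address the resolved cache entry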
2172 
 2173 // The Rcache and index registers must be set before the call
 2174 // n.b. unlike x86, cache already includes the index offset
2175 void TemplateTable::load_field_cp_cache_entry(Register obj,
2176                                               Register cache,
2177                                               Register index,
2178                                               Register off,
2179                                               Register flags,
2180                                               bool is_static = false) {
2181   assert_different_registers(cache, index, flags, off);
2182 
2183   ByteSize cp_base_offset = ConstantPoolCache::base_offset();


2261       __ verify_oop(c_rarg1);
2262     }
2263     // c_rarg1: object pointer or NULL
2264     // c_rarg2: cache entry pointer
2265     // c_rarg3: jvalue object on the stack
2266     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2267                                        InterpreterRuntime::post_field_access),
2268                c_rarg1, c_rarg2, c_rarg3);
2269     __ get_cache_and_index_at_bcp(cache, index, 1);
2270     __ bind(L1);
2271   }
2272 }
2273 
2274 void TemplateTable::pop_and_check_object(Register r)
2275 {
2276   __ pop_ptr(r);
2277   __ null_check(r);  // for field access must check obj.
2278   __ verify_oop(r);
2279 }
2280 
2281 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2282 {
2283   const Register cache = r2;
2284   const Register index = r3;
2285   const Register obj   = r4;
2286   const Register off   = r19;
2287   const Register flags = r0;
2288   const Register bc    = r4; // uses same reg as obj, so don't mix them
2289 
2290   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2291   jvmti_post_field_access(cache, index, is_static, false);
2292   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2293 
2294   if (!is_static) {
2295     // obj is on the stack
2296     pop_and_check_object(obj);
2297   }
2298 
2299   const Address field(obj, off);
2300 
2301   Label Done, notByte, notInt, notShort, notChar,
2302               notLong, notFloat, notObj, notDouble;
2303 
2304   // x86 uses a shift and mask or wings it with a shift plus assert
2305   // the mask is not needed. aarch64 just uses bitfield extract
2306   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2307 
2308   assert(btos == 0, "change code, btos != 0");
2309   __ cbnz(flags, notByte);
2310 
2311   // btos
2312   __ load_signed_byte(r0, field);
2313   __ push(btos);
2314   // Rewrite bytecode to be faster
2315   if (!is_static && rc == may_rewrite) {
2316     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2317   }
2318   __ b(Done);
2319 
2320   __ bind(notByte);
2321   __ cmp(flags, atos);
2322   __ br(Assembler::NE, notObj);
2323   // atos
2324   __ load_heap_oop(r0, field);
2325   __ push(atos);
2326   if (!is_static && rc == may_rewrite) {
2327     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2328   }
2329   __ b(Done);
2330 
2331   __ bind(notObj);
2332   __ cmp(flags, itos);
2333   __ br(Assembler::NE, notInt);
2334   // itos
2335   __ ldrw(r0, field);
2336   __ push(itos);
2337   // Rewrite bytecode to be faster
2338   if (!is_static && rc == may_rewrite) {
2339     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2340   }
2341   __ b(Done);
2342 
2343   __ bind(notInt);
2344   __ cmp(flags, ctos);
2345   __ br(Assembler::NE, notChar);
2346   // ctos
2347   __ load_unsigned_short(r0, field);
2348   __ push(ctos);
2349   // Rewrite bytecode to be faster
2350   if (!is_static && rc == may_rewrite) {
2351     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2352   }
2353   __ b(Done);
2354 
2355   __ bind(notChar);
2356   __ cmp(flags, stos);
2357   __ br(Assembler::NE, notShort);
2358   // stos
2359   __ load_signed_short(r0, field);
2360   __ push(stos);
2361   // Rewrite bytecode to be faster
2362   if (!is_static && rc == may_rewrite) {
2363     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2364   }
2365   __ b(Done);
2366 
2367   __ bind(notShort);
2368   __ cmp(flags, ltos);
2369   __ br(Assembler::NE, notLong);
2370   // ltos
2371   __ ldr(r0, field);
2372   __ push(ltos);
2373   // Rewrite bytecode to be faster
2374   if (!is_static && rc == may_rewrite) {
2375     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2376   }
2377   __ b(Done);
2378 
2379   __ bind(notLong);
2380   __ cmp(flags, ftos);
2381   __ br(Assembler::NE, notFloat);
2382   // ftos
2383   __ ldrs(v0, field);
2384   __ push(ftos);
2385   // Rewrite bytecode to be faster
2386   if (!is_static && rc == may_rewrite) {
2387     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2388   }
2389   __ b(Done);
2390 
2391   __ bind(notFloat);
2392 #ifdef ASSERT
2393   __ cmp(flags, dtos);
2394   __ br(Assembler::NE, notDouble);
2395 #endif
2396   // dtos
2397   __ ldrd(v0, field);
2398   __ push(dtos);
2399   // Rewrite bytecode to be faster
2400   if (!is_static && rc == may_rewrite) {
2401     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2402   }
2403 #ifdef ASSERT
2404   __ b(Done);
2405 
2406   __ bind(notDouble);
2407   __ stop("Bad state");
2408 #endif
2409 
2410   __ bind(Done);
2411   // It's really not worth bothering to check whether this field
2412   // really is volatile in the slow case.
2413   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2414 }
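
Note that the closing membar is emitted unconditionally: as the comment
says, the slow path does not bother testing the volatile bit, unlike
putfield_or_static below, which saves the flags in r5 and branches. A
flag-tested variant would look like the following sketch (and would also
require keeping the flags live across the loads above, which clobber r0):

    Label notVolatile;
    __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);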
2415 
2416 
2417 void TemplateTable::getfield(int byte_no)
2418 {
2419   getfield_or_static(byte_no, false);
2420 }
2421 
2422 void TemplateTable::nofast_getfield(int byte_no) {
2423   getfield_or_static(byte_no, false, may_not_rewrite);
2424 }
2425 
2426 void TemplateTable::getstatic(int byte_no)
2427 {
2428   getfield_or_static(byte_no, true);
2429 }
2430 
 2431 // The registers cache and index are expected to be set before the call.
2432 // The function may destroy various registers, just not the cache and index registers.
2433 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2434   transition(vtos, vtos);
2435 
2436   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2437 
2438   if (JvmtiExport::can_post_field_modification()) {
2439     // Check to see if a field modification watch has been set before
2440     // we take the time to call into the VM.
2441     Label L1;
2442     assert_different_registers(cache, index, r0);
2443     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2444     __ ldrw(r0, Address(rscratch1));
2445     __ cbz(r0, L1);


2469       __ bind(ok);
2470       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2471       __ bind(nope2);
2472     }
2473     // cache entry pointer
2474     __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2475     // object (tos)
2476     __ mov(c_rarg3, esp);
2477     // c_rarg1: object pointer set up above (NULL if static)
2478     // c_rarg2: cache entry pointer
2479     // c_rarg3: jvalue object on the stack
2480     __ call_VM(noreg,
2481                CAST_FROM_FN_PTR(address,
2482                                 InterpreterRuntime::post_field_modification),
2483                c_rarg1, c_rarg2, c_rarg3);
2484     __ get_cache_and_index_at_bcp(cache, index, 1);
2485     __ bind(L1);
2486   }
2487 }
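
The guard at the top of this helper is the usual JVMTI fast path: one load
of a global watch counter, with the VM call made only when a watch is
actually set. In outline (a hypothetical C equivalent with illustrative
argument names, not HotSpot source):

    if (*JvmtiExport::get_field_modification_count_addr() != 0) {
      InterpreterRuntime::post_field_modification(thread, obj_or_null,
                                                  cache_entry, jvalue_addr);
    }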
2488 
2489 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2490   transition(vtos, vtos);
2491 
2492   const Register cache = r2;
2493   const Register index = r3;
2494   const Register obj   = r2;
2495   const Register off   = r19;
2496   const Register flags = r0;
2497   const Register bc    = r4;
2498 
2499   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2500   jvmti_post_field_mod(cache, index, is_static);
2501   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2502 
2503   Label Done;
2504   __ mov(r5, flags);
2505 
2506   {
2507     Label notVolatile;
2508     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2509     __ membar(MacroAssembler::StoreStore);


2511   }
2512 
2513   // field address
2514   const Address field(obj, off);
2515 
2516   Label notByte, notInt, notShort, notChar,
2517         notLong, notFloat, notObj, notDouble;
2518 
2519   // x86 uses a shift and mask or wings it with a shift plus assert
2520   // the mask is not needed. aarch64 just uses bitfield extract
2521   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2522 
2523   assert(btos == 0, "change code, btos != 0");
2524   __ cbnz(flags, notByte);
2525 
2526   // btos
2527   {
2528     __ pop(btos);
2529     if (!is_static) pop_and_check_object(obj);
2530     __ strb(r0, field);
2531     if (!is_static && rc == may_rewrite) {
2532       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2533     }
2534     __ b(Done);
2535   }
2536 
2537   __ bind(notByte);
2538   __ cmp(flags, atos);
2539   __ br(Assembler::NE, notObj);
2540 
2541   // atos
2542   {
2543     __ pop(atos);
2544     if (!is_static) pop_and_check_object(obj);
2545     // Store into the field
2546     do_oop_store(_masm, field, r0, _bs->kind(), false);
2547     if (!is_static && rc == may_rewrite) {
2548       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2549     }
2550     __ b(Done);
2551   }
2552 
2553   __ bind(notObj);
2554   __ cmp(flags, itos);
2555   __ br(Assembler::NE, notInt);
2556 
2557   // itos
2558   {
2559     __ pop(itos);
2560     if (!is_static) pop_and_check_object(obj);
2561     __ strw(r0, field);
2562     if (!is_static && rc == may_rewrite) {
2563       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2564     }
2565     __ b(Done);
2566   }
2567 
2568   __ bind(notInt);
2569   __ cmp(flags, ctos);
2570   __ br(Assembler::NE, notChar);
2571 
2572   // ctos
2573   {
2574     __ pop(ctos);
2575     if (!is_static) pop_and_check_object(obj);
2576     __ strh(r0, field);
2577     if (!is_static && rc == may_rewrite) {
2578       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2579     }
2580     __ b(Done);
2581   }
2582 
2583   __ bind(notChar);
2584   __ cmp(flags, stos);
2585   __ br(Assembler::NE, notShort);
2586 
2587   // stos
2588   {
2589     __ pop(stos);
2590     if (!is_static) pop_and_check_object(obj);
2591     __ strh(r0, field);
2592     if (!is_static && rc == may_rewrite) {
2593       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2594     }
2595     __ b(Done);
2596   }
2597 
2598   __ bind(notShort);
2599   __ cmp(flags, ltos);
2600   __ br(Assembler::NE, notLong);
2601 
2602   // ltos
2603   {
2604     __ pop(ltos);
2605     if (!is_static) pop_and_check_object(obj);
2606     __ str(r0, field);
2607     if (!is_static && rc == may_rewrite) {
2608       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2609     }
2610     __ b(Done);
2611   }
2612 
2613   __ bind(notLong);
2614   __ cmp(flags, ftos);
2615   __ br(Assembler::NE, notFloat);
2616 
2617   // ftos
2618   {
2619     __ pop(ftos);
2620     if (!is_static) pop_and_check_object(obj);
2621     __ strs(v0, field);
2622     if (!is_static && rc == may_rewrite) {
2623       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2624     }
2625     __ b(Done);
2626   }
2627 
2628   __ bind(notFloat);
2629 #ifdef ASSERT
2630   __ cmp(flags, dtos);
2631   __ br(Assembler::NE, notDouble);
2632 #endif
2633 
2634   // dtos
2635   {
2636     __ pop(dtos);
2637     if (!is_static) pop_and_check_object(obj);
2638     __ strd(v0, field);
2639     if (!is_static && rc == may_rewrite) {
2640       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2641     }
2642   }
2643 
2644 #ifdef ASSERT
2645   __ b(Done);
2646 
2647   __ bind(notDouble);
2648   __ stop("Bad state");
2649 #endif
2650 
2651   __ bind(Done);
2652 
2653   {
2654     Label notVolatile;
2655     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2656     __ membar(MacroAssembler::StoreLoad);
2657     __ bind(notVolatile);
2658   }
2659 }
2660 
2661 void TemplateTable::putfield(int byte_no)
2662 {
2663   putfield_or_static(byte_no, false);
2664 }
2665 
2666 void TemplateTable::nofast_putfield(int byte_no) {
2667   putfield_or_static(byte_no, false, may_not_rewrite);
2668 }
2669 
2670 void TemplateTable::putstatic(int byte_no) {
2671   putfield_or_static(byte_no, true);
2672 }
2673 
2674 void TemplateTable::jvmti_post_fast_field_mod()
2675 {
2676   if (JvmtiExport::can_post_field_modification()) {
2677     // Check to see if a field modification watch has been set before
2678     // we take the time to call into the VM.
2679     Label L2;
2680     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2681     __ ldrw(c_rarg3, Address(rscratch1));
2682     __ cbzw(c_rarg3, L2);
2683     __ pop_ptr(r19);                  // copy the object pointer from tos
2684     __ verify_oop(r19);
2685     __ push_ptr(r19);                 // put the object pointer back on tos
2686     // Save tos values before call_VM() clobbers them. Since we have
2687     // to do it for every data type, we use the saved values as the

