
src/cpu/ppc/vm/templateTable_ppc_64.cpp

rev 8025 : 8076163: ppc: port "8074345: Enable RewriteBytecodes when VM runs with CDS"


 421   // Check out Conversions.java for an example.
 422   // Also ConstantPool::header_size() is 20, which makes it very difficult
 423   // to double-align double on the constant pool. SG, 11/7/97
 424   __ lfdx(F15_ftos, Rcpool, Rindex);
 425   __ push(dtos);
 426   __ b(Lexit);
 427 
 428   __ bind(Llong);
 429   __ ldx(R17_tos, Rcpool, Rindex);
 430   __ push(ltos);
 431 
 432   __ bind(Lexit);
 433 }
 434 
 435 // Get the locals index located in the bytecode stream at bcp + offset.
 436 void TemplateTable::locals_index(Register Rdst, int offset) {
 437   __ lbz(Rdst, offset, R14_bcp);
 438 }
 439 
 440 void TemplateTable::iload() {








 441   transition(vtos, itos);
 442 
 443   // Get the local value into tos
 444   const Register Rindex = R22_tmp2;
 445   locals_index(Rindex);
 446 
 447   // Rewrite iload,iload  pair into fast_iload2
 448   //         iload,caload pair into fast_icaload
 449   if (RewriteFrequentPairs) {
 450     Label Lrewrite, Ldone;
 451     Register Rnext_byte  = R3_ARG1,
 452              Rrewrite_to = R6_ARG4,
 453              Rscratch    = R11_scratch1;
 454 
 455     // get next byte
 456     __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
 457 
 458     // if _iload, wait to rewrite to iload2. We only want to rewrite the
 459     // last two iloads in a pair. Comparing against fast_iload means that
 460     // the next bytecode is neither an iload nor a caload, and therefore
 461     // an iload pair.
 462     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
 463     __ beq(CCR0, Ldone);
 464 
 465     __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
 466     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
 467     __ beq(CCR1, Lrewrite);
 468 
 469     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);


 693 
 694 void TemplateTable::fload(int n) {
 695   transition(vtos, ftos);
 696 
 697   __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
 698 }
 699 
 700 void TemplateTable::dload(int n) {
 701   transition(vtos, dtos);
 702 
 703   __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
 704 }
 705 
 706 void TemplateTable::aload(int n) {
 707   transition(vtos, atos);
 708 
 709   __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
 710 }
 711 
 712 void TemplateTable::aload_0() {








 713   transition(vtos, atos);
 714   // According to bytecode histograms, the pairs:
 715   //
 716   // _aload_0, _fast_igetfield
 717   // _aload_0, _fast_agetfield
 718   // _aload_0, _fast_fgetfield
 719   //
 720   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 721   // _aload_0 bytecode checks if the next bytecode is either
 722   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 723   // rewrites the current bytecode into a pair bytecode; otherwise it
 724   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 725   // the pair check anymore.
 726   //
 727   // Note: If the next bytecode is _getfield, the rewrite must be
 728   //       delayed, otherwise we may miss an opportunity for a pair.
 729   //
 730   // Also rewrite frequent pairs
 731   //   aload_0, aload_1
 732   //   aload_0, iload_1
 733   // These bytecodes with a small amount of code are most profitable
 734   // to rewrite.
 735 
 736   if (RewriteFrequentPairs) {
 737 
 738     Label Lrewrite, Ldont_rewrite;
 739     Register Rnext_byte  = R3_ARG1,
 740              Rrewrite_to = R6_ARG4,
 741              Rscratch    = R11_scratch1;
 742 
 743     // Get next byte.
 744     __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
 745 
 746     // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
 747     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
 748     __ beq(CCR0, Ldont_rewrite);
 749 
 750     __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
 751     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
 752     __ beq(CCR1, Lrewrite);
 753 
 754     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
 755     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
 756     __ beq(CCR0, Lrewrite);


2128 // want to float loads above this check.
2129 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2130 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2131 
2132 // Call into the VM if call site is not yet resolved
2133 //
2134 // Input regs:
2135 //   - None, all passed regs are outputs.
2136 //
2137 // Returns:
2138 //   - Rcache:  The const pool cache entry that contains the resolved result.
2139 //   - Rresult: Either noreg or output for f1/f2.
2140 //
2141 // Kills:
2142 //   - Rscratch
2143 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2144 
2145   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2146   Label Lresolved, Ldone;
2147 






2148   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2149   // We are resolved if the indices offset contains the current bytecode.
2150 #if defined(VM_LITTLE_ENDIAN)
2151   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2152 #else
2153   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2154 #endif
2155   // Acquire by cmp-br-isync (see below).
2156   __ cmpdi(CCR0, Rscratch, (int)bytecode());
2157   __ beq(CCR0, Lresolved);
2158 
2159   address entry = NULL;
2160   switch (bytecode()) {
2161     case Bytecodes::_getstatic      : // fall through
2162     case Bytecodes::_putstatic      : // fall through
2163     case Bytecodes::_getfield       : // fall through
2164     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2165     case Bytecodes::_invokevirtual  : // fall through
2166     case Bytecodes::_invokespecial  : // fall through
2167     case Bytecodes::_invokestatic   : // fall through
2168     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2169     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2170     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2171     default                         : ShouldNotReachHere(); break;


2172   }
2173   __ li(R4_ARG2, (int)bytecode());
2174   __ call_VM(noreg, entry, R4_ARG2, true);
2175 
2176   // Update registers with resolved info.
2177   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2178   __ b(Ldone);
2179 
2180   __ bind(Lresolved);
2181   __ isync(); // Order load wrt. succeeding loads.
2182   __ bind(Ldone);
2183 }
2184 
2185 // Load the constant pool cache entry at field accesses into registers.
2186 // The Rcache and Rindex registers must be set before call.
2187 // Input:
2188 //   - Rcache, Rindex
2189 // Output:
2190 //   - Robj, Roffset, Rflags
2191 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2192                                               Register Rcache,
2193                                               Register Rindex /* unused on PPC64 */,


2334       __ get_cache_and_index_at_bcp(Rcache, 1);
2335     }
2336 
2337     __ align(32, 12);
2338     __ bind(Lno_field_access_post);
2339   }
2340 }
2341 
2342 // kills R11_scratch1
2343 void TemplateTable::pop_and_check_object(Register Roop) {
2344   Register Rtmp = R11_scratch1;
2345 
2346   assert_different_registers(Rtmp, Roop);
2347   __ pop_ptr(Roop);
2348   // For field access must check obj.
2349   __ null_check_throw(Roop, -1, Rtmp);
2350   __ verify_oop(Roop);
2351 }
2352 
2353 // PPC64: implement volatile loads as fence-load-acquire.
2354 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2355   transition(vtos, vtos);
2356 
2357   Label Lacquire, Lisync;
2358 
2359   const Register Rcache        = R3_ARG1,
2360                  Rclass_or_obj = R22_tmp2,
2361                  Roffset       = R23_tmp3,
2362                  Rflags        = R31,
2363                  Rbtable       = R5_ARG3,
2364                  Rbc           = R6_ARG4,
2365                  Rscratch      = R12_scratch2;
2366 
2367   static address field_branch_table[number_of_states],
2368                  static_branch_table[number_of_states];
2369 
2370   address* branch_table = is_static ? static_branch_table : field_branch_table;
2371 
2372   // Get field offset.
2373   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2374 
2375   // JVMTI support
2376   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2377 
2378   // Load after possible GC.
2379   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2380 
2381   // Load pointer to branch table.
2382   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2383 
2384   // Get volatile flag.
2385   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2386   // Note: sync is needed before volatile load on PPC64.
2387 
2388   // Check field type.
2389   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2390 


2401     __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2402   }
2403   __ ldx(Rbtable, Rbtable, Rflags);
2404 
2405   // Get the obj from stack.
2406   if (!is_static) {
2407     pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2408   } else {
2409     __ verify_oop(Rclass_or_obj);
2410   }
2411 
2412   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2413     __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2414   }
2415   __ mtctr(Rbtable);
2416   __ bctr();
2417 
2418 #ifdef ASSERT
2419   __ bind(LFlagInvalid);
2420   __ stop("got invalid flag", 0x654);






2421 

2422   // __ bind(Lvtos);
2423   address pc_before_fence = __ pc();
2424   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2425   assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2426   assert(branch_table[vtos] == 0, "can't compute twice");
2427   branch_table[vtos] = __ pc(); // non-volatile_entry point
2428   __ stop("vtos unexpected", 0x655);
2429 #endif
2430 
2431   __ align(32, 28, 28); // Align load.
2432   // __ bind(Ldtos);
2433   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2434   assert(branch_table[dtos] == 0, "can't compute twice");
2435   branch_table[dtos] = __ pc(); // non-volatile_entry point
2436   __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2437   __ push(dtos);
2438   if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);


2439   {
2440     Label acquire_double;
2441     __ beq(CCR6, acquire_double); // Volatile?
2442     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2443 
2444     __ bind(acquire_double);
2445     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2446     __ beq_predict_taken(CCR0, Lisync);
2447     __ b(Lisync); // In case of NAN.
2448   }
2449 
2450   __ align(32, 28, 28); // Align load.
2451   // __ bind(Lftos);
2452   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2453   assert(branch_table[ftos] == 0, "can't compute twice");
2454   branch_table[ftos] = __ pc(); // non-volatile_entry point
2455   __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2456   __ push(ftos);
2457   if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }


2458   {
2459     Label acquire_float;
2460     __ beq(CCR6, acquire_float); // Volatile?
2461     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2462 
2463     __ bind(acquire_float);
2464     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2465     __ beq_predict_taken(CCR0, Lisync);
2466     __ b(Lisync); // In case of NAN.
2467   }
2468 
2469   __ align(32, 28, 28); // Align load.
2470   // __ bind(Litos);
2471   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2472   assert(branch_table[itos] == 0, "can't compute twice");
2473   branch_table[itos] = __ pc(); // non-volatile_entry point
2474   __ lwax(R17_tos, Rclass_or_obj, Roffset);
2475   __ push(itos);
2476   if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);


2477   __ beq(CCR6, Lacquire); // Volatile?
2478   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2479 
2480   __ align(32, 28, 28); // Align load.
2481   // __ bind(Lltos);
2482   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2483   assert(branch_table[ltos] == 0, "can't compute twice");
2484   branch_table[ltos] = __ pc(); // non-volatile_entry point
2485   __ ldx(R17_tos, Rclass_or_obj, Roffset);
2486   __ push(ltos);
2487   if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);


2488   __ beq(CCR6, Lacquire); // Volatile?
2489   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2490 
2491   __ align(32, 28, 28); // Align load.
2492   // __ bind(Lbtos);
2493   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2494   assert(branch_table[btos] == 0, "can't compute twice");
2495   branch_table[btos] = __ pc(); // non-volatile_entry point
2496   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2497   __ extsb(R17_tos, R17_tos);
2498   __ push(btos);
2499   if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);


2500   __ beq(CCR6, Lacquire); // Volatile?
2501   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2502 
2503   __ align(32, 28, 28); // Align load.
2504   // __ bind(Lctos);
2505   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2506   assert(branch_table[ctos] == 0, "can't compute twice");
2507   branch_table[ctos] = __ pc(); // non-volatile_entry point
2508   __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2509   __ push(ctos);
2510   if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);


2511   __ beq(CCR6, Lacquire); // Volatile?
2512   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2513 
2514   __ align(32, 28, 28); // Align load.
2515   // __ bind(Lstos);
2516   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2517   assert(branch_table[stos] == 0, "can't compute twice");
2518   branch_table[stos] = __ pc(); // non-volatile_entry point
2519   __ lhax(R17_tos, Rclass_or_obj, Roffset);
2520   __ push(stos);
2521   if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);


2522   __ beq(CCR6, Lacquire); // Volatile?
2523   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2524 
2525   __ align(32, 28, 28); // Align load.
2526   // __ bind(Latos);
2527   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2528   assert(branch_table[atos] == 0, "can't compute twice");
2529   branch_table[atos] = __ pc(); // non-volatile_entry point
2530   __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2531   __ verify_oop(R17_tos);
2532   __ push(atos);
2533   //__ dcbt(R17_tos); // prefetch
2534   if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);


2535   __ beq(CCR6, Lacquire); // Volatile?
2536   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2537 
2538   __ align(32, 12);
2539   __ bind(Lacquire);
2540   __ twi_0(R17_tos);
2541   __ bind(Lisync);
2542   __ isync(); // acquire
2543 
2544 #ifdef ASSERT
2545   for (int i = 0; i<number_of_states; ++i) {
2546     assert(branch_table[i], "get initialization");
2547     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2548     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2549   }
2550 #endif
2551 }
2552 
2553 void TemplateTable::getfield(int byte_no) {
2554   getfield_or_static(byte_no, false);
2555 }
2556 




2557 void TemplateTable::getstatic(int byte_no) {
2558   getfield_or_static(byte_no, true);
2559 }
2560 
2561 // The registers cache and index are expected to be set before the call.
2562 // The function may destroy various registers, just not the cache and index registers.
2563 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2564 
2565   assert_different_registers(Rcache, Rscratch, R6_ARG4);
2566 
2567   if (JvmtiExport::can_post_field_modification()) {
2568     Label Lno_field_mod_post;
2569 
2570     // Check if post field modification is enabled.
2571     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2572     __ lwz(Rscratch, offs, Rscratch);
2573 
2574     __ cmpwi(CCR0, Rscratch, 0);
2575     __ beq(CCR0, Lno_field_mod_post);
2576 


2627 
2628     // In case of the fast versions, value lives in registers => put it back on tos.
2629     switch(bytecode()) {
2630       case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2631       case Bytecodes::_fast_iputfield: // Fall through
2632       case Bytecodes::_fast_bputfield: // Fall through
2633       case Bytecodes::_fast_cputfield: // Fall through
2634       case Bytecodes::_fast_sputfield: __ pop_i(); break;
2635       case Bytecodes::_fast_lputfield: __ pop_l(); break;
2636       case Bytecodes::_fast_fputfield: __ pop_f(); break;
2637       case Bytecodes::_fast_dputfield: __ pop_d(); break;
2638       default: break; // Nothin' to do.
2639     }
2640 
2641     __ align(32, 12);
2642     __ bind(Lno_field_mod_post);
2643   }
2644 }
2645 
2646 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2647 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2648   Label Lvolatile;
2649 
2650   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2651                  Rclass_or_obj = R31,      // Needs to survive C call.
2652                  Roffset       = R22_tmp2, // Needs to survive C call.
2653                  Rflags        = R3_ARG1,
2654                  Rbtable       = R4_ARG2,
2655                  Rscratch      = R11_scratch1,
2656                  Rscratch2     = R12_scratch2,
2657                  Rscratch3     = R6_ARG4,
2658                  Rbc           = Rscratch3;
2659   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2660 
2661   static address field_branch_table[number_of_states],

2662                  static_branch_table[number_of_states];
2663 
2664   address* branch_table = is_static ? static_branch_table : field_branch_table;

2665 
2666   // Stack (grows up):
2667   //  value
2668   //  obj
2669 
2670   // Load the field offset.
2671   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2672   jvmti_post_field_mod(Rcache, Rscratch, is_static);
2673   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2674 
2675   // Load pointer to branch table.
2676   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2677 
2678   // Get volatile flag.
2679   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2680 
2681   // Check the field type.
2682   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2683 
2684 #ifdef ASSERT
2685   Label LFlagInvalid;
2686   __ cmpldi(CCR0, Rflags, number_of_states);
2687   __ bge(CCR0, LFlagInvalid);
2688 #endif
2689 
2690   // Load from branch table and dispatch (volatile case: one instruction ahead).
2691   __ sldi(Rflags, Rflags, LogBytesPerWord);
2692   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?


2693   __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2694   __ ldx(Rbtable, Rbtable, Rflags);
2695 
2696   __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2697   __ mtctr(Rbtable);
2698   __ bctr();
2699 
2700 #ifdef ASSERT
2701   __ bind(LFlagInvalid);
2702   __ stop("got invalid flag", 0x656);
2703 
2704   // __ bind(Lvtos);
2705   address pc_before_release = __ pc();
2706   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2707   assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2708   assert(branch_table[vtos] == 0, "can't compute twice");
2709   branch_table[vtos] = __ pc(); // non-volatile_entry point
2710   __ stop("vtos unexpected", 0x657);
2711 #endif
2712 
2713   __ align(32, 28, 28); // Align pop.
2714   // __ bind(Ldtos);
2715   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2716   assert(branch_table[dtos] == 0, "can't compute twice");
2717   branch_table[dtos] = __ pc(); // non-volatile_entry point
2718   __ pop(dtos);
2719   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.


2720   __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2721   if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }


2722   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2723     __ beq(CR_is_vol, Lvolatile); // Volatile?
2724   }
2725   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2726 
2727   __ align(32, 28, 28); // Align pop.
2728   // __ bind(Lftos);
2729   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2730   assert(branch_table[ftos] == 0, "can't compute twice");
2731   branch_table[ftos] = __ pc(); // non-volatile_entry point
2732   __ pop(ftos);
2733   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2734   __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2735   if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }


2736   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2737     __ beq(CR_is_vol, Lvolatile); // Volatile?
2738   }
2739   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2740 
2741   __ align(32, 28, 28); // Align pop.
2742   // __ bind(Litos);
2743   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2744   assert(branch_table[itos] == 0, "can't compute twice");
2745   branch_table[itos] = __ pc(); // non-volatile_entry point
2746   __ pop(itos);
2747   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2748   __ stwx(R17_tos, Rclass_or_obj, Roffset);
2749   if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }


2750   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2751     __ beq(CR_is_vol, Lvolatile); // Volatile?
2752   }
2753   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2754 
2755   __ align(32, 28, 28); // Align pop.
2756   // __ bind(Lltos);
2757   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2758   assert(branch_table[ltos] == 0, "can't compute twice");
2759   branch_table[ltos] = __ pc(); // non-volatile_entry point
2760   __ pop(ltos);
2761   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2762   __ stdx(R17_tos, Rclass_or_obj, Roffset);
2763   if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }


2764   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2765     __ beq(CR_is_vol, Lvolatile); // Volatile?
2766   }
2767   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2768 
2769   __ align(32, 28, 28); // Align pop.
2770   // __ bind(Lbtos);
2771   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2772   assert(branch_table[btos] == 0, "can't compute twice");
2773   branch_table[btos] = __ pc(); // non-volatile_entry point
2774   __ pop(btos);
2775   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2776   __ stbx(R17_tos, Rclass_or_obj, Roffset);
2777   if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }


2778   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2779     __ beq(CR_is_vol, Lvolatile); // Volatile?
2780   }
2781   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2782 
2783   __ align(32, 28, 28); // Align pop.
2784   // __ bind(Lctos);
2785   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2786   assert(branch_table[ctos] == 0, "can't compute twice");
2787   branch_table[ctos] = __ pc(); // non-volatile_entry point
2788   __ pop(ctos);
2789   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2790   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2791   if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }


2792   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2793     __ beq(CR_is_vol, Lvolatile); // Volatile?
2794   }
2795   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2796 
2797   __ align(32, 28, 28); // Align pop.
2798   // __ bind(Lstos);
2799   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2800   assert(branch_table[stos] == 0, "can't compute twice");
2801   branch_table[stos] = __ pc(); // non-volatile_entry point
2802   __ pop(stos);
2803   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2804   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2805   if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }


2806   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2807     __ beq(CR_is_vol, Lvolatile); // Volatile?
2808   }
2809   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2810 
2811   __ align(32, 28, 28); // Align pop.
2812   // __ bind(Latos);
2813   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2814   assert(branch_table[atos] == 0, "can't compute twice");
2815   branch_table[atos] = __ pc(); // non-volatile_entry point
2816   __ pop(atos);
2817   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2818   do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2819   if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }


2820   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2821     __ beq(CR_is_vol, Lvolatile); // Volatile?
2822     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2823 
2824     __ align(32, 12);
2825     __ bind(Lvolatile);
2826     __ fence();
2827   }
2828   // fallthru: __ b(Lexit);
2829 
2830 #ifdef ASSERT
2831   for (int i = 0; i<number_of_states; ++i) {
2832     assert(branch_table[i], "put initialization");
2833     //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2834     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2835   }
2836 #endif
2837 }
2838 
2839 void TemplateTable::putfield(int byte_no) {
2840   putfield_or_static(byte_no, false);
2841 }
2842 




2843 void TemplateTable::putstatic(int byte_no) {
2844   putfield_or_static(byte_no, true);
2845 }
2846 
2847 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2848 void TemplateTable::jvmti_post_fast_field_mod() {
2849   __ should_not_reach_here();
2850 }
2851 
2852 void TemplateTable::fast_storefield(TosState state) {
2853   transition(state, vtos);
2854 
2855   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2856                  Rclass_or_obj = R31,      // Needs to survive C call.
2857                  Roffset       = R22_tmp2, // Needs to survive C call.
2858                  Rflags        = R3_ARG1,
2859                  Rscratch      = R11_scratch1,
2860                  Rscratch2     = R12_scratch2,
2861                  Rscratch3     = R4_ARG2;
2862   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).


3243 void TemplateTable::invokevirtual(int byte_no) {
3244   transition(vtos, vtos);
3245 
3246   Register Rtable_addr = R11_scratch1,
3247            Rret_type = R12_scratch2,
3248            Rret_addr = R5_ARG3,
3249            Rflags = R22_tmp2, // Should survive C call.
3250            Rrecv = R3_ARG1,
3251            Rrecv_klass = Rrecv,
3252            Rvtableindex_or_method = R31, // Should survive C call.
3253            Rnum_params = R4_ARG2,
3254            Rnew_bc = R6_ARG4;
3255 
3256   Label LnotFinal;
3257 
3258   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3259 
3260   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3261   __ bfalse(CCR0, LnotFinal);
3262 

3263   patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);

3264   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3265 
3266   __ align(32, 12);
3267   __ bind(LnotFinal);
3268   // Load "this" pointer (receiver).
3269   __ rldicl(Rnum_params, Rflags, 64, 48);
3270   __ load_receiver(Rnum_params, Rrecv);
3271   __ verify_oop(Rrecv);
3272 
3273   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3274   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3275   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3276   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3277   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3278   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3279   __ load_klass(Rrecv_klass, Rrecv);
3280   __ verify_klass_ptr(Rrecv_klass);
3281   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3282 
3283   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
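
For reference, a minimal standalone sketch (hypothetical helper, not the HotSpot assembler API) of what rldicl(Rnum_params, Rflags, 64, 48) above computes: a rotate by 64 is a no-op, and clearing the upper 48 bits keeps the low 16 bits of the cp-cache flags word, which hold the parameter size used to locate the receiver on the expression stack.

    #include <cstdint>

    // rldicl(dst, src, 64, 48): rotate left by 64 (no-op), then clear the top 48 bits.
    // The low bits of a method entry's flags hold the parameter size.
    uint64_t parameter_size_from_flags(uint64_t flags) {
      return flags & 0xFFFFull;
    }

The listing that follows repeats the same regions as they appear after the change, with the RewriteControl parameter threaded through the templates so that bytecode rewriting can be suppressed (for example for methods shared via CDS).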




 421   // Check out Conversions.java for an example.
 422   // Also ConstantPool::header_size() is 20, which makes it very difficult
 423   // to double-align double on the constant pool. SG, 11/7/97
 424   __ lfdx(F15_ftos, Rcpool, Rindex);
 425   __ push(dtos);
 426   __ b(Lexit);
 427 
 428   __ bind(Llong);
 429   __ ldx(R17_tos, Rcpool, Rindex);
 430   __ push(ltos);
 431 
 432   __ bind(Lexit);
 433 }
 434 
 435 // Get the locals index located in the bytecode stream at bcp + offset.
 436 void TemplateTable::locals_index(Register Rdst, int offset) {
 437   __ lbz(Rdst, offset, R14_bcp);
 438 }
 439 
 440 void TemplateTable::iload() {
 441   iload_internal();
 442 }
 443 
 444 void TemplateTable::nofast_iload() {
 445   iload_internal(may_not_rewrite);
 446 }
 447 
 448 void TemplateTable::iload_internal(RewriteControl rc) {
 449   transition(vtos, itos);
 450 
 451   // Get the local value into tos
 452   const Register Rindex = R22_tmp2;
 453   locals_index(Rindex);
 454 
 455   // Rewrite iload,iload  pair into fast_iload2
 456   //         iload,caload pair into fast_icaload
 457   if (RewriteFrequentPairs && rc == may_rewrite) {
 458     Label Lrewrite, Ldone;
 459     Register Rnext_byte  = R3_ARG1,
 460              Rrewrite_to = R6_ARG4,
 461              Rscratch    = R11_scratch1;
 462 
 463     // get next byte
 464     __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
 465 
 466     // if _iload, wait to rewrite to iload2. We only want to rewrite the
 467     // last two iloads in a pair. Comparing against fast_iload means that
 468     // the next bytecode is neither an iload nor a caload, and therefore
 469     // an iload pair.
 470     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
 471     __ beq(CCR0, Ldone);
 472 
 473     __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
 474     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
 475     __ beq(CCR1, Lrewrite);
 476 
 477     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
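
As a rough standalone sketch (hypothetical mini-enum, not Bytecodes::Code) of the decision the cmpwi/li/beq sequence above implements: only the last iload of a run is rewritten, and the replacement depends on the bytecode that follows.

    #include <cstdint>

    // Hypothetical stand-in for the relevant Bytecodes::Code values.
    enum class Bc : uint8_t { iload, caload, fast_iload, fast_iload2, fast_icaload };

    Bc rewrite_for_iload(Bc next) {
      if (next == Bc::iload)      return Bc::iload;        // defer: rewrite only the last iload of a pair
      if (next == Bc::fast_iload) return Bc::fast_iload2;  // iload,iload pair
      if (next == Bc::caload)     return Bc::fast_icaload; // iload,caload pair
      return Bc::fast_iload;                               // lone iload
    }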


 701 
 702 void TemplateTable::fload(int n) {
 703   transition(vtos, ftos);
 704 
 705   __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
 706 }
 707 
 708 void TemplateTable::dload(int n) {
 709   transition(vtos, dtos);
 710 
 711   __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
 712 }
 713 
 714 void TemplateTable::aload(int n) {
 715   transition(vtos, atos);
 716 
 717   __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
 718 }
 719 
 720 void TemplateTable::aload_0() {
 721   aload_0_internal();
 722 }
 723 
 724 void TemplateTable::nofast_aload_0() {
 725   aload_0_internal(may_not_rewrite);
 726 }
 727 
 728 void TemplateTable::aload_0_internal(RewriteControl rc) {
 729   transition(vtos, atos);
 730   // According to bytecode histograms, the pairs:
 731   //
 732   // _aload_0, _fast_igetfield
 733   // _aload_0, _fast_agetfield
 734   // _aload_0, _fast_fgetfield
 735   //
 736   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 737   // _aload_0 bytecode checks if the next bytecode is either
 738   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 739   // rewrites the current bytecode into a pair bytecode; otherwise it
 740   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 741   // the pair check anymore.
 742   //
 743   // Note: If the next bytecode is _getfield, the rewrite must be
 744   //       delayed, otherwise we may miss an opportunity for a pair.
 745   //
 746   // Also rewrite frequent pairs
 747   //   aload_0, aload_1
 748   //   aload_0, iload_1
 749   // These bytecodes with a small amount of code are most profitable
 750   // to rewrite.
 751 
 752   if (RewriteFrequentPairs && rc == may_rewrite) {
 753 
 754     Label Lrewrite, Ldont_rewrite;
 755     Register Rnext_byte  = R3_ARG1,
 756              Rrewrite_to = R6_ARG4,
 757              Rscratch    = R11_scratch1;
 758 
 759     // Get next byte.
 760     __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
 761 
 762     // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
 763     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
 764     __ beq(CCR0, Ldont_rewrite);
 765 
 766     __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
 767     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
 768     __ beq(CCR1, Lrewrite);
 769 
 770     __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
 771     __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
 772     __ beq(CCR0, Lrewrite);
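
Similarly, a standalone sketch (hypothetical names) of the aload_0 selection described in the comment above; a following _getfield defers the rewrite so the field access can be rewritten first.

    #include <cstdint>

    // Hypothetical stand-in for the relevant Bytecodes::Code values.
    enum class Bc : uint8_t { aload_0, getfield, fast_igetfield, fast_agetfield, fast_fgetfield,
                              fast_iaccess_0, fast_aaccess_0, fast_faccess_0, fast_aload_0 };

    Bc rewrite_for_aload_0(Bc next) {
      switch (next) {
        case Bc::getfield:       return Bc::aload_0;        // defer; rewrite the getfield first
        case Bc::fast_igetfield: return Bc::fast_iaccess_0; // aload_0, igetfield pair
        case Bc::fast_agetfield: return Bc::fast_aaccess_0; // aload_0, agetfield pair
        case Bc::fast_fgetfield: return Bc::fast_faccess_0; // aload_0, fgetfield pair
        default:                 return Bc::fast_aload_0;   // no pair: skip the check next time
      }
    }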


2144 // want to float loads above this check.
2145 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2146 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2147 
2148 // Call into the VM if call site is not yet resolved
2149 //
2150 // Input regs:
2151 //   - None, all passed regs are outputs.
2152 //
2153 // Returns:
2154 //   - Rcache:  The const pool cache entry that contains the resolved result.
2155 //   - Rresult: Either noreg or output for f1/f2.
2156 //
2157 // Kills:
2158 //   - Rscratch
2159 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2160 
2161   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2162   Label Lresolved, Ldone;
2163 
2164   Bytecodes::Code code = bytecode();
2165   switch (code) {
2166   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2167   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2168   }
2169 
2170   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2171   // We are resolved if the indices offset contains the current bytecode.
2172 #if defined(VM_LITTLE_ENDIAN)
2173   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2174 #else
2175   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2176 #endif
2177   // Acquire by cmp-br-isync (see below).
2178   __ cmpdi(CCR0, Rscratch, (int)code);
2179   __ beq(CCR0, Lresolved);
2180 
2181   address entry = NULL;
2182   switch (code) {
2183     case Bytecodes::_getstatic      : // fall through
2184     case Bytecodes::_putstatic      : // fall through
2185     case Bytecodes::_getfield       : // fall through
2186     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2187     case Bytecodes::_invokevirtual  : // fall through
2188     case Bytecodes::_invokespecial  : // fall through
2189     case Bytecodes::_invokestatic   : // fall through
2190     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2191     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2192     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2193     default                         :
2194       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(code)));
2195       break;
2196   }
2197   __ li(R4_ARG2, code);
2198   __ call_VM(noreg, entry, R4_ARG2, true);
2199 
2200   // Update registers with resolved info.
2201   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2202   __ b(Ldone);
2203 
2204   __ bind(Lresolved);
2205   __ isync(); // Order load wrt. succeeding loads.
2206   __ bind(Ldone);
2207 }
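
A standalone sketch (hypothetical types, not the ConstantPoolCacheEntry API) of the two ideas above: the new _nofast_* bytecodes resolve exactly like their rewriting counterparts, and an entry counts as resolved once its indices field records that canonical bytecode.

    #include <cstdint>

    enum class Bc : uint8_t { getfield, putfield, nofast_getfield, nofast_putfield };

    // The _nofast_* variants (used when rewriting must be suppressed, e.g. with CDS)
    // resolve as the plain field bytecodes.
    Bc canonicalize(Bc code) {
      switch (code) {
        case Bc::nofast_getfield: return Bc::getfield;
        case Bc::nofast_putfield: return Bc::putfield;
        default:                  return code;
      }
    }

    // The lbz/cmpdi above: resolved iff the recorded bytecode matches.
    bool is_resolved(uint8_t recorded, Bc current) {
      return recorded == static_cast<uint8_t>(canonicalize(current));
    }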
2208 
2209 // Load the constant pool cache entry at field accesses into registers.
2210 // The Rcache and Rindex registers must be set before call.
2211 // Input:
2212 //   - Rcache, Rindex
2213 // Output:
2214 //   - Robj, Roffset, Rflags
2215 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2216                                               Register Rcache,
2217                                               Register Rindex /* unused on PPC64 */,


2358       __ get_cache_and_index_at_bcp(Rcache, 1);
2359     }
2360 
2361     __ align(32, 12);
2362     __ bind(Lno_field_access_post);
2363   }
2364 }
2365 
2366 // kills R11_scratch1
2367 void TemplateTable::pop_and_check_object(Register Roop) {
2368   Register Rtmp = R11_scratch1;
2369 
2370   assert_different_registers(Rtmp, Roop);
2371   __ pop_ptr(Roop);
2372   // For field access must check obj.
2373   __ null_check_throw(Roop, -1, Rtmp);
2374   __ verify_oop(Roop);
2375 }
2376 
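The next template implements Java volatile field loads. As a portable sketch of the intended semantics only (std::atomic as a stand-in, not interpreter code): a volatile get needs a fence before the load and an acquire after it, and a volatile put needs a release before the store plus a trailing sync, which together give the sequentially consistent behavior Java volatile requires.

    #include <atomic>

    // Semantic stand-in for the barrier placement the PPC64 templates emit.
    int  volatile_get(const std::atomic<int>& field) { return field.load(std::memory_order_seq_cst); }
    void volatile_put(std::atomic<int>& field, int v) { field.store(v, std::memory_order_seq_cst); }
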
2377 // PPC64: implement volatile loads as fence-load-acquire.
2378 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2379   transition(vtos, vtos);
2380 
2381   Label Lacquire, Lisync;
2382 
2383   const Register Rcache        = R3_ARG1,
2384                  Rclass_or_obj = R22_tmp2,
2385                  Roffset       = R23_tmp3,
2386                  Rflags        = R31,
2387                  Rbtable       = R5_ARG3,
2388                  Rbc           = R6_ARG4,
2389                  Rscratch      = R12_scratch2;
2390 
2391   static address field_branch_table[number_of_states],
2392                  static_branch_table[number_of_states];
2393 
2394   address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
2395 
2396   // Get field offset.
2397   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2398 
2399   // JVMTI support
2400   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2401 
2402   // Load after possible GC.
2403   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2404 
2405   // Load pointer to branch table.
2406   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2407 
2408   // Get volatile flag.
2409   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2410   // Note: sync is needed before volatile load on PPC64.
2411 
2412   // Check field type.
2413   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2414 


2425     __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2426   }
2427   __ ldx(Rbtable, Rbtable, Rflags);
2428 
2429   // Get the obj from stack.
2430   if (!is_static) {
2431     pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2432   } else {
2433     __ verify_oop(Rclass_or_obj);
2434   }
2435 
2436   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2437     __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2438   }
2439   __ mtctr(Rbtable);
2440   __ bctr();
2441 
2442 #ifdef ASSERT
2443   __ bind(LFlagInvalid);
2444   __ stop("got invalid flag", 0x654);
2445 #endif
2446 
2447   if (!is_static && rc == may_not_rewrite) {
2448     // We reuse the code from is_static.  It's jumped to via the table above.
2449     return;
2450   }
2451 
2452 #ifdef ASSERT
2453   // __ bind(Lvtos);
2454   address pc_before_fence = __ pc();
2455   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2456   assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2457   assert(branch_table[vtos] == 0, "can't compute twice");
2458   branch_table[vtos] = __ pc(); // non-volatile_entry point
2459   __ stop("vtos unexpected", 0x655);
2460 #endif
2461 
2462   __ align(32, 28, 28); // Align load.
2463   // __ bind(Ldtos);
2464   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2465   assert(branch_table[dtos] == 0, "can't compute twice");
2466   branch_table[dtos] = __ pc(); // non-volatile_entry point
2467   __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2468   __ push(dtos);
2469   if (!is_static && rc == may_rewrite) {
2470     patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2471   }
2472   {
2473     Label acquire_double;
2474     __ beq(CCR6, acquire_double); // Volatile?
2475     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2476 
2477     __ bind(acquire_double);
2478     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2479     __ beq_predict_taken(CCR0, Lisync);
2480     __ b(Lisync); // In case of NAN.
2481   }
2482 
2483   __ align(32, 28, 28); // Align load.
2484   // __ bind(Lftos);
2485   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2486   assert(branch_table[ftos] == 0, "can't compute twice");
2487   branch_table[ftos] = __ pc(); // non-volatile_entry point
2488   __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2489   __ push(ftos);
2490   if (!is_static && rc == may_rewrite) {
2491     patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
2492   }
2493   {
2494     Label acquire_float;
2495     __ beq(CCR6, acquire_float); // Volatile?
2496     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2497 
2498     __ bind(acquire_float);
2499     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2500     __ beq_predict_taken(CCR0, Lisync);
2501     __ b(Lisync); // In case of NAN.
2502   }
2503 
2504   __ align(32, 28, 28); // Align load.
2505   // __ bind(Litos);
2506   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2507   assert(branch_table[itos] == 0, "can't compute twice");
2508   branch_table[itos] = __ pc(); // non-volatile_entry point
2509   __ lwax(R17_tos, Rclass_or_obj, Roffset);
2510   __ push(itos);
2511   if (!is_static && rc == may_rewrite) {
2512     patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2513   }
2514   __ beq(CCR6, Lacquire); // Volatile?
2515   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2516 
2517   __ align(32, 28, 28); // Align load.
2518   // __ bind(Lltos);
2519   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2520   assert(branch_table[ltos] == 0, "can't compute twice");
2521   branch_table[ltos] = __ pc(); // non-volatile_entry point
2522   __ ldx(R17_tos, Rclass_or_obj, Roffset);
2523   __ push(ltos);
2524   if (!is_static && rc == may_rewrite) {
2525     patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2526   }
2527   __ beq(CCR6, Lacquire); // Volatile?
2528   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2529 
2530   __ align(32, 28, 28); // Align load.
2531   // __ bind(Lbtos);
2532   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2533   assert(branch_table[btos] == 0, "can't compute twice");
2534   branch_table[btos] = __ pc(); // non-volatile_entry point
2535   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2536   __ extsb(R17_tos, R17_tos);
2537   __ push(btos);
2538   if (!is_static && rc == may_rewrite) {
2539     patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2540   }
2541   __ beq(CCR6, Lacquire); // Volatile?
2542   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2543 
2544   __ align(32, 28, 28); // Align load.
2545   // __ bind(Lctos);
2546   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2547   assert(branch_table[ctos] == 0, "can't compute twice");
2548   branch_table[ctos] = __ pc(); // non-volatile_entry point
2549   __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2550   __ push(ctos);
2551   if (!is_static && rc == may_rewrite) {
2552     patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2553   }
2554   __ beq(CCR6, Lacquire); // Volatile?
2555   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2556 
2557   __ align(32, 28, 28); // Align load.
2558   // __ bind(Lstos);
2559   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2560   assert(branch_table[stos] == 0, "can't compute twice");
2561   branch_table[stos] = __ pc(); // non-volatile_entry point
2562   __ lhax(R17_tos, Rclass_or_obj, Roffset);
2563   __ push(stos);
2564   if (!is_static && rc == may_rewrite) {
2565     patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2566   }
2567   __ beq(CCR6, Lacquire); // Volatile?
2568   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2569 
2570   __ align(32, 28, 28); // Align load.
2571   // __ bind(Latos);
2572   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2573   assert(branch_table[atos] == 0, "can't compute twice");
2574   branch_table[atos] = __ pc(); // non-volatile_entry point
2575   __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2576   __ verify_oop(R17_tos);
2577   __ push(atos);
2578   //__ dcbt(R17_tos); // prefetch
2579   if (!is_static && rc == may_rewrite) {
2580     patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2581   }
2582   __ beq(CCR6, Lacquire); // Volatile?
2583   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2584 
2585   __ align(32, 12);
2586   __ bind(Lacquire);
2587   __ twi_0(R17_tos);
2588   __ bind(Lisync);
2589   __ isync(); // acquire
2590 
2591 #ifdef ASSERT
2592   for (int i = 0; i<number_of_states; ++i) {
2593     assert(branch_table[i], "get initialization");
2594     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2595     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2596   }
2597 #endif
2598 }
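
A standalone sketch (hypothetical constants and helpers) of the branch-table dispatch used by getfield_or_static and putfield_or_static: the tos state and volatile bit are extracted from the flags word, the table is indexed by state, and where a leading barrier is needed the volatile path enters one instruction earlier, on the fence or release.

    #include <cstdint>

    using address = const uint8_t*;
    const int BytesPerInstWord = 4; // PPC64 instructions are 4 bytes wide

    // Stand-in for the rldicl extractions; shift/width values come from ConstantPoolCacheEntry.
    uint64_t bits(uint64_t flags, int shift, int width) {
      return (flags >> shift) & ((uint64_t(1) << width) - 1);
    }

    address select_entry(address* branch_table, uint64_t flags,
                         int tos_state_shift, int tos_state_bits, int is_volatile_shift) {
      uint64_t tos_state   = bits(flags, tos_state_shift, tos_state_bits);
      uint64_t is_volatile = bits(flags, is_volatile_shift, 1);
      address entry = branch_table[tos_state];    // non-volatile entry point
      if (is_volatile) entry -= BytesPerInstWord; // one instruction earlier: the fence/release
      return entry;
    }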
2599 
2600 void TemplateTable::getfield(int byte_no) {
2601   getfield_or_static(byte_no, false);
2602 }
2603 
2604 void TemplateTable::nofast_getfield(int byte_no) {
2605   getfield_or_static(byte_no, false, may_not_rewrite);
2606 }
2607 
2608 void TemplateTable::getstatic(int byte_no) {
2609   getfield_or_static(byte_no, true);
2610 }
2611 
2612 // The registers cache and index are expected to be set before the call.
2613 // The function may destroy various registers, just not the cache and index registers.
2614 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2615 
2616   assert_different_registers(Rcache, Rscratch, R6_ARG4);
2617 
2618   if (JvmtiExport::can_post_field_modification()) {
2619     Label Lno_field_mod_post;
2620 
2621     // Check if post field modification is enabled.
2622     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2623     __ lwz(Rscratch, offs, Rscratch);
2624 
2625     __ cmpwi(CCR0, Rscratch, 0);
2626     __ beq(CCR0, Lno_field_mod_post);
2627 


2678 
2679     // In case of the fast versions, value lives in registers => put it back on tos.
2680     switch(bytecode()) {
2681       case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2682       case Bytecodes::_fast_iputfield: // Fall through
2683       case Bytecodes::_fast_bputfield: // Fall through
2684       case Bytecodes::_fast_cputfield: // Fall through
2685       case Bytecodes::_fast_sputfield: __ pop_i(); break;
2686       case Bytecodes::_fast_lputfield: __ pop_l(); break;
2687       case Bytecodes::_fast_fputfield: __ pop_f(); break;
2688       case Bytecodes::_fast_dputfield: __ pop_d(); break;
2689       default: break; // Nothin' to do.
2690     }
2691 
2692     __ align(32, 12);
2693     __ bind(Lno_field_mod_post);
2694   }
2695 }
2696 
2697 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2698 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2699   Label Lvolatile;
2700 
2701   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2702                  Rclass_or_obj = R31,      // Needs to survive C call.
2703                  Roffset       = R22_tmp2, // Needs to survive C call.
2704                  Rflags        = R3_ARG1,
2705                  Rbtable       = R4_ARG2,
2706                  Rscratch      = R11_scratch1,
2707                  Rscratch2     = R12_scratch2,
2708                  Rscratch3     = R6_ARG4,
2709                  Rbc           = Rscratch3;
2710   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2711 
2712   static address field_rw_branch_table[number_of_states],
2713                  field_norw_branch_table[number_of_states],
2714                  static_branch_table[number_of_states];
2715 
2716   address* branch_table = is_static ? static_branch_table :
2717     (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);
2718 
2719   // Stack (grows up):
2720   //  value
2721   //  obj
2722 
2723   // Load the field offset.
2724   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2725   jvmti_post_field_mod(Rcache, Rscratch, is_static);
2726   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2727 
2728   // Load pointer to branch table.
2729   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2730 
2731   // Get volatile flag.
2732   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2733 
2734   // Check the field type.
2735   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2736 
2737 #ifdef ASSERT
2738   Label LFlagInvalid;
2739   __ cmpldi(CCR0, Rflags, number_of_states);
2740   __ bge(CCR0, LFlagInvalid);
2741 #endif
2742 
2743   // Load from branch table and dispatch (volatile case: one instruction ahead).
2744   __ sldi(Rflags, Rflags, LogBytesPerWord);
2745   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2746     __ cmpwi(CR_is_vol, Rscratch, 1);  // Volatile?
2747   }
2748   __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2749   __ ldx(Rbtable, Rbtable, Rflags);
2750 
2751   __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2752   __ mtctr(Rbtable);
2753   __ bctr();
2754 
2755 #ifdef ASSERT
2756   __ bind(LFlagInvalid);
2757   __ stop("got invalid flag", 0x656);
2758 
2759   // __ bind(Lvtos);
2760   address pc_before_release = __ pc();
2761   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2762   assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2763   assert(branch_table[vtos] == 0, "can't compute twice");
2764   branch_table[vtos] = __ pc(); // non-volatile_entry point
2765   __ stop("vtos unexpected", 0x657);
2766 #endif
2767 
2768   __ align(32, 28, 28); // Align pop.
2769   // __ bind(Ldtos);
2770   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2771   assert(branch_table[dtos] == 0, "can't compute twice");
2772   branch_table[dtos] = __ pc(); // non-volatile_entry point
2773   __ pop(dtos);
2774   if (!is_static) {
2775     pop_and_check_object(Rclass_or_obj);  // Kills R11_scratch1.
2776   }
2777   __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2778   if (!is_static && rc == may_rewrite) {
2779     patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
2780   }
2781   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2782     __ beq(CR_is_vol, Lvolatile); // Volatile?
2783   }
2784   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2785 
2786   __ align(32, 28, 28); // Align pop.
2787   // __ bind(Lftos);
2788   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2789   assert(branch_table[ftos] == 0, "can't compute twice");
2790   branch_table[ftos] = __ pc(); // non-volatile_entry point
2791   __ pop(ftos);
2792   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2793   __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2794   if (!is_static && rc == may_rewrite) {
2795     patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
2796   }
2797   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2798     __ beq(CR_is_vol, Lvolatile); // Volatile?
2799   }
2800   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2801 
2802   __ align(32, 28, 28); // Align pop.
2803   // __ bind(Litos);
2804   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2805   assert(branch_table[itos] == 0, "can't compute twice");
2806   branch_table[itos] = __ pc(); // non-volatile_entry point
2807   __ pop(itos);
2808   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2809   __ stwx(R17_tos, Rclass_or_obj, Roffset);
2810   if (!is_static && rc == may_rewrite) {
2811     patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
2812   }
2813   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2814     __ beq(CR_is_vol, Lvolatile); // Volatile?
2815   }
2816   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2817 
2818   __ align(32, 28, 28); // Align pop.
2819   // __ bind(Lltos);
2820   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2821   assert(branch_table[ltos] == 0, "can't compute twice");
2822   branch_table[ltos] = __ pc(); // non-volatile_entry point
2823   __ pop(ltos);
2824   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2825   __ stdx(R17_tos, Rclass_or_obj, Roffset);
2826   if (!is_static && rc == may_rewrite) {
2827     patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
2828   }
2829   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2830     __ beq(CR_is_vol, Lvolatile); // Volatile?
2831   }
2832   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2833 
2834   __ align(32, 28, 28); // Align pop.
2835   // __ bind(Lbtos);
2836   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2837   assert(branch_table[btos] == 0, "can't compute twice");
2838   branch_table[btos] = __ pc(); // non-volatile_entry point
2839   __ pop(btos);
2840   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2841   __ stbx(R17_tos, Rclass_or_obj, Roffset);
2842   if (!is_static && rc == may_rewrite) {
2843     patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
2844   }
2845   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2846     __ beq(CR_is_vol, Lvolatile); // Volatile?
2847   }
2848   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2849 
2850   __ align(32, 28, 28); // Align pop.
2851   // __ bind(Lctos);
2852   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2853   assert(branch_table[ctos] == 0, "can't compute twice");
2854   branch_table[ctos] = __ pc(); // non-volatile_entry point
2855   __ pop(ctos);
2856   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2857   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2858   if (!is_static && rc == may_rewrite) {
2859     patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
2860   }
2861   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2862     __ beq(CR_is_vol, Lvolatile); // Volatile?
2863   }
2864   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2865 
2866   __ align(32, 28, 28); // Align pop.
2867   // __ bind(Lstos);
2868   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2869   assert(branch_table[stos] == 0, "can't compute twice");
2870   branch_table[stos] = __ pc(); // non-volatile_entry point
2871   __ pop(stos);
2872   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2873   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2874   if (!is_static && rc == may_rewrite) {
2875     patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
2876   }
2877   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2878     __ beq(CR_is_vol, Lvolatile); // Volatile?
2879   }
2880   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2881 
2882   __ align(32, 28, 28); // Align pop.
2883   // __ bind(Latos);
2884   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2885   assert(branch_table[atos] == 0, "can't compute twice");
2886   branch_table[atos] = __ pc(); // non-volatile_entry point
2887   __ pop(atos);
2888   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2889   do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2890   if (!is_static && rc == may_rewrite) {
2891     patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
2892   }
2893   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2894     __ beq(CR_is_vol, Lvolatile); // Volatile?
2895     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2896 
2897     __ align(32, 12);
2898     __ bind(Lvolatile);
2899     __ fence();
2900   }
2901   // fallthru: __ b(Lexit);
2902 
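All of the store cases above share the same memory-ordering shape: the release barrier at the volatile entry point precedes the actual field store, and when support_IRIW_for_not_multiple_copy_atomic_cpu is false the volatile path additionally branches to Lvolatile, which issues a full fence after the store. A hedged pseudocode sketch of what the generated stub does on the volatile path (names illustrative, not literal interpreter code):

    // release();                   // volatile entry point: order earlier accesses before the field store
    // store_field(obj, offset);    // the stbx/sthx/stwx/stdx/stfsx/stfdx/do_oop_store emitted above
    // if (!support_IRIW_for_not_multiple_copy_atomic_cpu)
    //   fence();                   // Lvolatile: full barrier after a volatile store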
2903 #ifdef ASSERT
2904   for (int i = 0; i<number_of_states; ++i) {
2905     assert(branch_table[i], "put initialization");
2906     //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2907     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2908   }
2909 #endif
2910 }
2911 
2912 void TemplateTable::putfield(int byte_no) {
2913   putfield_or_static(byte_no, false);
2914 }
2915 
2916 void TemplateTable::nofast_putfield(int byte_no) {
2917   putfield_or_static(byte_no, false, may_not_rewrite);
2918 }
2919 
2920 void TemplateTable::putstatic(int byte_no) {
2921   putfield_or_static(byte_no, true);
2922 }
2923 
2924 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2925 void TemplateTable::jvmti_post_fast_field_mod() {
2926   __ should_not_reach_here();
2927 }
2928 
2929 void TemplateTable::fast_storefield(TosState state) {
2930   transition(state, vtos);
2931 
2932   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2933                  Rclass_or_obj = R31,      // Needs to survive C call.
2934                  Roffset       = R22_tmp2, // Needs to survive C call.
2935                  Rflags        = R3_ARG1,
2936                  Rscratch      = R11_scratch1,
2937                  Rscratch2     = R12_scratch2,
2938                  Rscratch3     = R4_ARG2;
2939   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).


3320 void TemplateTable::invokevirtual(int byte_no) {
3321   transition(vtos, vtos);
3322 
3323   Register Rtable_addr = R11_scratch1,
3324            Rret_type = R12_scratch2,
3325            Rret_addr = R5_ARG3,
3326            Rflags = R22_tmp2, // Should survive C call.
3327            Rrecv = R3_ARG1,
3328            Rrecv_klass = Rrecv,
3329            Rvtableindex_or_method = R31, // Should survive C call.
3330            Rnum_params = R4_ARG2,
3331            Rnew_bc = R6_ARG4;
3332 
3333   Label LnotFinal;
3334 
3335   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3336 
3337   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3338   __ bfalse(CCR0, LnotFinal);
3339 
3340   if (RewriteBytecodes && !UseSharedSpaces) {
3341     patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3342   }
3343   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3344 
3345   __ align(32, 12);
3346   __ bind(LnotFinal);
3347   // Load "this" pointer (receiver).
3348   __ rldicl(Rnum_params, Rflags, 64, 48);
3349   __ load_receiver(Rnum_params, Rrecv);
3350   __ verify_oop(Rrecv);
3351 
3352   // Get return type. It's coded into the upper 4 bits of the lower half of the 64-bit value.
3353   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3354   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3355   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3356   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3357   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3358   __ load_klass(Rrecv_klass, Rrecv);
3359   __ verify_klass_ptr(Rrecv_klass);
3360   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3361 
3362   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
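The two rldicl extractions above pull bit fields out of the cached flags word: rldicl(Rnum_params, Rflags, 64, 48) keeps the low 16 bits, where the parameter size is encoded, and the second rldicl isolates the return-type TosState held in tos_state_bits starting at tos_state_shift; the sldi/ldx pair then indexes the invoke return-entry table by that state. A small C++ sketch of the equivalent arithmetic, assuming only the ConstantPoolCacheEntry constants and Interpreter::invoke_return_entry_table() already referenced above (illustrative, not part of this change):

    // C-level equivalent of the rldicl/sldi/ldx sequences in invokevirtual above.
    static int param_size_from_flags(uint64_t flags) {
      return (int)(flags & 0xFFFF);  // rldicl(Rnum_params, Rflags, 64, 48) keeps the low 16 bits
    }
    static int tos_state_from_flags(uint64_t flags) {
      return (int)((flags >> ConstantPoolCacheEntry::tos_state_shift) &
                   ((1u << ConstantPoolCacheEntry::tos_state_bits) - 1));
    }
    static address return_entry_for(uint64_t flags) {
      return Interpreter::invoke_return_entry_table()[tos_state_from_flags(flags)];  // sldi + ldx
    }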

