421 // Check out Conversions.java for an example.
422 // Also ConstantPool::header_size() is 20, which makes it very difficult
423 // to double-align double on the constant pool. SG, 11/7/97
424 __ lfdx(F15_ftos, Rcpool, Rindex);
425 __ push(dtos);
426 __ b(Lexit);
427
428 __ bind(Llong);
429 __ ldx(R17_tos, Rcpool, Rindex);
430 __ push(ltos);
431
432 __ bind(Lexit);
433 }
434
// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  // Unsigned one-byte load: the local-variable index operand follows the
  // current bytecode (R14_bcp points at the bytecode being interpreted).
  __ lbz(Rdst, offset, R14_bcp);
}
439
440 void TemplateTable::iload() {
441 transition(vtos, itos);
442
443 // Get the local value into tos
444 const Register Rindex = R22_tmp2;
445 locals_index(Rindex);
446
447 // Rewrite iload,iload pair into fast_iload2
448 // iload,caload pair into fast_icaload
449 if (RewriteFrequentPairs) {
450 Label Lrewrite, Ldone;
451 Register Rnext_byte = R3_ARG1,
452 Rrewrite_to = R6_ARG4,
453 Rscratch = R11_scratch1;
454
455 // get next byte
456 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
457
458 // if _iload, wait to rewrite to iload2. We only want to rewrite the
459 // last two iloads in a pair. Comparing against fast_iload means that
460 // the next bytecode is neither an iload or a caload, and therefore
461 // an iload pair.
462 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
463 __ beq(CCR0, Ldone);
464
465 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
466 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
467 __ beq(CCR1, Lrewrite);
468
469 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
693
// Push local float #n onto the expression stack.
void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  // Load the 4-byte float from local slot n directly into the FP tos register.
  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}
699
// Push local double #n onto the expression stack.
void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  // A double occupies the two local slots n and n+1; the 8-byte load is
  // addressed via slot n+1's offset (see Interpreter::local_offset_in_bytes
  // for the interpreter's locals layout -- NOTE(review): two-slot addressing
  // convention, confirm against that helper).
  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}
705
// Push local reference #n onto the expression stack.
void TemplateTable::aload(int n) {
  transition(vtos, atos);

  // References are stored as full 64-bit words in the locals area.
  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}
711
712 void TemplateTable::aload_0() {
713 transition(vtos, atos);
714 // According to bytecode histograms, the pairs:
715 //
716 // _aload_0, _fast_igetfield
717 // _aload_0, _fast_agetfield
718 // _aload_0, _fast_fgetfield
719 //
720 // occur frequently. If RewriteFrequentPairs is set, the (slow)
721 // _aload_0 bytecode checks if the next bytecode is either
722 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
723 // rewrites the current bytecode into a pair bytecode; otherwise it
724 // rewrites the current bytecode into _0 that doesn't do
725 // the pair check anymore.
726 //
727 // Note: If the next bytecode is _getfield, the rewrite must be
728 // delayed, otherwise we may miss an opportunity for a pair.
729 //
730 // Also rewrite frequent pairs
731 // aload_0, aload_1
732 // aload_0, iload_1
733 // These bytecodes with a small amount of code are most profitable
734 // to rewrite.
735
736 if (RewriteFrequentPairs) {
737
738 Label Lrewrite, Ldont_rewrite;
739 Register Rnext_byte = R3_ARG1,
740 Rrewrite_to = R6_ARG4,
741 Rscratch = R11_scratch1;
742
743 // Get next byte.
744 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
745
746 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
747 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
748 __ beq(CCR0, Ldont_rewrite);
749
750 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
751 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
752 __ beq(CCR1, Lrewrite);
753
754 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
755 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
756 __ beq(CCR0, Lrewrite);
2128 // want to float loads above this check.
2129 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2130 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2131
2132 // Call into the VM if call site is not yet resolved
2133 //
2134 // Input regs:
2135 // - None, all passed regs are outputs.
2136 //
// Returns:
// - Rcache: The const pool cache entry that contains the resolved result.
//   (NOTE(review): this signature produces no separate "Rresult" register,
//   despite what an older comment claimed; f1/f2 are read from the cache
//   entry by the callers.)
//
// Kills:
// - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  // Load the constant pool cache entry pointer for the current bcp.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
  // byte_no selects one of the two bytecode bytes within the 8-byte indices
  // field; the two preprocessor variants address the same logical byte on
  // either endianness (offset k from one end == 7 - k from the other).
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)bytecode());
  __ beq(CCR0, Lresolved);

  // Not yet resolved: pick the runtime resolver matching this bytecode.
  address entry = NULL;
  switch (bytecode()) {
  case Bytecodes::_getstatic      : // fall through
  case Bytecodes::_putstatic      : // fall through
  case Bytecodes::_getfield       : // fall through
  case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
  case Bytecodes::_invokevirtual  : // fall through
  case Bytecodes::_invokespecial  : // fall through
  case Bytecodes::_invokestatic   : // fall through
  case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
  case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
  case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
  default                         : ShouldNotReachHere(); break;
  }
  // Pass the bytecode being resolved as the call's second argument.
  __ li(R4_ARG2, (int)bytecode());
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  // The compare-branch above plus this isync form the acquire barrier:
  // subsequent loads from the resolved entry cannot be satisfied before
  // the resolved-check load above.
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}
2184
2185 // Load the constant pool cache entry at field accesses into registers.
2186 // The Rcache and Rindex registers must be set before call.
2187 // Input:
2188 // - Rcache, Rindex
2189 // Output:
2190 // - Robj, Roffset, Rflags
2191 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2192 Register Rcache,
2193 Register Rindex /* unused on PPC64 */,
2334 __ get_cache_and_index_at_bcp(Rcache, 1);
2335 }
2336
2337 __ align(32, 12);
2338 __ bind(Lno_field_access_post);
2339 }
2340 }
2341
2342 // kills R11_scratch1
2343 void TemplateTable::pop_and_check_object(Register Roop) {
2344 Register Rtmp = R11_scratch1;
2345
2346 assert_different_registers(Rtmp, Roop);
2347 __ pop_ptr(Roop);
2348 // For field access must check obj.
2349 __ null_check_throw(Roop, -1, Rtmp);
2350 __ verify_oop(Roop);
2351 }
2352
2353 // PPC64: implement volatile loads as fence-store-acquire.
2354 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2355 transition(vtos, vtos);
2356
2357 Label Lacquire, Lisync;
2358
2359 const Register Rcache = R3_ARG1,
2360 Rclass_or_obj = R22_tmp2,
2361 Roffset = R23_tmp3,
2362 Rflags = R31,
2363 Rbtable = R5_ARG3,
2364 Rbc = R6_ARG4,
2365 Rscratch = R12_scratch2;
2366
2367 static address field_branch_table[number_of_states],
2368 static_branch_table[number_of_states];
2369
2370 address* branch_table = is_static ? static_branch_table : field_branch_table;
2371
2372 // Get field offset.
2373 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2374
2418 #ifdef ASSERT
2419 __ bind(LFlagInvalid);
2420 __ stop("got invalid flag", 0x654);
2421
2422 // __ bind(Lvtos);
2423 address pc_before_fence = __ pc();
2424 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2425 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2426 assert(branch_table[vtos] == 0, "can't compute twice");
2427 branch_table[vtos] = __ pc(); // non-volatile_entry point
2428 __ stop("vtos unexpected", 0x655);
2429 #endif
2430
2431 __ align(32, 28, 28); // Align load.
2432 // __ bind(Ldtos);
2433 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2434 assert(branch_table[dtos] == 0, "can't compute twice");
2435 branch_table[dtos] = __ pc(); // non-volatile_entry point
2436 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2437 __ push(dtos);
2438 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2439 {
2440 Label acquire_double;
2441 __ beq(CCR6, acquire_double); // Volatile?
2442 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2443
2444 __ bind(acquire_double);
2445 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2446 __ beq_predict_taken(CCR0, Lisync);
2447 __ b(Lisync); // In case of NAN.
2448 }
2449
2450 __ align(32, 28, 28); // Align load.
2451 // __ bind(Lftos);
2452 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2453 assert(branch_table[ftos] == 0, "can't compute twice");
2454 branch_table[ftos] = __ pc(); // non-volatile_entry point
2455 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2456 __ push(ftos);
2457 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
2458 {
2459 Label acquire_float;
2460 __ beq(CCR6, acquire_float); // Volatile?
2461 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2462
2463 __ bind(acquire_float);
2464 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2465 __ beq_predict_taken(CCR0, Lisync);
2466 __ b(Lisync); // In case of NAN.
2467 }
2468
2469 __ align(32, 28, 28); // Align load.
2470 // __ bind(Litos);
2471 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2472 assert(branch_table[itos] == 0, "can't compute twice");
2473 branch_table[itos] = __ pc(); // non-volatile_entry point
2474 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2475 __ push(itos);
2476 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2477 __ beq(CCR6, Lacquire); // Volatile?
2478 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2479
2480 __ align(32, 28, 28); // Align load.
2481 // __ bind(Lltos);
2482 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2483 assert(branch_table[ltos] == 0, "can't compute twice");
2484 branch_table[ltos] = __ pc(); // non-volatile_entry point
2485 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2486 __ push(ltos);
2487 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2488 __ beq(CCR6, Lacquire); // Volatile?
2489 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2490
2491 __ align(32, 28, 28); // Align load.
2492 // __ bind(Lbtos);
2493 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2494 assert(branch_table[btos] == 0, "can't compute twice");
2495 branch_table[btos] = __ pc(); // non-volatile_entry point
2496 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2497 __ extsb(R17_tos, R17_tos);
2498 __ push(btos);
2499 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2500 __ beq(CCR6, Lacquire); // Volatile?
2501 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2502
2503 __ align(32, 28, 28); // Align load.
2504 // __ bind(Lctos);
2505 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2506 assert(branch_table[ctos] == 0, "can't compute twice");
2507 branch_table[ctos] = __ pc(); // non-volatile_entry point
2508 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2509 __ push(ctos);
2510 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2511 __ beq(CCR6, Lacquire); // Volatile?
2512 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2513
2514 __ align(32, 28, 28); // Align load.
2515 // __ bind(Lstos);
2516 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2517 assert(branch_table[stos] == 0, "can't compute twice");
2518 branch_table[stos] = __ pc(); // non-volatile_entry point
2519 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2520 __ push(stos);
2521 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2522 __ beq(CCR6, Lacquire); // Volatile?
2523 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2524
2525 __ align(32, 28, 28); // Align load.
2526 // __ bind(Latos);
2527 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2528 assert(branch_table[atos] == 0, "can't compute twice");
2529 branch_table[atos] = __ pc(); // non-volatile_entry point
2530 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2531 __ verify_oop(R17_tos);
2532 __ push(atos);
2533 //__ dcbt(R17_tos); // prefetch
2534 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2535 __ beq(CCR6, Lacquire); // Volatile?
2536 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2537
2538 __ align(32, 12);
2539 __ bind(Lacquire);
2540 __ twi_0(R17_tos);
2541 __ bind(Lisync);
2542 __ isync(); // acquire
2543
2544 #ifdef ASSERT
2545 for (int i = 0; i<number_of_states; ++i) {
2546 assert(branch_table[i], "get initialization");
2547 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2548 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2549 }
2550 #endif
2551 }
2552
// getfield: non-static field read; delegates to the common implementation.
void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}
2556
// getstatic: static field read; delegates to the common implementation.
void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
2560
2561 // The registers cache and index expected to be set before call.
2562 // The function may destroy various registers, just not the cache and index registers.
2563 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2564
2565 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2566
2567 if (JvmtiExport::can_post_field_modification()) {
2568 Label Lno_field_mod_post;
2569
2570 // Check if post field access in enabled.
2571 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2572 __ lwz(Rscratch, offs, Rscratch);
2573
2574 __ cmpwi(CCR0, Rscratch, 0);
2575 __ beq(CCR0, Lno_field_mod_post);
2576
2627
2628 // In case of the fast versions, value lives in registers => put it back on tos.
2629 switch(bytecode()) {
2630 case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2631 case Bytecodes::_fast_iputfield: // Fall through
2632 case Bytecodes::_fast_bputfield: // Fall through
2633 case Bytecodes::_fast_cputfield: // Fall through
2634 case Bytecodes::_fast_sputfield: __ pop_i(); break;
2635 case Bytecodes::_fast_lputfield: __ pop_l(); break;
2636 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2637 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2638 default: break; // Nothin' to do.
2639 }
2640
2641 __ align(32, 12);
2642 __ bind(Lno_field_mod_post);
2643 }
2644 }
2645
2646 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2647 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2648 Label Lvolatile;
2649
2650 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2651 Rclass_or_obj = R31, // Needs to survive C call.
2652 Roffset = R22_tmp2, // Needs to survive C call.
2653 Rflags = R3_ARG1,
2654 Rbtable = R4_ARG2,
2655 Rscratch = R11_scratch1,
2656 Rscratch2 = R12_scratch2,
2657 Rscratch3 = R6_ARG4,
2658 Rbc = Rscratch3;
2659 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2660
2661 static address field_branch_table[number_of_states],
2662 static_branch_table[number_of_states];
2663
2664 address* branch_table = is_static ? static_branch_table : field_branch_table;
2665
2666 // Stack (grows up):
2667 // value
2701 __ bind(LFlagInvalid);
2702 __ stop("got invalid flag", 0x656);
2703
2704 // __ bind(Lvtos);
2705 address pc_before_release = __ pc();
2706 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2707 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2708 assert(branch_table[vtos] == 0, "can't compute twice");
2709 branch_table[vtos] = __ pc(); // non-volatile_entry point
2710 __ stop("vtos unexpected", 0x657);
2711 #endif
2712
2713 __ align(32, 28, 28); // Align pop.
2714 // __ bind(Ldtos);
2715 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2716 assert(branch_table[dtos] == 0, "can't compute twice");
2717 branch_table[dtos] = __ pc(); // non-volatile_entry point
2718 __ pop(dtos);
2719 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2720 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2721 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
2722 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2723 __ beq(CR_is_vol, Lvolatile); // Volatile?
2724 }
2725 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2726
2727 __ align(32, 28, 28); // Align pop.
2728 // __ bind(Lftos);
2729 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2730 assert(branch_table[ftos] == 0, "can't compute twice");
2731 branch_table[ftos] = __ pc(); // non-volatile_entry point
2732 __ pop(ftos);
2733 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2734 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2735 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
2736 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2737 __ beq(CR_is_vol, Lvolatile); // Volatile?
2738 }
2739 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2740
2741 __ align(32, 28, 28); // Align pop.
2742 // __ bind(Litos);
2743 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2744 assert(branch_table[itos] == 0, "can't compute twice");
2745 branch_table[itos] = __ pc(); // non-volatile_entry point
2746 __ pop(itos);
2747 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2748 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2749 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
2750 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2751 __ beq(CR_is_vol, Lvolatile); // Volatile?
2752 }
2753 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2754
2755 __ align(32, 28, 28); // Align pop.
2756 // __ bind(Lltos);
2757 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2758 assert(branch_table[ltos] == 0, "can't compute twice");
2759 branch_table[ltos] = __ pc(); // non-volatile_entry point
2760 __ pop(ltos);
2761 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2762 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2763 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
2764 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2765 __ beq(CR_is_vol, Lvolatile); // Volatile?
2766 }
2767 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2768
2769 __ align(32, 28, 28); // Align pop.
2770 // __ bind(Lbtos);
2771 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2772 assert(branch_table[btos] == 0, "can't compute twice");
2773 branch_table[btos] = __ pc(); // non-volatile_entry point
2774 __ pop(btos);
2775 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2776 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2777 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
2778 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2779 __ beq(CR_is_vol, Lvolatile); // Volatile?
2780 }
2781 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2782
2783 __ align(32, 28, 28); // Align pop.
2784 // __ bind(Lctos);
2785 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2786 assert(branch_table[ctos] == 0, "can't compute twice");
2787 branch_table[ctos] = __ pc(); // non-volatile_entry point
2788 __ pop(ctos);
2789 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1..
2790 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2791 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
2792 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2793 __ beq(CR_is_vol, Lvolatile); // Volatile?
2794 }
2795 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2796
2797 __ align(32, 28, 28); // Align pop.
2798 // __ bind(Lstos);
2799 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2800 assert(branch_table[stos] == 0, "can't compute twice");
2801 branch_table[stos] = __ pc(); // non-volatile_entry point
2802 __ pop(stos);
2803 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2804 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2805 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
2806 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2807 __ beq(CR_is_vol, Lvolatile); // Volatile?
2808 }
2809 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2810
2811 __ align(32, 28, 28); // Align pop.
2812 // __ bind(Latos);
2813 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2814 assert(branch_table[atos] == 0, "can't compute twice");
2815 branch_table[atos] = __ pc(); // non-volatile_entry point
2816 __ pop(atos);
2817 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2818 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2819 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
2820 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2821 __ beq(CR_is_vol, Lvolatile); // Volatile?
2822 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2823
2824 __ align(32, 12);
2825 __ bind(Lvolatile);
2826 __ fence();
2827 }
2828 // fallthru: __ b(Lexit);
2829
2830 #ifdef ASSERT
2831 for (int i = 0; i<number_of_states; ++i) {
2832 assert(branch_table[i], "put initialization");
2833 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2834 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2835 }
2836 #endif
2837 }
2838
// putfield: non-static field write; delegates to the common implementation.
void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}
2842
// putstatic: static field write; delegates to the common implementation.
void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}
2846
// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
// This hook is therefore dead on PPC64 and must never be reached.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}
2851
2852 void TemplateTable::fast_storefield(TosState state) {
2853 transition(state, vtos);
2854
2855 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2856 Rclass_or_obj = R31, // Needs to survive C call.
2857 Roffset = R22_tmp2, // Needs to survive C call.
2858 Rflags = R3_ARG1,
2859 Rscratch = R11_scratch1,
2860 Rscratch2 = R12_scratch2,
2861 Rscratch3 = R4_ARG2;
2862 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
421 // Check out Conversions.java for an example.
422 // Also ConstantPool::header_size() is 20, which makes it very difficult
423 // to double-align double on the constant pool. SG, 11/7/97
424 __ lfdx(F15_ftos, Rcpool, Rindex);
425 __ push(dtos);
426 __ b(Lexit);
427
428 __ bind(Llong);
429 __ ldx(R17_tos, Rcpool, Rindex);
430 __ push(ltos);
431
432 __ bind(Lexit);
433 }
434
// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  // Unsigned one-byte load of the local-variable index operand that follows
  // the current bytecode (R14_bcp).
  __ lbz(Rdst, offset, R14_bcp);
}
439
void TemplateTable::iload() {
  // Delegates with iload_internal()'s default RewriteControl (rewriting
  // permitted -- see nofast_iload for the suppressed variant).
  iload_internal();
}
443
void TemplateTable::nofast_iload() {
  // Same as iload(), but bytecode rewriting is explicitly suppressed.
  iload_internal(may_not_rewrite);
}
447
448 void TemplateTable::iload_internal(RewriteControl rc) {
449 transition(vtos, itos);
450
451 // Get the local value into tos
452 const Register Rindex = R22_tmp2;
453 locals_index(Rindex);
454
455 // Rewrite iload,iload pair into fast_iload2
456 // iload,caload pair into fast_icaload
457 if (RewriteFrequentPairs && rc == may_rewrite) {
458 Label Lrewrite, Ldone;
459 Register Rnext_byte = R3_ARG1,
460 Rrewrite_to = R6_ARG4,
461 Rscratch = R11_scratch1;
462
463 // get next byte
464 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
465
466 // if _iload, wait to rewrite to iload2. We only want to rewrite the
467 // last two iloads in a pair. Comparing against fast_iload means that
468 // the next bytecode is neither an iload or a caload, and therefore
469 // an iload pair.
470 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
471 __ beq(CCR0, Ldone);
472
473 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
474 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
475 __ beq(CCR1, Lrewrite);
476
477 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
701
// Push local float #n onto the expression stack.
void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  // Load the 4-byte float from local slot n directly into the FP tos register.
  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}
707
// Push local double #n onto the expression stack.
void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  // A double occupies the two local slots n and n+1; the 8-byte load is
  // addressed via slot n+1's offset (see Interpreter::local_offset_in_bytes
  // for the interpreter's locals layout -- NOTE(review): two-slot addressing
  // convention, confirm against that helper).
  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}
713
// Push local reference #n onto the expression stack.
void TemplateTable::aload(int n) {
  transition(vtos, atos);

  // References are stored as full 64-bit words in the locals area.
  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}
719
void TemplateTable::aload_0() {
  // Delegates with aload_0_internal()'s default RewriteControl (rewriting
  // permitted -- see nofast_aload_0 for the suppressed variant).
  aload_0_internal();
}
723
void TemplateTable::nofast_aload_0() {
  // Same as aload_0(), but bytecode rewriting is explicitly suppressed.
  aload_0_internal(may_not_rewrite);
}
727
728 void TemplateTable::aload_0_internal(RewriteControl rc) {
729 transition(vtos, atos);
730 // According to bytecode histograms, the pairs:
731 //
732 // _aload_0, _fast_igetfield
733 // _aload_0, _fast_agetfield
734 // _aload_0, _fast_fgetfield
735 //
736 // occur frequently. If RewriteFrequentPairs is set, the (slow)
737 // _aload_0 bytecode checks if the next bytecode is either
738 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
739 // rewrites the current bytecode into a pair bytecode; otherwise it
740 // rewrites the current bytecode into _0 that doesn't do
741 // the pair check anymore.
742 //
743 // Note: If the next bytecode is _getfield, the rewrite must be
744 // delayed, otherwise we may miss an opportunity for a pair.
745 //
746 // Also rewrite frequent pairs
747 // aload_0, aload_1
748 // aload_0, iload_1
749 // These bytecodes with a small amount of code are most profitable
750 // to rewrite.
751
752 if (RewriteFrequentPairs && rc == may_rewrite) {
753
754 Label Lrewrite, Ldont_rewrite;
755 Register Rnext_byte = R3_ARG1,
756 Rrewrite_to = R6_ARG4,
757 Rscratch = R11_scratch1;
758
759 // Get next byte.
760 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
761
762 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
763 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
764 __ beq(CCR0, Ldont_rewrite);
765
766 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
767 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
768 __ beq(CCR1, Lrewrite);
769
770 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
771 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
772 __ beq(CCR0, Lrewrite);
2144 // want to float loads above this check.
2145 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2146 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2147
2148 // Call into the VM if call site is not yet resolved
2149 //
2150 // Input regs:
2151 // - None, all passed regs are outputs.
2152 //
// Returns:
// - Rcache: The const pool cache entry that contains the resolved result.
//   (NOTE(review): this signature produces no separate "Rresult" register,
//   despite what an older comment claimed; f1/f2 are read from the cache
//   entry by the callers.)
//
// Kills:
// - Rscratch
2159 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2160
2161 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2162 Label Lresolved, Ldone;
2163
2164 Bytecodes::Code code = bytecode();
2165 switch (code) {
2166 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2167 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2168 }
2169
2170 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2171 // We are resolved if the indices offset contains the current bytecode.
2172 #if defined(VM_LITTLE_ENDIAN)
2173 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2174 #else
2175 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2176 #endif
2177 // Acquire by cmp-br-isync (see below).
2178 __ cmpdi(CCR0, Rscratch, code);
2179 __ beq(CCR0, Lresolved);
2180
2181 address entry = NULL;
2182 switch (code) {
2183 case Bytecodes::_getstatic : // fall through
2184 case Bytecodes::_putstatic : // fall through
2185 case Bytecodes::_getfield : // fall through
2186 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2187 case Bytecodes::_invokevirtual : // fall through
2188 case Bytecodes::_invokespecial : // fall through
2189 case Bytecodes::_invokestatic : // fall through
2190 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2191 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2192 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2193 default : ShouldNotReachHere(); break;
2194 }
2195 __ li(R4_ARG2, code);
2196 __ call_VM(noreg, entry, R4_ARG2, true);
2197
2198 // Update registers with resolved info.
2199 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2200 __ b(Ldone);
2201
2202 __ bind(Lresolved);
2203 __ isync(); // Order load wrt. succeeding loads.
2204 __ bind(Ldone);
2205 }
2206
2207 // Load the constant pool cache entry at field accesses into registers.
2208 // The Rcache and Rindex registers must be set before call.
2209 // Input:
2210 // - Rcache, Rindex
2211 // Output:
2212 // - Robj, Roffset, Rflags
2213 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2214 Register Rcache,
2215 Register Rindex /* unused on PPC64 */,
2356 __ get_cache_and_index_at_bcp(Rcache, 1);
2357 }
2358
2359 __ align(32, 12);
2360 __ bind(Lno_field_access_post);
2361 }
2362 }
2363
2364 // kills R11_scratch1
2365 void TemplateTable::pop_and_check_object(Register Roop) {
2366 Register Rtmp = R11_scratch1;
2367
2368 assert_different_registers(Rtmp, Roop);
2369 __ pop_ptr(Roop);
2370 // For field access must check obj.
2371 __ null_check_throw(Roop, -1, Rtmp);
2372 __ verify_oop(Roop);
2373 }
2374
2375 // PPC64: implement volatile loads as fence-store-acquire.
2376 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2377 transition(vtos, vtos);
2378
2379 Label Lacquire, Lisync;
2380
2381 const Register Rcache = R3_ARG1,
2382 Rclass_or_obj = R22_tmp2,
2383 Roffset = R23_tmp3,
2384 Rflags = R31,
2385 Rbtable = R5_ARG3,
2386 Rbc = R6_ARG4,
2387 Rscratch = R12_scratch2;
2388
2389 static address field_branch_table[number_of_states],
2390 static_branch_table[number_of_states];
2391
2392 address* branch_table = is_static ? static_branch_table : field_branch_table;
2393
2394 // Get field offset.
2395 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2396
2440 #ifdef ASSERT
2441 __ bind(LFlagInvalid);
2442 __ stop("got invalid flag", 0x654);
2443
2444 // __ bind(Lvtos);
2445 address pc_before_fence = __ pc();
2446 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2447 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2448 assert(branch_table[vtos] == 0, "can't compute twice");
2449 branch_table[vtos] = __ pc(); // non-volatile_entry point
2450 __ stop("vtos unexpected", 0x655);
2451 #endif
2452
2453 __ align(32, 28, 28); // Align load.
2454 // __ bind(Ldtos);
2455 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2456 assert(branch_table[dtos] == 0, "can't compute twice");
2457 branch_table[dtos] = __ pc(); // non-volatile_entry point
2458 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2459 __ push(dtos);
2460
2461 if (!is_static && rc == may_rewrite) {
2462 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2463 }
2464
2465 {
2466 Label acquire_double;
2467 __ beq(CCR6, acquire_double); // Volatile?
2468 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2469
2470 __ bind(acquire_double);
2471 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2472 __ beq_predict_taken(CCR0, Lisync);
2473 __ b(Lisync); // In case of NAN.
2474 }
2475
2476 __ align(32, 28, 28); // Align load.
2477 // __ bind(Lftos);
2478 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2479 assert(branch_table[ftos] == 0, "can't compute twice");
2480 branch_table[ftos] = __ pc(); // non-volatile_entry point
2481 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2482 __ push(ftos);
2483
2484 if (!is_static && rc == may_rewrite) {
2485 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
2486 }
2487
2488 {
2489 Label acquire_float;
2490 __ beq(CCR6, acquire_float); // Volatile?
2491 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2492
2493 __ bind(acquire_float);
2494 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2495 __ beq_predict_taken(CCR0, Lisync);
2496 __ b(Lisync); // In case of NAN.
2497 }
2498
2499 __ align(32, 28, 28); // Align load.
2500 // __ bind(Litos);
2501 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2502 assert(branch_table[itos] == 0, "can't compute twice");
2503 branch_table[itos] = __ pc(); // non-volatile_entry point
2504 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2505 __ push(itos);
2506
2507 if (!is_static && rc == may_rewrite) {
2508 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2509 }
2510
2511 __ beq(CCR6, Lacquire); // Volatile?
2512 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2513
2514 __ align(32, 28, 28); // Align load.
2515 // __ bind(Lltos);
2516 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2517 assert(branch_table[ltos] == 0, "can't compute twice");
2518 branch_table[ltos] = __ pc(); // non-volatile_entry point
2519 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2520 __ push(ltos);
2521
2522 if (!is_static && rc == may_rewrite) {
2523 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2524 }
2525
2526 __ beq(CCR6, Lacquire); // Volatile?
2527 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2528
2529 __ align(32, 28, 28); // Align load.
2530 // __ bind(Lbtos);
2531 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2532 assert(branch_table[btos] == 0, "can't compute twice");
2533 branch_table[btos] = __ pc(); // non-volatile_entry point
2534 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2535 __ extsb(R17_tos, R17_tos);
2536 __ push(btos);
2537
2538 if (!is_static && rc == may_rewrite) {
2539 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2540 }
2541
2542 __ beq(CCR6, Lacquire); // Volatile?
2543 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2544
2545 __ align(32, 28, 28); // Align load.
2546 // __ bind(Lctos);
2547 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2548 assert(branch_table[ctos] == 0, "can't compute twice");
2549 branch_table[ctos] = __ pc(); // non-volatile_entry point
2550 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2551 __ push(ctos);
2552
2553 if (!is_static && rc ==may_rewrite) {
2554 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2555 }
2556
2557 __ beq(CCR6, Lacquire); // Volatile?
2558 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2559
2560 __ align(32, 28, 28); // Align load.
2561 // __ bind(Lstos);
2562 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2563 assert(branch_table[stos] == 0, "can't compute twice");
2564 branch_table[stos] = __ pc(); // non-volatile_entry point
2565 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2566 __ push(stos);
2567
2568 if (!is_statiac && rc == may_rewrite) {
2569 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2570 }
2571
2572 __ beq(CCR6, Lacquire); // Volatile?
2573 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2574
2575 __ align(32, 28, 28); // Align load.
2576 // __ bind(Latos);
2577 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2578 assert(branch_table[atos] == 0, "can't compute twice");
2579 branch_table[atos] = __ pc(); // non-volatile_entry point
2580 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2581 __ verify_oop(R17_tos);
2582 __ push(atos);
2583
2584 //__ dcbt(R17_tos); // prefetch
2585 if (!is_static && rc == may_rewrite) {
2586 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2587 }
2588
2589 __ beq(CCR6, Lacquire); // Volatile?
2590 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2591
2592 __ align(32, 12);
2593 __ bind(Lacquire);
2594 __ twi_0(R17_tos);
2595 __ bind(Lisync);
2596 __ isync(); // acquire
2597
2598 #ifdef ASSERT
2599 for (int i = 0; i<number_of_states; ++i) {
2600 assert(branch_table[i], "get initialization");
2601 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2602 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2603 }
2604 #endif
2605 }
2606
// getfield bytecode: instance field read; bytecode rewriting to a fast
// variant is permitted (uses the default RewriteControl of getfield_or_static).
void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}
2610
// getfield variant with bytecode rewriting suppressed (may_not_rewrite).
void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}
2614
// getstatic bytecode: static field read via the shared field-access generator.
void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
2618
// Emit the JVMTI post-field-modification notification, if enabled at VM startup.
// The registers cache and index expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post-field-modification events are enabled (runtime counter != 0).
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);


    // In case of the fast versions, value lives in registers => put it back on tos.
    switch(bytecode()) {
      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
      case Bytecodes::_fast_iputfield: // Fall through
      case Bytecodes::_fast_bputfield: // Fall through
      case Bytecodes::_fast_cputfield: // Fall through
      case Bytecodes::_fast_sputfield: __ pop_i(); break;
      case Bytecodes::_fast_lputfield: __ pop_l(); break;
      case Bytecodes::_fast_fputfield: __ pop_f(); break;
      case Bytecodes::_fast_dputfield: __ pop_d(); break;
      default: break; // Nothin' to do.
    }

    __ align(32, 12);
    __ bind(Lno_field_mod_post);
  }
}
2703
2704 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2705 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2706 Label Lvolatile;
2707
2708 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2709 Rclass_or_obj = R31, // Needs to survive C call.
2710 Roffset = R22_tmp2, // Needs to survive C call.
2711 Rflags = R3_ARG1,
2712 Rbtable = R4_ARG2,
2713 Rscratch = R11_scratch1,
2714 Rscratch2 = R12_scratch2,
2715 Rscratch3 = R6_ARG4,
2716 Rbc = Rscratch3;
2717 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2718
2719 static address field_branch_table[number_of_states],
2720 static_branch_table[number_of_states];
2721
2722 address* branch_table = is_static ? static_branch_table : field_branch_table;
2723
2724 // Stack (grows up):
2725 // value
2759 __ bind(LFlagInvalid);
2760 __ stop("got invalid flag", 0x656);
2761
2762 // __ bind(Lvtos);
2763 address pc_before_release = __ pc();
2764 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2765 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2766 assert(branch_table[vtos] == 0, "can't compute twice");
2767 branch_table[vtos] = __ pc(); // non-volatile_entry point
2768 __ stop("vtos unexpected", 0x657);
2769 #endif
2770
2771 __ align(32, 28, 28); // Align pop.
2772 // __ bind(Ldtos);
2773 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2774 assert(branch_table[dtos] == 0, "can't compute twice");
2775 branch_table[dtos] = __ pc(); // non-volatile_entry point
2776 __ pop(dtos);
2777 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2778 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2779
2780 if (!is_static && rc == may_rewrite) {
2781 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
2782 }
2783
2784 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2785 __ beq(CR_is_vol, Lvolatile); // Volatile?
2786 }
2787 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2788
2789 __ align(32, 28, 28); // Align pop.
2790 // __ bind(Lftos);
2791 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2792 assert(branch_table[ftos] == 0, "can't compute twice");
2793 branch_table[ftos] = __ pc(); // non-volatile_entry point
2794 __ pop(ftos);
2795 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2796 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2797
2798 if (!is_static && rc == may_rewrite) {
2799 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
2800 }
2801
2802 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2803 __ beq(CR_is_vol, Lvolatile); // Volatile?
2804 }
2805 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2806
2807 __ align(32, 28, 28); // Align pop.
2808 // __ bind(Litos);
2809 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2810 assert(branch_table[itos] == 0, "can't compute twice");
2811 branch_table[itos] = __ pc(); // non-volatile_entry point
2812 __ pop(itos);
2813 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2814 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2815
2816 if (!is_static && rc == may_rewrite) {
2817 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
2818 }
2819
2820 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2821 __ beq(CR_is_vol, Lvolatile); // Volatile?
2822 }
2823 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2824
2825 __ align(32, 28, 28); // Align pop.
2826 // __ bind(Lltos);
2827 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2828 assert(branch_table[ltos] == 0, "can't compute twice");
2829 branch_table[ltos] = __ pc(); // non-volatile_entry point
2830 __ pop(ltos);
2831 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2832 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2833
2834 if (!is_static && rc == may_rewrite) {
2835 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
2836 }
2837
2838 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2839 __ beq(CR_is_vol, Lvolatile); // Volatile?
2840 }
2841 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2842
2843 __ align(32, 28, 28); // Align pop.
2844 // __ bind(Lbtos);
2845 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2846 assert(branch_table[btos] == 0, "can't compute twice");
2847 branch_table[btos] = __ pc(); // non-volatile_entry point
2848 __ pop(btos);
2849 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2850 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2851
2852 if (!is_static && rc == may_rewrite) {
2853 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
2854 }
2855
2856 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2857 __ beq(CR_is_vol, Lvolatile); // Volatile?
2858 }
2859 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2860
2861 __ align(32, 28, 28); // Align pop.
2862 // __ bind(Lctos);
2863 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2864 assert(branch_table[ctos] == 0, "can't compute twice");
2865 branch_table[ctos] = __ pc(); // non-volatile_entry point
2866 __ pop(ctos);
2867 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1..
2868 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2869
2870 if (!is_static && rc == may_rewrite) {
2871 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
2872 }
2873
2874 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2875 __ beq(CR_is_vol, Lvolatile); // Volatile?
2876 }
2877 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2878
2879 __ align(32, 28, 28); // Align pop.
2880 // __ bind(Lstos);
2881 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2882 assert(branch_table[stos] == 0, "can't compute twice");
2883 branch_table[stos] = __ pc(); // non-volatile_entry point
2884 __ pop(stos);
2885 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2886 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2887
2888 if (!is_static rc == may_rewrite) {
2889 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
2890 }
2891
2892 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2893 __ beq(CR_is_vol, Lvolatile); // Volatile?
2894 }
2895 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2896
2897 __ align(32, 28, 28); // Align pop.
2898 // __ bind(Latos);
2899 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2900 assert(branch_table[atos] == 0, "can't compute twice");
2901 branch_table[atos] = __ pc(); // non-volatile_entry point
2902 __ pop(atos);
2903 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2904 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2905
2906 if (!is_static && rc == may_rewrite) {
2907 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
2908 }
2909
2910 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2911 __ beq(CR_is_vol, Lvolatile); // Volatile?
2912 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2913
2914 __ align(32, 12);
2915 __ bind(Lvolatile);
2916 __ fence();
2917 }
2918 // fallthru: __ b(Lexit);
2919
2920 #ifdef ASSERT
2921 for (int i = 0; i<number_of_states; ++i) {
2922 assert(branch_table[i], "put initialization");
2923 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2924 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2925 }
2926 #endif
2927 }
2928
// putfield bytecode: instance field write; bytecode rewriting to a fast
// variant is permitted (uses the default RewriteControl of putfield_or_static).
void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}
2932
// putfield variant with bytecode rewriting suppressed (may_not_rewrite).
void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}
2936
// putstatic bytecode: static field write via the shared field-store generator.
void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}
2940
// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
// This entry point must never be reached; emit a guard trap if it is.
void TemplateTable::jvmti_post_fast_field_mod() {
  __ should_not_reach_here();
}
2945
2946 void TemplateTable::fast_storefield(TosState state) {
2947 transition(state, vtos);
2948
2949 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2950 Rclass_or_obj = R31, // Needs to survive C call.
2951 Roffset = R22_tmp2, // Needs to survive C call.
2952 Rflags = R3_ARG1,
2953 Rscratch = R11_scratch1,
2954 Rscratch2 = R12_scratch2,
2955 Rscratch3 = R4_ARG2;
2956 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|