// NOTE(review): this file is not compilable C++ — it is a flattened two-column
// diff dump of HotSpot's C1 LIR_Assembler for PPC; the integers (1711.., 2676..)
// interleaved into the text are the original file's line numbers, not code.
// This chunk (left diff column) contains:
//  - the tail of the long/constant arithmetic emitter: lir_sub asserts -con is
//    encodable, negates con, then (no break) falls through into lir_add; addi /
//    mulli are skipped when res==lreg and con makes the op a no-op (0 / 1);
//  - fpop() / set_24bit_FPU(): both just call Unimplemented();
//  - intrinsic_op(): lir_sqrt -> fsqrt, lir_abs -> fabs on double registers;
//  - the head of logic_op() (cut off at its first line);
//  - a compare-and-swap fragment (enclosing function header not visible here):
//    cmpxchgd for the 64-bit case, cmpxchgw otherwise, each with MemBarNone and
//    the atomic-update hint, then isync() when
//    support_IRIW_for_not_multiple_copy_atomic_cpu else sync();
//  - the head of reset_FPU() (continues on the next physical line).
1711 } else { 1712 Register lreg = left->as_pointer_register(); 1713 Register res = dest->as_register_lo(); 1714 long con = right->as_constant_ptr()->as_jlong(); 1715 assert(Assembler::is_simm16(con), "must be simm16"); 1716 1717 switch (code) { 1718 case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long 1719 con = -con; 1720 case lir_add: if (res == lreg && con == 0) break; 1721 __ addi(res, lreg, (int)con); break; 1722 case lir_mul: if (res == lreg && con == 1) break; 1723 __ mulli(res, lreg, (int)con); break; 1724 default: ShouldNotReachHere(); 1725 } 1726 } 1727 } 1728 } 1729 1730 1731 void LIR_Assembler::fpop() { 1732 Unimplemented(); 1733 // do nothing 1734 } 1735 1736 1737 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1738 switch (code) { 1739 case lir_sqrt: { 1740 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1741 break; 1742 } 1743 case lir_abs: { 1744 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1745 break; 1746 } 1747 default: { 1748 ShouldNotReachHere(); 1749 break; 1750 } 1751 } 1752 } 1753 1754 1755 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1756 if (right->is_constant()) { // see do_LogicOp 2676 2677 if (is_64bit) { 2678 __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2679 MacroAssembler::MemBarNone, 2680 MacroAssembler::cmpxchgx_hint_atomic_update(), 2681 noreg, NULL, /*check without ldarx first*/true); 2682 } else { 2683 __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2684 MacroAssembler::MemBarNone, 2685 MacroAssembler::cmpxchgx_hint_atomic_update(), 2686 noreg, /*check without ldarx first*/true); 2687 } 2688 2689 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2690 __ isync(); 2691 } else { 2692 __ sync(); 2693 } 2694 } 2695 2696 2697 void LIR_Assembler::set_24bit_FPU() { 2698 Unimplemented(); 2699 } 2700 2701 void 
// NOTE(review): continuation of the left diff column of this flattened dump
// (embedded integers are the original file's line numbers). This chunk contains:
//  - the tail of reset_FPU(): Unimplemented();
//  - breakpoint(): emits illtrap();
//  - push()/pop(): Unimplemented();
//  - the head of monitor_address(): fetches the monitor's frame-map Address and
//    unpacks base register + displacement (body continues outside this view);
//  - negate(): asserts tmp is illegal and left is a register, then dispatches
//    neg (single cpu / double cpu lo) or fneg (single / double fpu) by kind;
//  - fxch()/fld()/ffree(): x87-style stack ops, all Unimplemented() on PPC;
//  - the head of rt_call() (continues on the next physical line).
LIR_Assembler::reset_FPU() { 2702 Unimplemented(); 2703 } 2704 2705 2706 void LIR_Assembler::breakpoint() { 2707 __ illtrap(); 2708 } 2709 2710 2711 void LIR_Assembler::push(LIR_Opr opr) { 2712 Unimplemented(); 2713 } 2714 2715 void LIR_Assembler::pop(LIR_Opr opr) { 2716 Unimplemented(); 2717 } 2718 2719 2720 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2721 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2722 Register dst = dst_opr->as_register(); 2723 Register reg = mon_addr.base(); 2724 int offset = mon_addr.disp(); 2725 // Compute pointer to BasicLock. 2879 2880 2881 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2882 // tmp must be unused 2883 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2884 assert(left->is_register(), "can only handle registers"); 2885 2886 if (left->is_single_cpu()) { 2887 __ neg(dest->as_register(), left->as_register()); 2888 } else if (left->is_single_fpu()) { 2889 __ fneg(dest->as_float_reg(), left->as_float_reg()); 2890 } else if (left->is_double_fpu()) { 2891 __ fneg(dest->as_double_reg(), left->as_double_reg()); 2892 } else { 2893 assert (left->is_double_cpu(), "Must be a long"); 2894 __ neg(dest->as_register_lo(), left->as_register_lo()); 2895 } 2896 } 2897 2898 2899 void LIR_Assembler::fxch(int i) { 2900 Unimplemented(); 2901 } 2902 2903 void LIR_Assembler::fld(int i) { 2904 Unimplemented(); 2905 } 2906 2907 void LIR_Assembler::ffree(int i) { 2908 Unimplemented(); 2909 } 2910 2911 2912 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2913 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2914 // Stubs: Called via rt_call, but dest is a stub address (no function descriptor). 
// NOTE(review): this chunk ends the LEFT diff column and, after the '|'
// separator, restarts the RIGHT column at the same region. Left-column tail of
// rt_call(): for the two Runtime1 stub targets (register_finalizer_id,
// new_multi_array_id) the address is built TOC-relative via add_const_optimized
// off R29_TOC, moved to CTR, and called with bctrl (info must be non-NULL —
// asserted); every other target goes through call_c_with_frame_resize with no
// frame resize, recording call info only when info != NULL.
// Right column (after '|'): same arithmetic-emitter tail and intrinsic_op as
// the left column, but fpop() is no longer present between them — the right
// column appears to drop the Unimplemented FPU stubs; confirm against the
// actual changeset. Ends mid-statement inside the cmpxchg fragment ("__"),
// continued on the next physical line.
2915 if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) || 2916 dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) { 2917 //__ load_const_optimized(R0, dest); 2918 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); 2919 __ mtctr(R0); 2920 __ bctrl(); 2921 assert(info != NULL, "sanity"); 2922 add_call_info_here(info); 2923 return; 2924 } 2925 2926 __ call_c_with_frame_resize(dest, /*no resizing*/ 0); 2927 if (info != NULL) { 2928 add_call_info_here(info); 2929 } 2930 } 2931 | 1711 } else { 1712 Register lreg = left->as_pointer_register(); 1713 Register res = dest->as_register_lo(); 1714 long con = right->as_constant_ptr()->as_jlong(); 1715 assert(Assembler::is_simm16(con), "must be simm16"); 1716 1717 switch (code) { 1718 case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long 1719 con = -con; 1720 case lir_add: if (res == lreg && con == 0) break; 1721 __ addi(res, lreg, (int)con); break; 1722 case lir_mul: if (res == lreg && con == 1) break; 1723 __ mulli(res, lreg, (int)con); break; 1724 default: ShouldNotReachHere(); 1725 } 1726 } 1727 } 1728 } 1729 1730 1731 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1732 switch (code) { 1733 case lir_sqrt: { 1734 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1735 break; 1736 } 1737 case lir_abs: { 1738 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1739 break; 1740 } 1741 default: { 1742 ShouldNotReachHere(); 1743 break; 1744 } 1745 } 1746 } 1747 1748 1749 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1750 if (right->is_constant()) { // see do_LogicOp 2670 2671 if (is_64bit) { 2672 __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2673 MacroAssembler::MemBarNone, 2674 MacroAssembler::cmpxchgx_hint_atomic_update(), 2675 noreg, NULL, /*check without ldarx first*/true); 2676 } else { 2677 __ 
// NOTE(review): right diff column continued (embedded integers are the original
// file's line numbers). This chunk contains:
//  - the cmpxchgw tail of the compare-and-swap fragment, then the isync()/sync()
//    barrier choice keyed on support_IRIW_for_not_multiple_copy_atomic_cpu;
//  - breakpoint() (illtrap), push()/pop() (Unimplemented), the head of
//    monitor_address(), and negate() — all textually identical to the left
//    column's versions;
//  - the head of rt_call(). Relative to the left column, set_24bit_FPU(),
//    reset_FPU(), fxch(), fld(), and ffree() do not appear between these
//    functions in this column.
cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2678 MacroAssembler::MemBarNone, 2679 MacroAssembler::cmpxchgx_hint_atomic_update(), 2680 noreg, /*check without ldarx first*/true); 2681 } 2682 2683 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2684 __ isync(); 2685 } else { 2686 __ sync(); 2687 } 2688 } 2689 2690 void LIR_Assembler::breakpoint() { 2691 __ illtrap(); 2692 } 2693 2694 2695 void LIR_Assembler::push(LIR_Opr opr) { 2696 Unimplemented(); 2697 } 2698 2699 void LIR_Assembler::pop(LIR_Opr opr) { 2700 Unimplemented(); 2701 } 2702 2703 2704 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2705 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2706 Register dst = dst_opr->as_register(); 2707 Register reg = mon_addr.base(); 2708 int offset = mon_addr.disp(); 2709 // Compute pointer to BasicLock. 2863 2864 2865 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2866 // tmp must be unused 2867 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2868 assert(left->is_register(), "can only handle registers"); 2869 2870 if (left->is_single_cpu()) { 2871 __ neg(dest->as_register(), left->as_register()); 2872 } else if (left->is_single_fpu()) { 2873 __ fneg(dest->as_float_reg(), left->as_float_reg()); 2874 } else if (left->is_double_fpu()) { 2875 __ fneg(dest->as_double_reg(), left->as_double_reg()); 2876 } else { 2877 assert (left->is_double_cpu(), "Must be a long"); 2878 __ neg(dest->as_register_lo(), left->as_register_lo()); 2879 } 2880 } 2881 2882 2883 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2884 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2885 // Stubs: Called via rt_call, but dest is a stub address (no function descriptor). 
// NOTE(review): tail of the right diff column's rt_call() — logic is identical
// to the left column's version: the two Runtime1 stub addresses
// (register_finalizer_id, new_multi_array_id) are materialized TOC-relative
// (add_const_optimized off R29_TOC), moved to CTR, and invoked with bctrl, with
// a mandatory non-NULL CodeEmitInfo (asserted) recorded via add_call_info_here;
// all other targets use call_c_with_frame_resize with no resize, recording call
// info only when info != NULL. The trailing '|' closes the two-column layout.
2886 if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) || 2887 dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) { 2888 //__ load_const_optimized(R0, dest); 2889 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); 2890 __ mtctr(R0); 2891 __ bctrl(); 2892 assert(info != NULL, "sanity"); 2893 add_call_info_here(info); 2894 return; 2895 } 2896 2897 __ call_c_with_frame_resize(dest, /*no resizing*/ 0); 2898 if (info != NULL) { 2899 add_call_info_here(info); 2900 } 2901 } 2902 |