
src/hotspot/share/c1/c1_LinearScan.cpp

rev 59189 : imported patch hotspot

Old version:

2112 #ifdef __SOFTFP__
2113       case T_DOUBLE:  // fall through
2114 #endif // __SOFTFP__
2115       case T_LONG: {
2116         int assigned_regHi = interval->assigned_regHi();
2117         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2118         assert(num_physical_regs(T_LONG) == 1 ||
2119                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2120 
2121         assert(assigned_reg != assigned_regHi, "invalid allocation");
2122         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2123                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2124         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2125         if (requires_adjacent_regs(T_LONG)) {
2126           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2127         }
2128 
2129 #ifdef _LP64
2130         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2131 #else
2132 #if defined(SPARC) || defined(PPC32)
2133         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2134 #else
2135         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2136 #endif // SPARC
2137 #endif // LP64
2138       }
2139 
2140 #ifndef __SOFTFP__
2141       case T_FLOAT: {
2142 #ifdef X86
2143         if (UseSSE >= 1) {
2144           int last_xmm_reg = pd_last_xmm_reg;
2145 #ifdef _LP64
2146           if (UseAVX < 3) {
2147             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2148           }
2149 #endif // LP64
2150           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2151           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2152           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
2153         }
2154 #endif // X86
2155 
2156         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2157         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2158         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2159       }
2160 
2161       case T_DOUBLE: {
2162 #ifdef X86
2163         if (UseSSE >= 2) {
2164           int last_xmm_reg = pd_last_xmm_reg;
2165 #ifdef _LP64
2166           if (UseAVX < 3) {
2167             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2168           }
2169 #endif // LP64
2170           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2171           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2172           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2173         }
2174 #endif // X86
2175 
2176 #ifdef SPARC
2177         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2178         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2179         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2180         LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2181 #elif defined(ARM32)
2182         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2183         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2184         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2185         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2186 #else
2187         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2188         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2189         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2190 #endif
2191         return result;
2192       }
2193 #endif // __SOFTFP__
2194 
2195       default: {
2196         ShouldNotReachHere();
2197         return LIR_OprFact::illegalOpr;
2198       }
2199     }
2200   }
2201 }
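
A minimal standalone sketch (hypothetical names, not the HotSpot LIR_OprFact API) of the word ordering this switch encodes for T_LONG (and, under __SOFTFP__, T_DOUBLE): on _LP64 both halves live in one register, while 32-bit targets hand two registers to LIR_OprFact::double_cpu(), with PPC32 (and SPARC, before this patch) expecting the high word first.

    #include <cassert>

    // Hypothetical stand-in for the two register numbers passed to
    // LIR_OprFact::double_cpu(); field order matches the call's argument order.
    struct DoubleCpuRegs { int first; int second; };

    DoubleCpuRegs double_cpu_regs(int assigned_reg, int assigned_regHi,
                                  bool lp64, bool hi_word_first) {
      if (lp64) {
        // One 64-bit register holds the whole value, so it is named twice.
        return { assigned_reg, assigned_reg };
      }
      assert(assigned_reg != assigned_regHi && "invalid allocation");
      // PPC32 (and SPARC, before its removal) takes the high word first;
      // the remaining 32-bit targets take the low word first.
      return hi_word_first ? DoubleCpuRegs{ assigned_regHi, assigned_reg }
                           : DoubleCpuRegs{ assigned_reg, assigned_regHi };
    }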


2760     } else if (opr->is_double_fpu()) {
2761       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2762       // the double as float registers in the native ordering. On X86,
2763       // fpu_regnrLo is a FPU stack slot whose VMReg represents
2764       // the low-order word of the double and fpu_regnrLo + 1 is the
2765       // name for the other half.  *first and *second must represent the
2766       // least and most significant words, respectively.
2767 
2768 #ifdef IA32
2769       // the exact location of fpu stack values is only known
2770       // during fpu stack allocation, so the stack allocator object
2771       // must be present
2772       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2773       assert(_fpu_stack_allocator != NULL, "must be present");
2774       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2775 
2776       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2777 #endif
2778 #ifdef AMD64
2779       assert(false, "FPU not used on x86-64");
2780 #endif
2781 #ifdef SPARC
2782       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2783 #endif
2784 #ifdef ARM32
2785       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2786 #endif
2787 #ifdef PPC32
2788       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2789 #endif
2790 
2791 #ifdef VM_LITTLE_ENDIAN
2792       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2793 #else
2794       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2795 #endif
2796 
2797 #ifdef _LP64
2798       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2799       second = _int_0_scope_value;
2800 #else
2801       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2802       // %%% This is probably a waste but we'll keep things as they were for now

New version (patch applied):

2112 #ifdef __SOFTFP__
2113       case T_DOUBLE:  // fall through
2114 #endif // __SOFTFP__
2115       case T_LONG: {
2116         int assigned_regHi = interval->assigned_regHi();
2117         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2118         assert(num_physical_regs(T_LONG) == 1 ||
2119                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2120 
2121         assert(assigned_reg != assigned_regHi, "invalid allocation");
2122         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2123                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2124         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2125         if (requires_adjacent_regs(T_LONG)) {
2126           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2127         }
2128 
2129 #ifdef _LP64
2130         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2131 #else
2132 #if defined(PPC32)
2133         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2134 #else
2135         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2136 #endif // PPC32
2137 #endif // LP64
2138       }
2139 
2140 #ifndef __SOFTFP__
2141       case T_FLOAT: {
2142 #ifdef X86
2143         if (UseSSE >= 1) {
2144           int last_xmm_reg = pd_last_xmm_reg;
2145 #ifdef _LP64
2146           if (UseAVX < 3) {
2147             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2148           }
2149 #endif // LP64
2150           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2151           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2152           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
2153         }
2154 #endif // X86
2155 
2156         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2157         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2158         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2159       }
2160 
2161       case T_DOUBLE: {
2162 #ifdef X86
2163         if (UseSSE >= 2) {
2164           int last_xmm_reg = pd_last_xmm_reg;
2165 #ifdef _LP64
2166           if (UseAVX < 3) {
2167             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2168           }
2169 #endif // LP64
2170           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2171           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2172           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2173         }
2174 #endif // X86
2175 
2176 #if defined(ARM32)
2177         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2178         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2179         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2180         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2181 #else
2182         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2183         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2184         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2185 #endif
2186         return result;
2187       }
2188 #endif // __SOFTFP__
2189 
2190       default: {
2191         ShouldNotReachHere();
2192         return LIR_OprFact::illegalOpr;
2193       }
2194     }
2195   }
2196 }
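
A minimal sketch of the UseAVX < 3 clamp used by both XMM branches above, assuming pd_last_xmm_reg equals pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1 (the parameter names below are stand-ins, not the HotSpot constants): without AVX-512 only xmm0..xmm15 exist on x86-64, i.e. the lower half of the frame map's XMM range.

    // Standalone sketch (not HotSpot code): last allocatable XMM register index.
    int last_allocatable_xmm(int first_xmm, int nof_xmm_frame_map_regs,
                             int use_avx, bool lp64) {
      int last_xmm = first_xmm + nof_xmm_frame_map_regs - 1;  // default upper bound
      if (lp64 && use_avx < 3) {
        // No AVX-512: only the lower half (xmm0..xmm15) is addressable.
        last_xmm = first_xmm + (nof_xmm_frame_map_regs / 2) - 1;
      }
      return last_xmm;
    }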


2755     } else if (opr->is_double_fpu()) {
2756       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2757       // the double as float registers in the native ordering. On X86,
2758       // fpu_regnrLo is a FPU stack slot whose VMReg represents
2759       // the low-order word of the double and fpu_regnrLo + 1 is the
2760       // name for the other half.  *first and *second must represent the
2761       // least and most significant words, respectively.
2762 
2763 #ifdef IA32
2764       // the exact location of fpu stack values is only known
2765       // during fpu stack allocation, so the stack allocator object
2766       // must be present
2767       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2768       assert(_fpu_stack_allocator != NULL, "must be present");
2769       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2770 
2771       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2772 #endif
2773 #ifdef AMD64
2774       assert(false, "FPU not used on x86-64");
2775 #endif
2776 #ifdef ARM32
2777       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2778 #endif
2779 #ifdef PPC32
2780       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2781 #endif
2782 
2783 #ifdef VM_LITTLE_ENDIAN
2784       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2785 #else
2786       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2787 #endif
2788 
2789 #ifdef _LP64
2790       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2791       second = _int_0_scope_value;
2792 #else
2793       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2794       // %%% This is probably a waste but we'll keep things as they were for now
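
Finally, a minimal sketch (hypothetical types, not the HotSpot LocationValue/ScopeValue classes) of the (first, second) pair built above for a double held in an FPU register: on _LP64 a single double-width register location plus a constant-zero filler, on 32-bit a word-sized location for the first half, with the second half produced by code past the end of this excerpt.

    #include <string>
    #include <utility>

    // Hypothetical scope-value stand-in; 'kind' only labels the location flavour.
    struct ScopeValueSketch { std::string kind; int fpu_regnr; };

    std::pair<ScopeValueSketch, ScopeValueSketch>
    double_fpu_debug_pair(int naming_regnr, bool lp64) {
      if (lp64) {
        // Whole double in one register; the second slot is an int-0 filler.
        return { { "reg_loc_dbl", naming_regnr }, { "const_int_0", -1 } };
      }
      // 32-bit: first half as a word-sized register location; the second half
      // is assembled by the code that follows this excerpt.
      return { { "reg_loc_normal", naming_regnr }, { "second_half_not_shown", -1 } };
    }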

