1 /*
2 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
162 return -1;
163 }
164 }
165
166
167 // ********** functions for classification of intervals
168
169 bool LinearScan::is_precolored_interval(const Interval* i) {
170 return i->reg_num() < LinearScan::nof_regs;
171 }
172
173 bool LinearScan::is_virtual_interval(const Interval* i) {
174 return i->reg_num() >= LIR_OprDesc::vreg_base;
175 }
176
177 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
178 return i->reg_num() < LinearScan::nof_cpu_regs;
179 }
180
181 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
182 #if defined(__SOFTFP__) || defined(E500V2)
183 return i->reg_num() >= LIR_OprDesc::vreg_base;
184 #else
185 return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
186 #endif // __SOFTFP__ or E500V2
187 }
188
189 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
190 return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
191 }
192
193 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
194 #if defined(__SOFTFP__) || defined(E500V2)
195 return false;
196 #else
197 return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
198 #endif // __SOFTFP__ or E500V2
199 }
200
201 bool LinearScan::is_in_fpu_register(const Interval* i) {
202 // fixed intervals not needed for FPU stack allocation
203 return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
204 }
205
206 bool LinearScan::is_oop_interval(const Interval* i) {
207 // fixed intervals never contain oops
208 return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
209 }
210
211
212 // ********** General helper functions
213
214 // compute next unused stack index that can be used for spilling
215 int LinearScan::allocate_spill_slot(bool double_word) {
216 int spill_slot;
217 if (double_word) {
2083 case T_OBJECT: {
2084 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2085 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2086 return LIR_OprFact::single_cpu_oop(assigned_reg);
2087 }
2088
2089 case T_ADDRESS: {
2090 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2091 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2092 return LIR_OprFact::single_cpu_address(assigned_reg);
2093 }
2094
2095 case T_METADATA: {
2096 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2097 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2098 return LIR_OprFact::single_cpu_metadata(assigned_reg);
2099 }
2100
2101 #ifdef __SOFTFP__
2102 case T_FLOAT: // fall through
2103 #endif // __SOFTFP__
2104 case T_INT: {
2105 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2106 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2107 return LIR_OprFact::single_cpu(assigned_reg);
2108 }
2109
2110 #ifdef __SOFTFP__
2111 case T_DOUBLE: // fall through
2112 #endif // __SOFTFP__
2113 case T_LONG: {
2114 int assigned_regHi = interval->assigned_regHi();
2115 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2116 assert(num_physical_regs(T_LONG) == 1 ||
2117 (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2118
2119 assert(assigned_reg != assigned_regHi, "invalid allocation");
2120 assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2121 "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2122 assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2123 if (requires_adjacent_regs(T_LONG)) {
2124 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2125 }
2126
2127 #ifdef _LP64
2128 return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2129 #else
2130 #if defined(SPARC) || defined(PPC32)
2131 return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2159 case T_DOUBLE: {
2160 #ifdef X86
2161 if (UseSSE >= 2) {
2162 int last_xmm_reg = pd_last_xmm_reg;
2163 #ifdef _LP64
2164 if (UseAVX < 3) {
2165 last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2166 }
2167 #endif
2168 assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2169 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2170 return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2171 }
2172 #endif
2173
2174 #ifdef SPARC
2175 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2176 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2177 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2178 LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2179 #elif defined(ARM32)
2180 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2181 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2182 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2183 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2184 #else
2185 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2186 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2187 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2188 #endif
2189 return result;
2190 }
2191 #endif // __SOFTFP__
2192
2193 default: {
2194 ShouldNotReachHere();
2195 return LIR_OprFact::illegalOpr;
2196 }
2197 }
2198 }
2199 }
2757 // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2758 // the double as float registers in the native ordering. On X86,
2759 // fpu_regnrLo is a FPU stack slot whose VMReg represents
2760 // the low-order word of the double and fpu_regnrLo + 1 is the
2761 // name for the other half. *first and *second must represent the
2762 // least and most significant words, respectively.
2763
2764 #ifdef X86
2765 // the exact location of fpu stack values is only known
2766 // during fpu stack allocation, so the stack allocator object
2767 // must be present
2768 assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2769 assert(_fpu_stack_allocator != NULL, "must be present");
2770 opr = _fpu_stack_allocator->to_fpu_stack(opr);
2771
2772 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2773 #endif
2774 #ifdef SPARC
2775 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2776 #endif
2777 #ifdef ARM32
2778 assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2779 #endif
2780 #ifdef PPC32
2781 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2782 #endif
2783
2784 #ifdef VM_LITTLE_ENDIAN
2785 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2786 #else
2787 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2788 #endif
2789
2790 #ifdef _LP64
2791 first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2792 second = _int_0_scope_value;
2793 #else
2794 first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2795 // %%% This is probably a waste but we'll keep things as they were for now
2796 if (true) {
2797 VMReg rname_second = rname_first->next();
|
1 /*
2 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
163 return -1;
164 }
165 }
166
167
168 // ********** functions for classification of intervals
169
170 bool LinearScan::is_precolored_interval(const Interval* i) {
171 return i->reg_num() < LinearScan::nof_regs;
172 }
173
174 bool LinearScan::is_virtual_interval(const Interval* i) {
175 return i->reg_num() >= LIR_OprDesc::vreg_base;
176 }
177
178 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
179 return i->reg_num() < LinearScan::nof_cpu_regs;
180 }
181
bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
  // A virtual interval is allocated to a CPU register unless it holds a
  // float/double that the FPU register file will handle.  On soft-float
  // builds (other than AARCH32, which checks for an FPU at runtime) all
  // values, including floats and doubles, live in CPU registers.
#if !defined(AARCH32) && (defined(__SOFTFP__) || defined(E500V2))
  return i->reg_num() >= LIR_OprDesc::vreg_base;
#else
  // AARCH32_ONLY(...) expands only on AARCH32 builds, adding the runtime
  // "no hardware FPU" escape hatch in front of the type test.
  return i->reg_num() >= LIR_OprDesc::vreg_base && (AARCH32_ONLY(!hasFPU() ||) (i->type() != T_FLOAT && i->type() != T_DOUBLE));
#endif // !AARCH32 && (__SOFTFP__ or E500V2)
}
189
190 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
191 return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
192 }
193
bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
  // Counterpart of is_virtual_cpu_interval: true for virtual intervals of
  // floating-point type that will be allocated to FPU registers.  On
  // soft-float builds (other than AARCH32, which checks for an FPU at
  // runtime) nothing ever goes to FPU registers.
#if !defined(AARCH32) && (defined(__SOFTFP__) || defined(E500V2))
  return false;
#else
  // AARCH32_ONLY(...) expands only on AARCH32 builds, additionally
  // requiring a hardware FPU at runtime.
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE) AARCH32_ONLY(&& hasFPU());
#endif // !AARCH32 && (__SOFTFP__ or E500V2)
}
201
202 bool LinearScan::is_in_fpu_register(const Interval* i) {
203 // fixed intervals not needed for FPU stack allocation
204 return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
205 }
206
207 bool LinearScan::is_oop_interval(const Interval* i) {
208 // fixed intervals never contain oops
209 return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
210 }
211
212
213 // ********** General helper functions
214
215 // compute next unused stack index that can be used for spilling
216 int LinearScan::allocate_spill_slot(bool double_word) {
217 int spill_slot;
218 if (double_word) {
2084 case T_OBJECT: {
2085 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2086 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2087 return LIR_OprFact::single_cpu_oop(assigned_reg);
2088 }
2089
2090 case T_ADDRESS: {
2091 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2092 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2093 return LIR_OprFact::single_cpu_address(assigned_reg);
2094 }
2095
2096 case T_METADATA: {
2097 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2098 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2099 return LIR_OprFact::single_cpu_metadata(assigned_reg);
2100 }
2101
2102 #ifdef __SOFTFP__
2103 case T_FLOAT: // fall through
2104 #if defined(AARCH32)
2105 if(hasFPU()) {
2106 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2107 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2108 return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2109 }
2110 #endif
2111 #endif // __SOFTFP__
2112 case T_INT: {
2113 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2114 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2115 return LIR_OprFact::single_cpu(assigned_reg);
2116 }
2117
2118 #ifdef __SOFTFP__
2119 case T_DOUBLE: // fall through
2120 #if defined(AARCH32)
2121 if(hasFPU()) {
2122 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2123 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2124 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2125 return LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2126 }
2127 #endif
2128 #endif // __SOFTFP__
2129 case T_LONG: {
2130 int assigned_regHi = interval->assigned_regHi();
2131 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2132 assert(num_physical_regs(T_LONG) == 1 ||
2133 (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2134
2135 assert(assigned_reg != assigned_regHi, "invalid allocation");
2136 assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2137 "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2138 assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2139 if (requires_adjacent_regs(T_LONG)) {
2140 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2141 }
2142
2143 #ifdef _LP64
2144 return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2145 #else
2146 #if defined(SPARC) || defined(PPC32)
2147 return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2175 case T_DOUBLE: {
2176 #ifdef X86
2177 if (UseSSE >= 2) {
2178 int last_xmm_reg = pd_last_xmm_reg;
2179 #ifdef _LP64
2180 if (UseAVX < 3) {
2181 last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2182 }
2183 #endif
2184 assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2185 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2186 return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2187 }
2188 #endif
2189
2190 #ifdef SPARC
2191 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2192 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2193 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2194 LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2195 #elif defined(ARM32) || defined(AARCH32)
2196 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2197 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2198 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2199 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2200 #else
2201 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2202 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2203 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2204 #endif
2205 return result;
2206 }
2207 #endif // __SOFTFP__
2208
2209 default: {
2210 ShouldNotReachHere();
2211 return LIR_OprFact::illegalOpr;
2212 }
2213 }
2214 }
2215 }
2773 // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2774 // the double as float registers in the native ordering. On X86,
2775 // fpu_regnrLo is a FPU stack slot whose VMReg represents
2776 // the low-order word of the double and fpu_regnrLo + 1 is the
2777 // name for the other half. *first and *second must represent the
2778 // least and most significant words, respectively.
2779
2780 #ifdef X86
2781 // the exact location of fpu stack values is only known
2782 // during fpu stack allocation, so the stack allocator object
2783 // must be present
2784 assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2785 assert(_fpu_stack_allocator != NULL, "must be present");
2786 opr = _fpu_stack_allocator->to_fpu_stack(opr);
2787
2788 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2789 #endif
2790 #ifdef SPARC
2791 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2792 #endif
2793 #if defined(ARM32) || defined(AARCH32)
2794 assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2795 #endif
2796 #ifdef PPC32
2797 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2798 #endif
2799
2800 #ifdef VM_LITTLE_ENDIAN
2801 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2802 #else
2803 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2804 #endif
2805
2806 #ifdef _LP64
2807 first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2808 second = _int_0_scope_value;
2809 #else
2810 first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2811 // %%% This is probably a waste but we'll keep things as they were for now
2812 if (true) {
2813 VMReg rname_second = rname_first->next();
|