Print this page
*** NO COMMENTS ***
Split |
Close |
Expand all |
Collapse all |
--- old/src/cpu/sparc/vm/sparc.ad
+++ new/src/cpu/sparc/vm/sparc.ad
1 1 //
2 2 // Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
3 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 //
5 5 // This code is free software; you can redistribute it and/or modify it
6 6 // under the terms of the GNU General Public License version 2 only, as
7 7 // published by the Free Software Foundation.
8 8 //
9 9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 // version 2 for more details (a copy is included in the LICENSE file that
13 13 // accompanied this code).
14 14 //
15 15 // You should have received a copy of the GNU General Public License version
16 16 // 2 along with this work; if not, write to the Free Software Foundation,
17 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 //
19 19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 // or visit www.oracle.com if you need additional information or have any
21 21 // questions.
22 22 //
23 23 //
24 24
25 25 // SPARC Architecture Description File
26 26
27 27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 28 // This information is used by the matcher and the register allocator to
29 29 // describe individual registers and classes of registers within the target
30 30 // architecture.
31 31 register %{
32 32 //----------Architecture Description Register Definitions----------------------
33 33 // General Registers
34 34 // "reg_def" name ( register save type, C convention save type,
35 35 // ideal register type, encoding, vm name );
36 36 // Register Save Types:
37 37 //
38 38 // NS = No-Save: The register allocator assumes that these registers
39 39 // can be used without saving upon entry to the method, &
40 40 // that they do not need to be saved at call sites.
41 41 //
42 42 // SOC = Save-On-Call: The register allocator assumes that these registers
43 43 // can be used without saving upon entry to the method,
44 44 // but that they must be saved at call sites.
45 45 //
46 46 // SOE = Save-On-Entry: The register allocator assumes that these registers
47 47 // must be saved before using them upon entry to the
48 48 // method, but they do not need to be saved at call
49 49 // sites.
50 50 //
51 51 // AS = Always-Save: The register allocator assumes that these registers
52 52 // must be saved before using them upon entry to the
53 53 // method, & that they must be saved at call sites.
54 54 //
55 55 // Ideal Register Type is used to determine how to save & restore a
56 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
57 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
58 58 //
59 59 // The encoding number is the actual bit-pattern placed into the opcodes.
60 60
61 61
62 62 // ----------------------------
63 63 // Integer/Long Registers
64 64 // ----------------------------
65 65
66 66 // Need to expose the hi/lo aspect of 64-bit registers
67 67 // This register set is used for both the 64-bit build and
68 68 // the 32-bit build with 1-register longs.
69 69
70 70 // Global Registers 0-7
71 71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
72 72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg());
73 73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
74 74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg());
75 75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
76 76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg());
77 77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
78 78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg());
79 79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
80 80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg());
81 81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
82 82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg());
83 83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
84 84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg());
85 85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
86 86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg());
87 87
88 88 // Output Registers 0-7
89 89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
90 90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg());
91 91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
92 92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg());
93 93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
94 94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
95 95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
96 96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
97 97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
98 98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
99 99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
100 100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
101 101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
102 102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
103 103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
104 104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
105 105
106 106 // Local Registers 0-7
107 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next());
108 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg());
109 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next());
110 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg());
111 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next());
112 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg());
113 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next());
114 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg());
115 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next());
116 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg());
117 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next());
118 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg());
119 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next());
120 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg());
121 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next());
122 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg());
123 123
124 124 // Input Registers 0-7
125 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next());
126 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg());
127 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next());
128 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg());
129 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next());
130 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg());
131 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next());
132 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg());
133 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next());
134 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg());
135 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next());
136 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg());
137 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next());
138 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
139 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next());
140 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg());
141 141
142 142 // ----------------------------
143 143 // Float/Double Registers
144 144 // ----------------------------
145 145
146 146 // Float Registers
147 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
148 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
149 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
150 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
151 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
152 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
153 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
154 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
155 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
156 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
157 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
158 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
159 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
160 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
161 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
162 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
163 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
164 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
165 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
166 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
167 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
168 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
169 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
170 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
171 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
172 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
173 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
174 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
175 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
176 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
177 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
178 178 reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
179 179
180 180 // Double Registers
181 181 // The rules of ADL require that double registers be defined in pairs.
182 182 // Each pair must be two 32-bit values, but not necessarily a pair of
183 183 // single float registers. In each pair, ADLC-assigned register numbers
184 184 // must be adjacent, with the lower number even. Finally, when the
185 185 // CPU stores such a register pair to memory, the word associated with
186 186 // the lower ADLC-assigned number must be stored to the lower address.
187 187
188 188 // These definitions specify the actual bit encodings of the sparc
189 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp
190 190 // wants 0-63, so we have to convert every time we want to use fp regs
191 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object().
192 192 // 255 is a flag meaning "don't go here".
193 193 // I believe we can't handle callee-save doubles D32 and up until
194 194 // the place in the sparc stack crawler that asserts on the 255 is
195 195 // fixed up.
196 196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg());
197 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
198 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg());
199 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
200 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg());
201 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
202 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg());
203 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
204 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg());
205 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
206 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
207 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
208 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
209 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
210 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
211 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
212 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
213 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
214 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
215 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
216 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
217 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
218 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
219 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
220 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
221 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
222 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
223 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
224 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
225 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
226 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
227 227 reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());
228 228
229 229
230 230 // ----------------------------
231 231 // Special Registers
232 232 // Condition Codes Flag Registers
233 233 // I tried to break out ICC and XCC but it's not very pretty.
234 234 // Every Sparc instruction which defs/kills one also kills the other.
235 235 // Hence every compare instruction which defs one kind of flags ends
236 236 // up needing a kill of the other.
237 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
238 238
239 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
240 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad());
241 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad());
242 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad());
243 243
244 244 // ----------------------------
245 245 // Specify the enum values for the registers. These enums are only used by the
246 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed
247 247 // for visibility to the rest of the vm. The order of this enum influences the
248 248 // register allocator so having the freedom to set this order and not be stuck
249 249 // with the order that is natural for the rest of the vm is worth it.
250 250 alloc_class chunk0(
251 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
252 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
253 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
254 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);
255 255
256 256 // Note that a register is not allocatable unless it is also mentioned
257 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg.
258 258
259 259 alloc_class chunk1(
260 260 // The first registers listed here are those most likely to be used
261 261 // as temporaries. We move F0..F7 away from the front of the list,
262 262 // to reduce the likelihood of interferences with parameters and
263 263 // return values. Likewise, we avoid using F0/F1 for parameters,
264 264 // since they are used for return values.
265 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean.
266 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
267 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
268 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
269 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
270 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
271 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
272 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
273 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);
274 274
275 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);
276 276
277 277 //----------Architecture Description Register Classes--------------------------
278 278 // Several register classes are automatically defined based upon information in
279 279 // this architecture description.
280 280 // 1) reg_class inline_cache_reg ( as defined in frame section )
281 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
282 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 283 //
284 284
285 285 // G0 is not included in integer class since it has special meaning.
286 286 reg_class g0_reg(R_G0);
287 287
288 288 // ----------------------------
289 289 // Integer Register Classes
290 290 // ----------------------------
291 291 // Exclusions from i_reg:
292 292 // R_G0: hardwired zero
293 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java)
294 294 // R_G6: reserved by Solaris ABI to tools
295 295 // R_G7: reserved by Solaris ABI to libthread
296 296 // R_O7: Used as a temp in many encodings
297 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
298 298
299 299 // Class for all integer registers, except the G registers. This is used for
300 300 // encodings which use G registers as temps. The regular inputs to such
301 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator
302 302 // will not put an input into a temp register.
303 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
304 304
305 305 reg_class g1_regI(R_G1);
306 306 reg_class g3_regI(R_G3);
307 307 reg_class g4_regI(R_G4);
308 308 reg_class o0_regI(R_O0);
309 309 reg_class o7_regI(R_O7);
310 310
311 311 // ----------------------------
312 312 // Pointer Register Classes
313 313 // ----------------------------
314 314 #ifdef _LP64
315 315 // 64-bit build means 64-bit pointers means hi/lo pairs
316 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
317 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
318 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
319 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
320 320 // Lock encodings use G3 and G4 internally
321 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5,
322 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
323 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
324 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
325 325 // Special class for storeP instructions, which can store SP or RPC to TLS.
326 326 // It is also used for memory addressing, allowing direct TLS addressing.
327 327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
328 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
329 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
330 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
331 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register
332 332 // We use it to save R_G2 across calls out of Java.
333 333 reg_class l7_regP(R_L7H,R_L7);
334 334
335 335 // Other special pointer regs
336 336 reg_class g1_regP(R_G1H,R_G1);
337 337 reg_class g2_regP(R_G2H,R_G2);
338 338 reg_class g3_regP(R_G3H,R_G3);
339 339 reg_class g4_regP(R_G4H,R_G4);
340 340 reg_class g5_regP(R_G5H,R_G5);
341 341 reg_class i0_regP(R_I0H,R_I0);
342 342 reg_class o0_regP(R_O0H,R_O0);
343 343 reg_class o1_regP(R_O1H,R_O1);
344 344 reg_class o2_regP(R_O2H,R_O2);
345 345 reg_class o7_regP(R_O7H,R_O7);
346 346
347 347 #else // _LP64
348 348 // 32-bit build means 32-bit pointers means 1 register.
349 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
350 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
351 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
352 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
353 353 // Lock encodings use G3 and G4 internally
354 354 reg_class lock_ptr_reg(R_G1, R_G5,
355 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
356 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
357 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
358 358 // Special class for storeP instructions, which can store SP or RPC to TLS.
359 359 // It is also used for memory addressing, allowing direct TLS addressing.
360 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
361 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
362 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
363 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
364 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register
365 365 // We use it to save R_G2 across calls out of Java.
366 366 reg_class l7_regP(R_L7);
367 367
368 368 // Other special pointer regs
369 369 reg_class g1_regP(R_G1);
370 370 reg_class g2_regP(R_G2);
371 371 reg_class g3_regP(R_G3);
372 372 reg_class g4_regP(R_G4);
373 373 reg_class g5_regP(R_G5);
374 374 reg_class i0_regP(R_I0);
375 375 reg_class o0_regP(R_O0);
376 376 reg_class o1_regP(R_O1);
377 377 reg_class o2_regP(R_O2);
378 378 reg_class o7_regP(R_O7);
379 379 #endif // _LP64
380 380
381 381
382 382 // ----------------------------
383 383 // Long Register Classes
384 384 // ----------------------------
385 385 // Longs in 1 register. Aligned adjacent hi/lo pairs.
386 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp.
387 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
388 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
389 389 #ifdef _LP64
390 390 // 64-bit, longs in 1 register: use all 64-bit integer registers
391 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
392 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
393 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
394 394 #endif // _LP64
395 395 );
396 396
397 397 reg_class g1_regL(R_G1H,R_G1);
398 398 reg_class g3_regL(R_G3H,R_G3);
399 399 reg_class o2_regL(R_O2H,R_O2);
400 400 reg_class o7_regL(R_O7H,R_O7);
401 401
402 402 // ----------------------------
403 403 // Special Class for Condition Code Flags Register
404 404 reg_class int_flags(CCR);
405 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
406 406 reg_class float_flag0(FCC0);
407 407
408 408
409 409 // ----------------------------
410 410 // Floating Point Register Classes
411 411 // ----------------------------
412 412 // Skip F30/F31, they are reserved for mem-mem copies
413 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
414 414
415 415 // Paired floating point registers--they show up in the same order as the floats,
416 416 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
417 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
418 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
419 419 /* Use extra V9 double registers; this AD file does not support V8 */
420 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
421 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
422 422 );
423 423
424 424 // Paired floating point registers--they show up in the same order as the floats,
425 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
426 426 // This class is usable for mis-aligned loads as happen in I2C adapters.
427 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
428 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
429 429 %}
430 430
431 431 //----------DEFINITION BLOCK---------------------------------------------------
432 432 // Define name --> value mappings to inform the ADLC of an integer valued name
433 433 // Current support includes integer values in the range [0, 0x7FFFFFFF]
434 434 // Format:
435 435 // int_def <name> ( <int_value>, <expression>);
436 436 // Generated Code in ad_<arch>.hpp
437 437 // #define <name> (<expression>)
438 438 // // value == <int_value>
439 439 // Generated code in ad_<arch>.cpp adlc_verification()
440 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
441 441 //
442 442 definitions %{
443 443 // The default cost (of an ALU instruction).
444 444 int_def DEFAULT_COST ( 100, 100);
445 445 int_def HUGE_COST (1000000, 1000000);
446 446
447 447 // Memory refs are twice as expensive as run-of-the-mill.
448 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
449 449
450 450 // Branches are even more expensive.
451 451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
452 452 int_def CALL_COST ( 300, DEFAULT_COST * 3);
453 453 %}
454 454
455 455
456 456 //----------SOURCE BLOCK-------------------------------------------------------
457 457 // This is a block of C++ code which provides values, functions, and
458 458 // definitions necessary in the rest of the architecture description
459 459 source_hpp %{
460 460 // Header information of the source block.
461 461 // Method declarations/definitions which are used outside
462 462 // the ad-scope can conveniently be defined here.
463 463 //
464 464 // To keep related declarations/definitions/uses close together,
465 465 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
466 466
467 467 // Must be visible to the DFA in dfa_sparc.cpp
468 468 extern bool can_branch_register( Node *bol, Node *cmp );
469 469
470 470 extern bool use_block_zeroing(Node* count);
471 471
472 472 // Macros to extract hi & lo halves from a long pair.
473 473 // G0 is not part of any long pair, so assert on that.
474 474 // Prevents accidentally using G1 instead of G0.
475 475 #define LONG_HI_REG(x) (x)
476 476 #define LONG_LO_REG(x) (x)
477 477
478 478 class CallStubImpl {
479 479
480 480 //--------------------------------------------------------------
481 481 //---< Used for optimization in Compile::Shorten_branches >---
482 482 //--------------------------------------------------------------
483 483
484 484 public:
485 485 // Size of call trampoline stub.
486 486 static uint size_call_trampoline() {
487 487 return 0; // no call trampolines on this platform
488 488 }
489 489
490 490 // number of relocations needed by a call trampoline stub
491 491 static uint reloc_call_trampoline() {
492 492 return 0; // no call trampolines on this platform
493 493 }
494 494 };
495 495
496 496 class HandlerImpl {
497 497
498 498 public:
499 499
500 500 static int emit_exception_handler(CodeBuffer &cbuf);
501 501 static int emit_deopt_handler(CodeBuffer& cbuf);
502 502
503 503 static uint size_exception_handler() {
504 504 return ( NativeJump::instruction_size ); // sethi;jmp;nop
505 505 }
506 506
507 507 static uint size_deopt_handler() {
508 508 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
509 509 }
510 510 };
511 511
512 512 %}
513 513
514 514 source %{
515 515 #define __ _masm.
516 516
517 517 // tertiary op of a LoadP or StoreP encoding
518 518 #define REGP_OP true
519 519
520 520 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
521 521 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
522 522 static Register reg_to_register_object(int register_encoding);
523 523
524 524 // Used by the DFA in dfa_sparc.cpp.
525 525 // Check for being able to use a V9 branch-on-register. Requires a
526 526 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
527 527 // extended. Doesn't work following an integer ADD, for example, because of
528 528 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
529 529 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
530 530 // replace them with zero, which could become sign-extension in a different OS
531 531 // release. There's no obvious reason why an interrupt will ever fill these
532 532 // bits with non-zero junk (the registers are reloaded with standard LD
533 533 // instructions which either zero-fill or sign-fill).
534 534 bool can_branch_register( Node *bol, Node *cmp ) {
535 535 if( !BranchOnRegister ) return false;
536 536 #ifdef _LP64
537 537 if( cmp->Opcode() == Op_CmpP )
538 538 return true; // No problems with pointer compares
539 539 #endif
540 540 if( cmp->Opcode() == Op_CmpL )
541 541 return true; // No problems with long compares
542 542
543 543 if( !SparcV9RegsHiBitsZero ) return false;
544 544 if( bol->as_Bool()->_test._test != BoolTest::ne &&
545 545 bol->as_Bool()->_test._test != BoolTest::eq )
546 546 return false;
547 547
548 548 // Check for comparing against a 'safe' value. Any operation which
549 549 // clears out the high word is safe. Thus, loads and certain shifts
550 550 // are safe, as are non-negative constants. Any operation which
551 551 // preserves zero bits in the high word is safe as long as each of its
552 552 // inputs are safe. Thus, phis and bitwise booleans are safe if their
553 553 // inputs are safe. At present, the only important case to recognize
554 554 // seems to be loads. Constants should fold away, and shifts &
555 555 // logicals can use the 'cc' forms.
556 556 Node *x = cmp->in(1);
557 557 if( x->is_Load() ) return true;
558 558 if( x->is_Phi() ) {
559 559 for( uint i = 1; i < x->req(); i++ )
560 560 if( !x->in(i)->is_Load() )
561 561 return false;
562 562 return true;
563 563 }
564 564 return false;
565 565 }
566 566
567 567 bool use_block_zeroing(Node* count) {
568 568 // Use BIS for zeroing if count is not constant
569 569 // or it is >= BlockZeroingLowLimit.
570 570 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
571 571 }
572 572
573 573 // ****************************************************************************
574 574
575 575 // REQUIRED FUNCTIONALITY
576 576
577 577 // !!!!! Special hack to get all type of calls to specify the byte offset
578 578 // from the start of the call to the point where the return address
579 579 // will point.
580 580 // The "return address" is the address of the call instruction, plus 8.
581 581
582 582 int MachCallStaticJavaNode::ret_addr_offset() {
583 583 int offset = NativeCall::instruction_size; // call; delay slot
584 584 if (_method_handle_invoke)
585 585 offset += 4; // restore SP
586 586 return offset;
587 587 }
588 588
// Byte offset from the start of a dynamic Java call to its return address.
// For inline-cache calls (vtable_index < 0) this covers the IC-data load
// (sethi/setlo) plus the call; for direct vtable dispatch it must also
// account for the klass load and the vtable-entry loads emitted before
// the call, whose length depends on compressed class pointers and on
// whether the vtable-entry offset fits in simm13.
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
    int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // decode sequence plus the narrow-klass load itself
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if (Assembler::is_simm13(v_off)) {
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    } else {
      // v_off too large for an immediate: extra set_hi/set to build it
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    }
  }
}
618 618
619 619 int MachCallRuntimeNode::ret_addr_offset() {
620 620 #ifdef _LP64
621 621 if (MacroAssembler::is_far_target(entry_point())) {
622 622 return NativeFarCall::instruction_size;
623 623 } else {
624 624 return NativeCall::instruction_size;
625 625 }
626 626 #else
627 627 return NativeCall::instruction_size; // call; delay slot
628 628 #endif
629 629 }
630 630
// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does: the polling
// page address must be materialized into a register before the poll load.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
636 636
// emit an interrupt that is caught by the debugger (for debugging compiler)
// Emits a single breakpoint trap instruction into cbuf.
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}
642 642
643 643 #ifndef PRODUCT
// Debug-print form of the breakpoint node ("TA" = trap-always mnemonic).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
647 647 #endif
648 648
// Emit the breakpoint trap (see emit_break) for this node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
652 652
// Size in bytes of the emitted breakpoint; defer to the generic
// emit-and-measure implementation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
656 656
// Traceable jump
// Emit an indirect jump through the register encoded by jump_target,
// with a nop filling the delay slot.
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}
664 664
// Traceable jump and set exception pc
// Like emit_jmpl, but the delay slot computes the issuing PC
// (O7 + frame::pc_return_offset) into Oissuing_pc for the
// exception-dispatch path.
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}
672 672
// Emit a single nop instruction into cbuf.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
677 677
// Emit an illegal-instruction trap (illtrap 0) into cbuf.
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}
682 682
683 683
// VerifyOops helper (method 1): recover the constant offset of a memory
// node's address from its base oop via MachNode::get_base_and_disp, and
// cross-check it against the disp32 that is about to be encoded.
// Returns the resulting offset within the oop; asserts on any mismatch.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}
697 697
698 698
// VerifyOops helper (method 2): compute the same offset as
// get_offset_from_base, but by walking the address input directly
// (peeking through a matched AddP for a constant offset) instead of
// using get_base_and_disp.  The two results are compared by the caller.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    // Fold the AddP's constant offset (if any) into the address type.
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}
716 716
717 717 static inline jlong replicate_immI(int con, int count, int width) {
718 718 // Load a constant replicated "count" times with width "width"
719 719 assert(count*width == 8 && width <= 4, "sanity");
720 720 int bit_width = width * 8;
721 721 jlong val = con;
722 722 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
723 723 for (int i = 0; i < count - 1; i++) {
724 724 val |= (val << bit_width);
725 725 }
726 726 return val;
727 727 }
728 728
729 729 static inline jlong replicate_immF(float con) {
730 730 // Replicate float con 2 times and pack into vector.
731 731 int val = *((int*)&con);
732 732 jlong lval = val;
733 733 lval = (lval << 32) | (lval & 0xFFFFFFFFl);
734 734 return lval;
735 735 }
736 736
// Standard Sparc opcode form2 field breakdown
// Pack and emit a form2 instruction carrying a 19-bit displacement
// (f30=op, f29/f25/f22/f20/f19 = the remaining fixed fields, f0=disp19).
static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
  f0 &= (1<<19)-1;     // Mask displacement to 19 bits
  int op = (f30 << 30) |
           (f29 << 29) |
           (f25 << 25) |
           (f22 << 22) |
           (f20 << 20) |
           (f19 << 19) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}
749 749
// Standard Sparc opcode form2 field breakdown
// Pack and emit a form2 instruction carrying a 22-bit field; the incoming
// f0 value is pre-shifted down by 10 bits before masking (callers pass a
// full 32-bit quantity whose upper 22 bits are wanted, e.g. for sethi).
static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
  f0 >>= 10;           // Drop 10 bits
  f0 &= (1<<22)-1;     // Mask displacement to 22 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f22 << 22) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}
760 760
// Standard Sparc opcode form3 field breakdown
// Pack and emit a register-register form3 instruction
// (f30=op, f25=rd, f19=op3, f14=rs1, f5=opf/asi area, f0=rs2).
static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (f5  <<  5) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}
771 771
772 772 // Standard Sparc opcode form3 field breakdown
773 773 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
774 774 simm13 &= (1<<13)-1; // Mask to 13 bits
775 775 int op = (f30 << 30) |
776 776 (f25 << 25) |
777 777 (f19 << 19) |
778 778 (f14 << 14) |
779 779 (1 << 13) | // bit to indicate immediate-mode
780 780 (simm13<<0);
781 781 cbuf.insts()->emit_int32(op);
782 782 }
783 783
// As emit3_simm13, but for instructions whose immediate field is only
// 10 bits wide: mask to 10 bits and reuse the simm13 encoder (the
// immediate-mode bit layout is the same).
static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
  simm10 &= (1<<10)-1; // Mask to 10 bits
  emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
}
788 788
789 789 #ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg: report a node
// whose ideal opcode does not match the memory opcode being emitted for
// it (dumps the node, then the two opcode names).
// NOTE(review): the message labels the second opcode "op_ld" even when it
// is a store opcode — confirm whether that wording is intentional.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
796 796 #endif
797 797
798 798
// Emit a SPARC form3 memory instruction (load or store): memory opcode
// "primary", base register src1_enc, displacement disp32 or index
// register src2_enc, and data register dst_enc.  Handles the stack bias
// for SP/FP-based addresses and falls back to an O7-built address when a
// stack offset does not fit in simm13.  In ASSERT builds with +VerifyOops
// it also cross-checks the node against the opcode being emitted and
// appends oop-verification code around the access.
void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used toegether with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Pointer-width access: upgrade the classification to P, and mark
      // the value itself for verification when it is statically an oop.
      if (st_op == Op_StoreI)      st_op = Op_StoreP;
      else if (ld_op == Op_LoadI)  ld_op = Op_LoadP;
      else ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      // Displacement-only address off a non-AddP base: if both offset
      // recovery methods agree the base is a real oop at offset disp32,
      // arrange to verify it (redirecting a clobbering load into O7).
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // re-run both for convenient breakpointing before the assert fires
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Pack the fixed form3 fields; the address mode is decided below.
  uint instr = (Assembler::ldst_op << 30)
             | (dst_enc        << 25)
             | (primary        << 19)
             | (src1_enc       << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
    disp += STACK_BIAS;
    // Check that stack offset fits, load into O7 if not
    if (!Assembler::is_simm13(disp)) {
      MacroAssembler _masm(&cbuf);
      __ set(disp, O7);
      if (index != R_G0_enc) {
        __ add(O7, reg_to_register_object(index), O7);
      }
      index = R_O7_enc;
      disp = 0;
    }
  }

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  if (VerifyOops) {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // Copy the redirected load result from O7 to its intended register.
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}
995 995
// Emit a relocated call to entry_point.  The call MUST be the first
// instruction emitted (see comment below); preserve_g2 saves G2 across
// the call via L7 using the delay slot.  In ASSERT builds, native calls
// additionally trash the argument dump slots to catch stale reads.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder const& rspec, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rspec);

  if (preserve_g2)   __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2)   __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note:  [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}
1056 1056
1057 1057 //=============================================================================
1058 1058 // REQUIRED FUNCTIONALITY for encoding
// Intentionally empty: these encodings emit nothing here — presumably
// the lo/hi constant halves are produced by other encoding classes in
// this file (TODO confirm); the stubs exist to satisfy the ADLC-declared
// interface.
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }
1061 1061
1062 1062
1063 1063 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1065 1065
1066 1066 int Compile::ConstantTable::calculate_table_base_offset() const {
1067 1067 if (UseRDPCForConstantTableBase) {
1068 1068 // The table base offset might be less but then it fits into
1069 1069 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1070 1070 return Assembler::min_simm13();
1071 1071 } else {
1072 1072 int offset = -(size() / 2);
1073 1073 if (!Assembler::is_simm13(offset)) {
1074 1074 offset = Assembler::min_simm13();
1075 1075 }
1076 1076 return offset;
1077 1077 }
1078 1078 }
1079 1079
// SPARC does not use post-register-allocation expansion for the constant
// table base node, so the expand hook must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1084 1084
// Materialize the constant-table base into this node's output register:
// either PC-relative via RDPC (adjusting with a SUB when the table is out
// of simm13 range) or by setting the absolute, relocated table address.
// Also records the chosen table base offset with the constant table.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
1149 1149
1150 1150 uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
1151 1151 if (UseRDPCForConstantTableBase) {
1152 1152 // This is really the worst case but generally it's only 1 instruction.
1153 1153 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
1154 1154 } else {
1155 1155 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
1156 1156 }
1157 1157 }
1158 1158
1159 1159 #ifndef PRODUCT
1160 1160 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1161 1161 char reg[128];
1162 1162 ra_->dump_register(this, reg);
1163 1163 if (UseRDPCForConstantTableBase) {
1164 1164 st->print("RDPC %s\t! constant table base", reg);
1165 1165 } else {
1166 1166 st->print("SET &constanttable,%s\t! constant table base", reg);
1167 1167 }
1168 1168 }
1169 1169 #endif
1170 1170
1171 1171
1172 1172 //=============================================================================
1173 1173
1174 1174 #ifndef PRODUCT
// Debug-print form of the method prologue: optional NOPs, optional thread
// verification, optional stack bang, then the SAVE (one instruction when
// -framesize fits simm13, otherwise SETHI/ADD/SAVE).
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print   ("SAVE   R_SP,-" SIZE_FORMAT ",R_SP",framesize);
  } else {
    st->print_cr("SETHI  R_SP,hi%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print_cr("ADD    R_G3,lo%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print   ("SAVE   R_SP,R_G3,R_SP");
  }

}
1207 1207 #endif
1208 1208
// Emit the method prologue: optional NOPs, thread verification, stack
// bang (if needed), and the register-window SAVE that allocates the
// frame.  Also fixes the constant-table base offset early, since
// constant users may be emitted before MachConstantBaseNode.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_size_in_bytes();
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // -framesize too large for the SAVE immediate: build it in G3 first.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1249 1249
// Size in bytes of the prologue; defer to the generic emit-and-measure
// implementation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1253 1253
// Upper bound on relocation entries the prologue may need.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
1257 1257
1258 1258 //=============================================================================
1259 1259 #ifndef PRODUCT
// Debug-print form of the epilogue: optional safepoint poll, optional
// cbcond-padding NOP, RET, and the RESTORE (in the delay slot when
// returning).
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if(do_polling() && ra_->C->is_method_compilation()) {
    st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX    [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW   [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      st->print("NOP\n\t");
    }
    st->print("RET\n\t");
  }

  st->print("RESTORE");
}
1281 1281 #endif
1282 1282
// Emit the method epilogue: reserved-stack check (if enabled for this
// method), the return-poll load of the polling page, then RET with the
// RESTORE stuffed into its delay slot (or a bare RESTORE when this
// epilogue does not return).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // If this does safepoint polling, then do it here
  if(do_polling() && ra_->C->is_method_compilation()) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr(L0, 0, G0);
  }

  // If this is a return, then stuff the restore in the delay slot
  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      // Insert extra padding for the case when the epilogue is preceded by
      // a cbcond jump, which can't be followed by a CTI instruction
      __ nop();
    }
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}
1314 1314
// Size in bytes of the epilogue; defer to the generic emit-and-measure
// implementation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1318 1318
// Upper bound on relocation entries the epilogue may need.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
1322 1322
// The epilogue uses the default pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1326 1326
// Byte offset within the epilogue of the safepoint poll instruction:
// everything after the sethi that materializes the polling-page address.
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}
1331 1331
1332 1332 //=============================================================================
1333 1333
1334 1334 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1335 1335 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1336 1336 static enum RC rc_class( OptoReg::Name reg ) {
1337 1337 if (!OptoReg::is_valid(reg)) return rc_bad;
1338 1338 if (OptoReg::is_stack(reg)) return rc_stack;
1339 1339 VMReg r = OptoReg::as_VMReg(reg);
1340 1340 if (r->is_Register()) return rc_int;
1341 1341 assert(r->is_FloatRegister(), "must be");
1342 1342 return rc_float;
1343 1343 }
1344 1344
1345 1345 #ifndef PRODUCT
1346 1346 ATTRIBUTE_PRINTF(2, 3)
1347 1347 static void print_helper(outputStream* st, const char* format, ...) {
1348 1348 if (st->position() > 0) {
1349 1349 st->cr();
1350 1350 st->sp();
1351 1351 }
1352 1352 va_list ap;
1353 1353 va_start(ap, format);
1354 1354 st->vprint(format, ap);
1355 1355 va_end(ap);
1356 1356 }
1357 1357 #endif // !PRODUCT
1358 1358
// Spill-copy helper: move register "reg" to/from the stack slot at
// [R_SP + offset] using memory opcode "opcode".  Exactly one output is
// used: when cbuf != NULL the instruction is emitted, otherwise (non-
// PRODUCT) the textual form "op_str" is printed to st.
static void impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool is_load, int offset, int reg, int opcode, const char *op_str, outputStream* st) {
  if (cbuf) {
    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
  }
#ifndef PRODUCT
  else {
    if (is_load) {
      print_helper(st, "%s   [R_SP + #%d],R_%s\t! spill", op_str, offset, OptoReg::regname(reg));
    } else {
      print_helper(st, "%s   R_%s,[R_SP + #%d]\t! spill", op_str, OptoReg::regname(reg), offset);
    }
  }
#endif
}
1373 1373
// Spill-copy helper: emit (or, in non-PRODUCT format mode, print) a
// register-to-register move encoded as an arith_op form3 instruction
// with opcode fields op1/op2.
static void impl_mov_helper(CodeBuffer *cbuf, int src, int dst, int op1, int op2, const char *op_str, outputStream* st) {
  if (cbuf) {
    emit3(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src]);
  }
#ifndef PRODUCT
  else {
    print_helper(st, "%s  R_%s,R_%s\t! spill", op_str, OptoReg::regname(src), OptoReg::regname(dst));
  }
#endif
}
1384 1384
1385 1385 static void mach_spill_copy_implementation_helper(const MachNode* mach,
1386 1386 CodeBuffer *cbuf,
1387 1387 PhaseRegAlloc *ra_,
1388 1388 outputStream* st) {
1389 1389 // Get registers to move
1390 1390 OptoReg::Name src_second = ra_->get_reg_second(mach->in(1));
1391 1391 OptoReg::Name src_first = ra_->get_reg_first(mach->in(1));
1392 1392 OptoReg::Name dst_second = ra_->get_reg_second(mach);
1393 1393 OptoReg::Name dst_first = ra_->get_reg_first(mach);
1394 1394
1395 1395 enum RC src_second_rc = rc_class(src_second);
1396 1396 enum RC src_first_rc = rc_class(src_first);
1397 1397 enum RC dst_second_rc = rc_class(dst_second);
1398 1398 enum RC dst_first_rc = rc_class(dst_first);
1399 1399
1400 1400 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register");
1401 1401
1402 1402 if (src_first == dst_first && src_second == dst_second) {
1403 1403 return; // Self copy, no move
1404 1404 }
1405 1405
1406 1406 // --------------------------------------
1407 1407 // Check for mem-mem move. Load into unused float registers and fall into
1408 1408 // the float-store case.
1409 1409 if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
1410 1410 int offset = ra_->reg2offset(src_first);
1411 1411 // Further check for aligned-adjacent pair, so we can use a double load
1412 1412 if ((src_first&1) == 0 && src_first+1 == src_second) {
1413 1413 src_second = OptoReg::Name(R_F31_num);
1414 1414 src_second_rc = rc_float;
1415 1415 impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::lddf_op3, "LDDF", st);
1416 1416 } else {
1417 1417 impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::ldf_op3, "LDF ", st);
1418 1418 }
1419 1419 src_first = OptoReg::Name(R_F30_num);
1420 1420 src_first_rc = rc_float;
1421 1421 }
1422 1422
1423 1423 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
1424 1424 int offset = ra_->reg2offset(src_second);
1425 1425 impl_helper(mach, cbuf, ra_, true, offset, R_F31_num, Assembler::ldf_op3, "LDF ", st);
1426 1426 src_second = OptoReg::Name(R_F31_num);
1427 1427 src_second_rc = rc_float;
1428 1428 }
1429 1429
1430 1430 // --------------------------------------
1431 1431 // Check for float->int copy; requires a trip through memory
1432 1432 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
1433 1433 int offset = frame::register_save_words*wordSize;
1434 1434 if (cbuf) {
1435 1435 emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16);
1436 1436 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
1437 1437 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
1438 1438 emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16);
1439 1439 }
1440 1440 #ifndef PRODUCT
1441 1441 else {
1442 1442 print_helper(st, "SUB R_SP,16,R_SP");
1443 1443 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
1444 1444 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
1445 1445 print_helper(st, "ADD R_SP,16,R_SP");
1446 1446 }
1447 1447 #endif
1448 1448 }
1449 1449
1450 1450 // Check for float->int copy on T4
1451 1451 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
1452 1452 // Further check for aligned-adjacent pair, so we can use a double move
1453 1453 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1454 1454 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mdtox_opf, "MOVDTOX", st);
1455 1455 return;
1456 1456 }
1457 1457 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mstouw_opf, "MOVSTOUW", st);
1458 1458 }
1459 1459 // Check for int->float copy on T4
1460 1460 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1461 1461 // Further check for aligned-adjacent pair, so we can use a double move
1462 1462 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1463 1463 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mxtod_opf, "MOVXTOD", st);
1464 1464 return;
1465 1465 }
1466 1466 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mwtos_opf, "MOVWTOS", st);
1467 1467 }
1468 1468
1469 1469 // --------------------------------------
1470 1470 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1471 1471 // In such cases, I have to do the big-endian swap. For aligned targets, the
1472 1472 // hardware does the flop for me. Doubles are always aligned, so no problem
1473 1473 // there. Misaligned sources only come from native-long-returns (handled
1474 1474 // special below).
1475 1475 #ifndef _LP64
1476 1476 if (src_first_rc == rc_int && // source is already big-endian
1477 1477 src_second_rc != rc_bad && // 64-bit move
1478 1478 ((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
1479 1479 assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
1480 1480 // Do the big-endian flop.
1481 1481 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1482 1482 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1483 1483 }
1484 1484 #endif
1485 1485
1486 1486 // --------------------------------------
1487 1487 // Check for integer reg-reg copy
1488 1488 if (src_first_rc == rc_int && dst_first_rc == rc_int) {
1489 1489 #ifndef _LP64
1490 1490 if (src_first == R_O0_num && src_second == R_O1_num) { // Check for the evil O0/O1 native long-return case
1491 1491 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1492 1492 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1493 1493 // operand contains the least significant word of the 64-bit value and vice versa.
1494 1494 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1495 1495 assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1496 1496 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1497 1497 if ( cbuf ) {
1498 1498 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
1499 1499 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
1500 1500 emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
1501 1501 #ifndef PRODUCT
1502 1502 } else {
1503 1503 print_helper(st, "SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1504 1504 print_helper(st, "SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1505 1505 print_helper(st, "OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1506 1506 #endif
1507 1507 }
1508 1508 return;
1509 1509 } else if (dst_first == R_I0_num && dst_second == R_I1_num) {
1510 1510 // returning a long value in I0/I1
1511 1511 // a SpillCopy must be able to target a return instruction's reg_class
1512 1512 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1513 1513 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1514 1514 // operand contains the least significant word of the 64-bit value and vice versa.
1515 1515 OptoReg::Name tdest = dst_first;
1516 1516
1517 1517 if (src_first == dst_first) {
1518 1518 tdest = OptoReg::Name(R_O7_num);
1519 1519 }
1520 1520
1521 1521 if (cbuf) {
1522 1522 assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1523 1523 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1524 1524 // ShrL_reg_imm6
1525 1525 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
1526 1526 // ShrR_reg_imm6 src, 0, dst
1527 1527 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
1528 1528 if (tdest != dst_first) {
1529 1529 emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
1530 1530 }
1531 1531 }
1532 1532 #ifndef PRODUCT
1533 1533 else {
1534 1534 print_helper(st, "SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1535 1535 print_helper(st, "SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1536 1536 if (tdest != dst_first) {
1537 1537 print_helper(st, "MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1538 1538 }
1539 1539 }
1540 1540 #endif // PRODUCT
1541 1541 return size+8;
1542 1542 }
1543 1543 #endif // !_LP64
1544 1544 // Else normal reg-reg copy
1545 1545 assert(src_second != dst_first, "smashed second before evacuating it");
1546 1546 impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
1547 1547 assert((src_first & 1) == 0 && (dst_first & 1) == 0, "never move second-halves of int registers");
1548 1548 // This moves an aligned adjacent pair.
1549 1549 // See if we are done.
1550 1550 if (src_first + 1 == src_second && dst_first + 1 == dst_second) {
1551 1551 return;
1552 1552 }
1553 1553 }
1554 1554
1555 1555 // Check for integer store
1556 1556 if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
1557 1557 int offset = ra_->reg2offset(dst_first);
1558 1558 // Further check for aligned-adjacent pair, so we can use a double store
1559 1559 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1560 1560 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stx_op3, "STX ", st);
1561 1561 return;
1562 1562 }
1563 1563 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stw_op3, "STW ", st);
1564 1564 }
1565 1565
1566 1566 // Check for integer load
1567 1567 if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
1568 1568 int offset = ra_->reg2offset(src_first);
1569 1569 // Further check for aligned-adjacent pair, so we can use a double load
1570 1570 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1571 1571 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldx_op3, "LDX ", st);
1572 1572 return;
1573 1573 }
1574 1574 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
1575 1575 }
1576 1576
1577 1577 // Check for float reg-reg copy
1578 1578 if (src_first_rc == rc_float && dst_first_rc == rc_float) {
1579 1579 // Further check for aligned-adjacent pair, so we can use a double move
1580 1580 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1581 1581 impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovd_opf, "FMOVD", st);
1582 1582 return;
1583 1583 }
1584 1584 impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovs_opf, "FMOVS", st);
1585 1585 }
1586 1586
1587 1587 // Check for float store
1588 1588 if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
1589 1589 int offset = ra_->reg2offset(dst_first);
1590 1590 // Further check for aligned-adjacent pair, so we can use a double store
1591 1591 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1592 1592 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stdf_op3, "STDF", st);
1593 1593 return;
1594 1594 }
1595 1595 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
1596 1596 }
1597 1597
1598 1598 // Check for float load
1599 1599 if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
1600 1600 int offset = ra_->reg2offset(src_first);
1601 1601 // Further check for aligned-adjacent pair, so we can use a double load
1602 1602 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1603 1603 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lddf_op3, "LDDF", st);
1604 1604 return;
1605 1605 }
1606 1606 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldf_op3, "LDF ", st);
1607 1607 }
1608 1608
1609 1609 // --------------------------------------------------------------------
1610 1610 // Check for hi bits still needing moving. Only happens for misaligned
1611 1611 // arguments to native calls.
1612 1612 if (src_second == dst_second) {
1613 1613 return; // Self copy; no move
1614 1614 }
1615 1615 assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
1616 1616
1617 1617 #ifndef _LP64
1618 1618 // In the LP64 build, all registers can be moved as aligned/adjacent
1619 1619 // pairs, so there's never any need to move the high bits separately.
1620 1620 // The 32-bit builds have to deal with the 32-bit ABI which can force
1621 1621 // all sorts of silly alignment problems.
1622 1622
1623 1623 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1624 1624 // 32-bits of a 64-bit register, but are needed in low bits of another
1625 1625 // register (else it's a hi-bits-to-hi-bits copy which should have
1626 1626 // happened already as part of a 64-bit move)
1627 1627 if (src_second_rc == rc_int && dst_second_rc == rc_int) {
1628 1628 assert((src_second & 1) == 1, "its the evil O0/O1 native return case");
1629 1629 assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
1630 1630 // Shift src_second down to dst_second's low bits.
1631 1631 if (cbuf) {
1632 1632 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
1633 1633 #ifndef PRODUCT
1634 1634 } else {
1635 1635 print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
1636 1636 #endif
1637 1637 }
1638 1638 return;
1639 1639 }
1640 1640
1641 1641 // Check for high word integer store. Must down-shift the hi bits
1642 1642 // into a temp register, then fall into the case of storing int bits.
1643 1643 if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
1644 1644 // Shift src_second down to dst_second's low bits.
1645 1645 if (cbuf) {
1646 1646 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
1647 1647 #ifndef PRODUCT
1648 1648 } else {
1649 1649 print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
1650 1650 #endif
1651 1651 }
1652 1652 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1653 1653 }
1654 1654
1655 1655 // Check for high word integer load
1656 1656 if (dst_second_rc == rc_int && src_second_rc == rc_stack)
1657 1657 return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
1658 1658
1659 1659 // Check for high word integer store
1660 1660 if (src_second_rc == rc_int && dst_second_rc == rc_stack)
1661 1661 return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
1662 1662
1663 1663 // Check for high word float store
1664 1664 if (src_second_rc == rc_float && dst_second_rc == rc_stack)
1665 1665 return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
1666 1666
1667 1667 #endif // !_LP64
1668 1668
1669 1669 Unimplemented();
1670 1670 }
1671 1671
// Thin wrapper dispatching to the static spill-copy helper above.
// The old do_size mode is no longer supported (size() is computed
// generically), so it must be called with do_size == false.
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf,
                                       PhaseRegAlloc *ra_,
                                       bool do_size,
                                       outputStream* st) const {
  assert(!do_size, "not supported");
  mach_spill_copy_implementation_helper(this, cbuf, ra_, st);
  return 0;
}
1680 1680
#ifndef PRODUCT
// Pretty-print the spill copy; passing cbuf == NULL selects the printing
// path inside the shared implementation.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
1686 1686
// Emit the spill-copy machine code into cbuf.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
1690 1690
// Size in bytes of the emitted spill copy, computed by the generic
// scratch-buffer mechanism rather than by re-running implementation().
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1694 1694
1695 1695 //=============================================================================
#ifndef PRODUCT
// Print the NOP padding; each SPARC NOP is 4 bytes, _count of them emitted.
void MachNopNode::format(PhaseRegAlloc *, outputStream *st) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1701 1701
1702 1702 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
1703 1703 MacroAssembler _masm(&cbuf);
1704 1704 for (int i = 0; i < _count; i += 1) {
1705 1705 __ nop();
1706 1706 }
1707 1707 }
1708 1708
// Size is exact: _count instructions of 4 bytes each.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}
1712 1712
1713 1713
1714 1714 //=============================================================================
1715 1715 #ifndef PRODUCT
1716 1716 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1717 1717 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1718 1718 int reg = ra_->get_reg_first(this);
1719 1719 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
1720 1720 }
1721 1721 #endif
1722 1722
1723 1723 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1724 1724 MacroAssembler _masm(&cbuf);
1725 1725 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
1726 1726 int reg = ra_->get_encode(this);
1727 1727
1728 1728 if (Assembler::is_simm13(offset)) {
1729 1729 __ add(SP, offset, reg_to_register_object(reg));
1730 1730 } else {
1731 1731 __ set(offset, O7);
1732 1732 __ add(SP, O7, reg_to_register_object(reg));
1733 1733 }
1734 1734 }
1735 1735
// Emitted size varies (1 or 3 instructions depending on the offset),
// so measure it by scratch-emitting.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
1741 1741
1742 1742 //=============================================================================
#ifndef PRODUCT
// Pretty-print the Unverified Entry Point (inline-cache check) sequence.
// Must mirror what MachUEPNode::emit() produces; keep the two in sync.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Narrow klass load followed by the decode (shift/add against base).
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif
1772 1772
// Emit the Unverified Entry Point: load the receiver's klass, compare it
// against the inline-cache klass in G5, and trap to the IC-miss handler
// on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}
1786 1786
// Size of the UEP sequence, computed generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1790 1790
1791 1791
1792 1792 //=============================================================================
1793 1793
1794 1794
// Emit exception handler code.
// Emits a stub that jumps (sethi;jmp plus delay-slot nop) to the shared
// exception blob. Returns the handler's offset within the stub section,
// or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  Register temp_reg = G3;
  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

  __ end_a_stub();

  return offset;
}
1818 1818
// Emit the deoptimization handler stub: save a frame, then jump to the
// deopt blob's unpack entry with the restore in the delay slot. Returns
// the handler's offset within the stub section, or 0 on code-cache full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
  Register temp_reg = L0;
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();
  __ save_frame(0);
  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->restore();          // undo the save_frame above

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;

}
1843 1843
// Given a register encoding, produce a Integer Register object
// (the ADLC encoding matches the hardware encoding, verified by the assert).
static Register reg_to_register_object(int register_encoding) {
  assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
  return as_Register(register_encoding);
}
1849 1849
// Given a register encoding, produce a single-precision Float Register object
// (ADLC single-float encodings match the hardware, verified by the assert).
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
  assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding");
  return as_SingleFloatRegister(register_encoding);
}
1855 1855
1856 1856 // Given a register encoding, produce a double-precision Float Register object
1857 1857 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
1858 1858 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
1859 1859 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
↓ open down ↓ |
1859 lines elided |
↑ open up ↑ |
1860 1860 return as_DoubleFloatRegister(register_encoding);
1861 1861 }
1862 1862
1863 1863 const bool Matcher::match_rule_supported(int opcode) {
1864 1864 if (!has_match_rule(opcode))
1865 1865 return false;
1866 1866
1867 1867 switch (opcode) {
1868 1868 case Op_CountLeadingZerosI:
1869 1869 case Op_CountLeadingZerosL:
1870 + if (!(UsePopCountInstruction || UseCountLeadingZerosInstruction))
1871 + return false;
1872 + break;
1870 1873 case Op_CountTrailingZerosI:
1871 1874 case Op_CountTrailingZerosL:
1872 1875 case Op_PopCountI:
1873 1876 case Op_PopCountL:
1874 1877 if (!UsePopCountInstruction)
1875 1878 return false;
1876 1879 case Op_CompareAndSwapL:
1877 1880 #ifdef _LP64
1878 1881 case Op_CompareAndSwapP:
1879 1882 #endif
1880 1883 if (!VM_Version::supports_cx8())
1881 1884 return false;
1882 1885 break;
1883 1886 }
1884 1887
1885 1888 return true; // Per default match rules are supported.
1886 1889 }
1887 1890
// Vector variant of match_rule_supported(); vlen is the vector length in
// elements. No vector-specific restrictions exist yet on SPARC.
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {

  // TODO
  // identify extra cases that we might want to provide match rules for
  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
  bool ret_value = match_rule_supported(opcode);
  // Add rules here.

  return ret_value; // Per default match rules are supported.
}
1898 1901
// SPARC has no predicated (masked) vector instructions.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Float register-pressure threshold: use the platform default unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Map a global register number to its FPU-local offset.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}
1910 1913
#ifdef ASSERT
// Debug-only global updated by the Rethrow encoding; useful when inspecting
// the last rethrow site from a debugger.
address last_rethrow = NULL; // debugging aid for Rethrow encoding
#endif
1914 1917
// Vector width in bytes. SPARC supports only 8-byte vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: 8-byte vectors live in double registers.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  return Op_RegD;
}

// Vector shifts are not implemented on SPARC.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Only one vector size per element type is supported.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}
1941 1944
// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
  return true;
}

// USII supports fxtof through the whole range of number, USIII doesn't
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}
1957 1960
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
// On SPARC short branches mean CBCOND, whose displacement is simm12.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  return UseCBCond && Assembler::is_simm12(offset);
}
1967 1970
1968 1971 const bool Matcher::isSimpleConstant64(jlong value) {
1969 1972 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1970 1973 // Depends on optimizations in MacroAssembler::setx.
1971 1974 int hi = (int)(value >> 32);
1972 1975 int lo = (int)(value & ~0);
1973 1976 return (hi == 0) || (hi == -1) || (lo == 0);
1974 1977 }
1975 1978
// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64, so discourage them there
// by returning the ConditionalMoveLimit as their cost.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
1993 1996
// Complex addressing is not folded into narrow-oop decodes on SPARC.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same as above, for compressed class pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // return Universe::narrow_oop_base() == NULL;
  return true;
}

bool Matcher::const_klass_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // return Universe::narrow_klass_base() == NULL;
  return true;
}
2019 2022
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
2058 2061
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  // G6 carries compressed oops in some calling sequences.
  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}
2098 2101
// A register is spillable as an argument exactly when it can carry a Java arg.
2099 2102 bool Matcher::is_spillable_arg( int reg ) {
2100 2103 return can_be_java_arg(reg);
2101 2104 }
2102 2105
2103 2106 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
2104 2107 // Use hardware SDIVX instruction when it is
2105 2108 // faster than a code which use multiply.
// NOTE(review): the decision here depends only on the CPU capability; the
// 'divisor' parameter is intentionally unused on SPARC.
2106 2109 return VM_Version::has_fast_idiv();
2107 2110 }
2108 2111
// SPARC has no fused div/mod instruction, so C2 never creates DivModI/DivModL
// nodes here; these projection-mask queries must be unreachable.
2109 2112 // Register for DIVI projection of divmodI
2110 2113 RegMask Matcher::divI_proj_mask() {
2111 2114 ShouldNotReachHere();
2112 2115 return RegMask();
2113 2116 }
2114 2117
2115 2118 // Register for MODI projection of divmodI
2116 2119 RegMask Matcher::modI_proj_mask() {
2117 2120 ShouldNotReachHere();
2118 2121 return RegMask();
2119 2122 }
2120 2123
2121 2124 // Register for DIVL projection of divmodL
2122 2125 RegMask Matcher::divL_proj_mask() {
2123 2126 ShouldNotReachHere();
2124 2127 return RegMask();
2125 2128 }
2126 2129
2127 2130 // Register for MODL projection of divmodL
2128 2131 RegMask Matcher::modL_proj_mask() {
2129 2132 ShouldNotReachHere();
2130 2133 return RegMask();
2131 2134 }
2132 2135
// SP is saved in L7 across method-handle invokes (see preserve_SP/restore_SP).
2133 2136 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2134 2137 return L7_REGP_mask();
2135 2138 }
2136 2139
2137 2140
2138 2141 const bool Matcher::convi2l_type_required = true;
2139 2142
2140 2143 // Should the Matcher clone shifts on addressing modes, expecting them
2141 2144 // to be subsumed into complex addressing expressions or compute them
2142 2145 // into registers?
2143 2146 bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2144 2147 return clone_base_plus_offset_address(m, mstack, address_visited);
2145 2148 }
2146 2149
// SPARC performs no AddP reshaping before matching.
2147 2150 void Compile::reshape_address(AddPNode* addp) {
2148 2151 }
2149 2152
2150 2153 %}
2151 2154
2152 2155
2153 2156 // The intptr_t operand types, defined by textual substitution.
2154 2157 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
// immX/iRegX et al. alias the long-flavored operands on 64-bit builds and
// the int-flavored operands on 32-bit builds.
2155 2158 #ifdef _LP64
2156 2159 #define immX immL
2157 2160 #define immX13 immL13
2158 2161 #define immX13m7 immL13m7
2159 2162 #define iRegX iRegL
2160 2163 #define g1RegX g1RegL
2161 2164 #else
2162 2165 #define immX immI
2163 2166 #define immX13 immI13
2164 2167 #define immX13m7 immI13m7
2165 2168 #define iRegX iRegI
2166 2169 #define g1RegX g1RegI
2167 2170 #endif
2168 2171
2169 2172 //----------ENCODING BLOCK-----------------------------------------------------
2170 2173 // This block specifies the encoding classes used by the compiler to output
2171 2174 // byte streams. Encoding classes are parameterized macros used by
2172 2175 // Machine Instruction Nodes in order to generate the bit encoding of the
2173 2176 // instruction. Operands specify their base encoding interface with the
2174 2177 // interface keyword. There are currently four supported interfaces:
2175 2178 // REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER. REG_INTER causes an
2176 2179 // operand to generate a function which returns its register number when
2177 2180 // queried. CONST_INTER causes an operand to generate a function which
2178 2181 // returns the value of the constant when queried. MEMORY_INTER causes an
2179 2182 // operand to generate four functions which return the Base Register, the
2180 2183 // Index Register, the Scale Value, and the Offset Value of the operand when
2181 2184 // queried. COND_INTER causes an operand to generate six functions which
2182 2185 // return the encoding code (ie - encoding bits for the instruction)
2183 2186 // associated with each basic boolean condition for a conditional instruction.
2184 2187 //
2185 2188 // Instructions specify two basic values for encoding. Again, a function
2186 2189 // is available to check if the constant displacement is an oop. They use the
2187 2190 // ins_encode keyword to specify their encoding classes (which must be
2188 2191 // a sequence of enc_class names, and their parameters, specified in
2189 2192 // the encoding block), and they use the
2190 2193 // opcode keyword to specify, in order, their primary, secondary, and
2191 2194 // tertiary opcode. Only the opcode sections which a particular instruction
2192 2195 // needs for encoding need to be specified.
2193 2196 encode %{
// Debug-only placeholder encoding: traps in ASSERT builds, emits nothing in product.
2194 2197 enc_class enc_untested %{
2195 2198 #ifdef ASSERT
2196 2199 MacroAssembler _masm(&cbuf);
2197 2200 __ untested("encoding");
2198 2201 #endif
2199 2202 %}
2200 2203
// Format-3 memory ops. $primary carries the op3 field from the instruct's
// opcode(); the last argument to emit_form3_mem_reg is the rd register (or
// the prefetch function code for prefetch forms).
2201 2204 enc_class form3_mem_reg( memory mem, iRegI dst ) %{
2202 2205 emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary,
2203 2206 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
2204 2207 %}
2205 2208
// As above but without a tertiary opcode (-1).
2206 2209 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
2207 2210 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
2208 2211 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
2209 2212 %}
2210 2213
2211 2214 enc_class form3_mem_prefetch_read( memory mem ) %{
2212 2215 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
2213 2216 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
2214 2217 %}
2215 2218
2216 2219 enc_class form3_mem_prefetch_write( memory mem ) %{
2217 2220 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
2218 2221 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
2219 2222 %}
2220 2223
// Load a misaligned long as two 32-bit word loads (disp and disp+4), then
// merge: hi word shifted left 32 via sllx (simm13 0x1020 encodes shift 32
// with the x-bit), low word OR'ed in from O7. Requires a reg+disp address.
2221 2224 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
2222 2225 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
2223 2226 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
2224 2227 guarantee($mem$$index == R_G0_enc, "double index?");
2225 2228 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
2226 2229 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
2227 2230 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
2228 2231 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
2229 2232 %}
2230 2233
// Misaligned double: load the two 32-bit halves into the even/odd halves of
// the double register pair.
2231 2234 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
2232 2235 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
2233 2236 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
2234 2237 guarantee($mem$$index == R_G0_enc, "double index?");
2235 2238 // Load long with 2 instructions
2236 2239 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
2237 2240 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
2238 2241 %}
2239 2242
2240 2243 //%%% form3_mem_plus_4_reg is a hack--get rid of it
2241 2244 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
2242 2245 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
2243 2246 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
2244 2247 %}
2245 2248
// Register moves are encoded as 'or %g0, rs2, rd'; a move to itself emits
// nothing. LONG_LO_REG/LONG_HI_REG select the halves of a 32-bit long pair.
2246 2249 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
2247 2250 // Encode a reg-reg copy. If it is useless, then empty encoding.
2248 2251 if( $rs2$$reg != $rd$$reg )
2249 2252 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
2250 2253 %}
2251 2254
2252 2255 // Target lo half of long
2253 2256 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
2254 2257 // Encode a reg-reg copy. If it is useless, then empty encoding.
2255 2258 if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
2256 2259 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
2257 2260 %}
2258 2261
2259 2262 // Source lo half of long
2260 2263 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
2261 2264 // Encode a reg-reg copy. If it is useless, then empty encoding.
2262 2265 if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
2263 2266 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
2264 2267 %}
2265 2268
2266 2269 // Target hi half of long
// Fill the hi half with the sign of rs1: sra rs1, 31, rd.
2267 2270 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
2268 2271 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
2269 2272 %}
2270 2273
2271 2274 // Source lo half of long, and leave it sign extended.
2272 2275 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
2273 2276 // Sign extend low half
2274 2277 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
2275 2278 %}
2276 2279
2277 2280 // Source hi half of long, and leave it sign extended.
2278 2281 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
2279 2282 // Shift high half to low half
2280 2283 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
2281 2284 %}
2282 2285
2283 2286 // Source hi half of long
2284 2287 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
2285 2288 // Encode a reg-reg copy. If it is useless, then empty encoding.
2286 2289 if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
2287 2290 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
2288 2291 %}
2289 2292
// Generic three-register arithmetic op; op3 comes from $primary, op from $secondary.
2290 2293 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
2291 2294 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
2292 2295 %}
2293 2296
// dst = (src != 0) ? 1 : 0, via subcc %g0,src (sets carry iff src != 0)
// followed by addc %g0,0,dst.
2294 2297 enc_class enc_to_bool( iRegI src, iRegI dst ) %{
2295 2298 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
2296 2299 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
2297 2300 %}
2298 2301
// dst = (p < q) ? -1 : 0, using an annulled predicted-not-taken branch so the
// 'mov dst,-1' executes only when the branch is taken.
2299 2302 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
2300 2303 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
2301 2304 // clear if nothing else is happening
2302 2305 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
2303 2306 // blt,a,pn done
2304 2307 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
2305 2308 // mov dst,-1 in delay slot
2306 2309 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
2307 2310 %}
2308 2311
// 32-bit shift: shift count masked to 5 bits.
2309 2312 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
2310 2313 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
2311 2314 %}
2312 2315
// 64-bit shift: 6-bit count; 0x1000 sets the x-bit selecting the 64-bit form.
2313 2316 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
2314 2317 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
2315 2318 %}
2316 2319
// 64-bit shift by register: 0x80 sets the x-bit in the register form.
2317 2320 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
2318 2321 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
2319 2322 %}
2320 2323
2321 2324 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
2322 2325 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
2323 2326 %}
2324 2327
// O1 = return address: add %o7, pc_return_offset, %o1.
2325 2328 enc_class move_return_pc_to_o1() %{
2326 2329 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
2327 2330 %}
2328 2331
2329 2332 #ifdef _LP64
2330 2333 /* %%% merge with enc_to_bool */
// dst = (src != NULL) ? 1 : dst, via the movr (move on register condition)
// instruction; 64-bit only.
2331 2334 enc_class enc_convP2B( iRegI dst, iRegP src ) %{
2332 2335 MacroAssembler _masm(&cbuf);
2333 2336
2334 2337 Register src_reg = reg_to_register_object($src$$reg);
2335 2338 Register dst_reg = reg_to_register_object($dst$$reg);
2336 2339 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
2337 2340 %}
2338 2341 #endif
2339 2342
2340 2343 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
2341 2344 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
2342 2345 MacroAssembler _masm(&cbuf);
2343 2346
2344 2347 Register p_reg = reg_to_register_object($p$$reg);
2345 2348 Register q_reg = reg_to_register_object($q$$reg);
2346 2349 Register y_reg = reg_to_register_object($y$$reg);
2347 2350 Register tmp_reg = reg_to_register_object($tmp$$reg);
2348 2351
// p = p - q; tmp = p + y; if the subtract was negative (p < q), p = tmp.
2349 2352 __ subcc( p_reg, q_reg, p_reg );
2350 2353 __ add ( p_reg, y_reg, tmp_reg );
2351 2354 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
2352 2355 %}
2353 2356
// FP->integer conversion helpers. Common shape: compare the source with
// itself to detect NaN, branch over the fixup when ordered (not NaN), do the
// conversion in the delay slot, and on NaN convert back and subtract the
// result from itself so dst becomes zero (Java mandates NaN -> 0).
2354 2357 enc_class form_d2i_helper(regD src, regF dst) %{
2355 2358 // fcmp %fcc0,$src,$src
2356 2359 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2357 2360 // branch %fcc0 not-nan, predict taken
2358 2361 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2359 2362 // fdtoi $src,$dst
2360 2363 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
2361 2364 // fitos $dst,$dst (if nan)
2362 2365 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
2363 2366 // clear $dst (if nan)
2364 2367 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
2365 2368 // carry on here...
2366 2369 %}
2367 2370
2368 2371 enc_class form_d2l_helper(regD src, regD dst) %{
2369 2372 // fcmp %fcc0,$src,$src check for NAN
2370 2373 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2371 2374 // branch %fcc0 not-nan, predict taken
2372 2375 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2373 2376 // fdtox $src,$dst convert in delay slot
2374 2377 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
2375 2378 // fxtod $dst,$dst (if nan)
2376 2379 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2377 2380 // clear $dst (if nan)
2378 2381 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2379 2382 // carry on here...
2380 2383 %}
2381 2384
2382 2385 enc_class form_f2i_helper(regF src, regF dst) %{
2383 2386 // fcmps %fcc0,$src,$src
2384 2387 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2385 2388 // branch %fcc0 not-nan, predict taken
2386 2389 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2387 2390 // fstoi $src,$dst
2388 2391 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
2389 2392 // fitos $dst,$dst (if nan)
2390 2393 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
2391 2394 // clear $dst (if nan)
2392 2395 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
2393 2396 // carry on here...
2394 2397 %}
2395 2398
2396 2399 enc_class form_f2l_helper(regF src, regD dst) %{
2397 2400 // fcmps %fcc0,$src,$src
2398 2401 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2399 2402 // branch %fcc0 not-nan, predict taken
2400 2403 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2401 2404 // fstox $src,$dst
2402 2405 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
2403 2406 // fxtod $dst,$dst (if nan)
2404 2407 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2405 2408 // clear $dst (if nan)
2406 2409 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2407 2410 // carry on here...
2408 2411 %}
2409 2412
// Single-source FPop encodings: op from $primary, op3 from $secondary,
// opf from $tertiary. The +1 variants address the low half of a double pair.
2410 2413 enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2411 2414 enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2412 2415 enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2413 2416 enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2414 2417
2415 2418 enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}
2416 2419
2417 2420 enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2418 2421 enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}
2419 2422
// Two-source FP arithmetic.
2420 2423 enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
2421 2424 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2422 2425 %}
2423 2426
2424 2427 enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
2425 2428 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2426 2429 %}
2427 2430
// FP compares targeting a condition-code register fccN.
2428 2431 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
2429 2432 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2430 2433 %}
2431 2434
2432 2435 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
2433 2436 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2434 2437 %}
2435 2438
// Integer-in-FP-register to float/double conversion; opf from $secondary.
2436 2439 enc_class form3_convI2F(regF rs2, regF rd) %{
2437 2440 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
2438 2441 %}
2439 2442
// Encoding class for traceable jumps
2441 2444 enc_class form_jmpl(g3RegP dest) %{
2442 2445 emit_jmpl(cbuf, $dest$$reg);
2443 2446 %}
2444 2447
2445 2448 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
2446 2449 emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
2447 2450 %}
2448 2451
2449 2452 enc_class form2_nop() %{
2450 2453 emit_nop(cbuf);
2451 2454 %}
2452 2455
// Emit an illegal-instruction trap (used for ShouldNotReachHere paths).
2453 2456 enc_class form2_illtrap() %{
2454 2457 emit_illtrap(cbuf);
2455 2458 %}
2456 2459
2457 2460
2458 2461 // Compare longs and convert into -1, 0, 1.
// Uses annulled predicted-not-taken branches so each 'mov' executes only on
// its branch's taken path; falls through to 'clr dst' for the equal case.
2459 2462 enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
2460 2463 // CMP $src1,$src2
2461 2464 emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
2462 2465 // blt,a,pn done
2463 2466 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
2464 2467 // mov dst,-1 in delay slot
2465 2468 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
2466 2469 // bgt,a,pn done
2467 2470 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
2468 2471 // mov dst,1 in delay slot
2469 2472 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
2470 2473 // CLR $dst
2471 2474 emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
2472 2475 %}
2473 2476
// Out-of-line call to the shared partial-subtype-check stub.
2474 2477 enc_class enc_PartialSubtypeCheck() %{
2475 2478 MacroAssembler _masm(&cbuf);
2476 2479 __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
2477 2480 __ delayed()->nop();
2478 2481 %}
2479 2482
// Conditional branch on icc; backward branches are predicted taken
// (loop back-edges), forward branches predicted not taken.
2480 2483 enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
2481 2484 MacroAssembler _masm(&cbuf);
2482 2485 Label* L = $labl$$label;
2483 2486 Assembler::Predict predict_taken =
2484 2487 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2485 2488
2486 2489 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
2487 2490 __ delayed()->nop();
2488 2491 %}
2489 2492
// Branch on register contents (bpr), same prediction heuristic as enc_bp.
2490 2493 enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
2491 2494 MacroAssembler _masm(&cbuf);
2492 2495 Label* L = $labl$$label;
2493 2496 Assembler::Predict predict_taken =
2494 2497 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2495 2498
2496 2499 __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
2497 2500 __ delayed()->nop();
2498 2501 %}
2499 2502
// Hand-packed MOVcc encodings. Field layout: bit 18 = cc2, bits 17:14 = cond,
// bit 13 = immediate select, bits 12:11 = cc1:cc0, low bits = rs2/simm11.
2500 2503 enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
2501 2504 int op = (Assembler::arith_op << 30) |
2502 2505 ($dst$$reg << 25) |
2503 2506 (Assembler::movcc_op3 << 19) |
2504 2507 (1 << 18) | // cc2 bit for 'icc'
2505 2508 ($cmp$$cmpcode << 14) |
2506 2509 (0 << 13) | // select register move
2507 2510 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc'
2508 2511 ($src$$reg << 0);
2509 2512 cbuf.insts()->emit_int32(op);
2510 2513 %}
2511 2514
2512 2515 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
2513 2516 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2514 2517 int op = (Assembler::arith_op << 30) |
2515 2518 ($dst$$reg << 25) |
2516 2519 (Assembler::movcc_op3 << 19) |
2517 2520 (1 << 18) | // cc2 bit for 'icc'
2518 2521 ($cmp$$cmpcode << 14) |
2519 2522 (1 << 13) | // select immediate move
2520 2523 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc'
2521 2524 (simm11 << 0);
2522 2525 cbuf.insts()->emit_int32(op);
2523 2526 %}
2524 2527
// Integer conditional move on an FP condition code register (fcc0-fcc3).
2525 2528 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
2526 2529 int op = (Assembler::arith_op << 30) |
2527 2530 ($dst$$reg << 25) |
2528 2531 (Assembler::movcc_op3 << 19) |
2529 2532 (0 << 18) | // cc2 bit for 'fccX'
2530 2533 ($cmp$$cmpcode << 14) |
2531 2534 (0 << 13) | // select register move
2532 2535 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3
2533 2536 ($src$$reg << 0);
2534 2537 cbuf.insts()->emit_int32(op);
2535 2538 %}
2536 2539
2537 2540 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
2538 2541 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2539 2542 int op = (Assembler::arith_op << 30) |
2540 2543 ($dst$$reg << 25) |
2541 2544 (Assembler::movcc_op3 << 19) |
2542 2545 (0 << 18) | // cc2 bit for 'fccX'
2543 2546 ($cmp$$cmpcode << 14) |
2544 2547 (1 << 13) | // select immediate move
2545 2548 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3
2546 2549 (simm11 << 0);
2547 2550 cbuf.insts()->emit_int32(op);
2548 2551 %}
2549 2552
// FMOVcc: FP register conditional move on integer condition codes.
// $primary selects single/double/quad in the opf_low field (bits 9:5).
2550 2553 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
2551 2554 int op = (Assembler::arith_op << 30) |
2552 2555 ($dst$$reg << 25) |
2553 2556 (Assembler::fpop2_op3 << 19) |
2554 2557 (0 << 18) |
2555 2558 ($cmp$$cmpcode << 14) |
2556 2559 (1 << 13) | // select register move
2557 2560 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc'
2558 2561 ($primary << 5) | // select single, double or quad
2559 2562 ($src$$reg << 0);
2560 2563 cbuf.insts()->emit_int32(op);
2561 2564 %}
2562 2565
// FMOVcc on an FP condition code register.
2563 2566 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
2564 2567 int op = (Assembler::arith_op << 30) |
2565 2568 ($dst$$reg << 25) |
2566 2569 (Assembler::fpop2_op3 << 19) |
2567 2570 (0 << 18) |
2568 2571 ($cmp$$cmpcode << 14) |
2569 2572 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX'
2570 2573 ($primary << 5) | // select single, double or quad
2571 2574 ($src$$reg << 0);
2572 2575 cbuf.insts()->emit_int32(op);
2573 2576 %}
2574 2577
2575 2578 // Used by the MIN/MAX encodings. Same as a CMOV, but
2576 2579 // the condition comes from opcode-field instead of an argument.
2577 2580 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
2578 2581 int op = (Assembler::arith_op << 30) |
2579 2582 ($dst$$reg << 25) |
2580 2583 (Assembler::movcc_op3 << 19) |
2581 2584 (1 << 18) | // cc2 bit for 'icc'
2582 2585 ($primary << 14) |
2583 2586 (0 << 13) | // select register move
2584 2587 (0 << 11) | // cc1, cc0 bits for 'icc'
2585 2588 ($src$$reg << 0);
2586 2589 cbuf.insts()->emit_int32(op);
2587 2590 %}
2588 2591
// NOTE(review): (6 << 16) sets bits 18 and 17; bit 17 lies in the cond field
// (17:14), whereas the icc variant above encodes cc2 as (1 << 18) with
// cc1:cc0 at bits 12:11. Verify against the SPARC V9 MOVcc encoding and the
// conditions actually used by the long min/max instructs.
2589 2592 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
2590 2593 int op = (Assembler::arith_op << 30) |
2591 2594 ($dst$$reg << 25) |
2592 2595 (Assembler::movcc_op3 << 19) |
2593 2596 (6 << 16) | // cc2 bit for 'xcc'
2594 2597 ($primary << 14) |
2595 2598 (0 << 13) | // select register move
2596 2599 (0 << 11) | // cc1, cc0 bits for 'icc'
2597 2600 ($src$$reg << 0);
2598 2601 cbuf.insts()->emit_int32(op);
2599 2602 %}
2600 2603
// Load a 13-bit signed immediate: or %g0, simm13, rd.
2601 2604 enc_class Set13( immI13 src, iRegI rd ) %{
2602 2605 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
2603 2606 %}
2604 2607
// Load the high 22 bits of a constant via sethi.
2605 2608 enc_class SetHi22( immI src, iRegI rd ) %{
2606 2609 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
2607 2610 %}
2608 2611
// Materialize an arbitrary 32-bit constant (macro-assembler picks the
// shortest sethi/or sequence).
2609 2612 enc_class Set32( immI src, iRegI rd ) %{
2610 2613 MacroAssembler _masm(&cbuf);
2611 2614 __ set($src$$constant, reg_to_register_object($rd$$reg));
2612 2615 %}
2613 2616
// After-call check (VerifyStackAtCalls only): SP + framesize must equal FP,
// else trap. Clobbers G3.
2614 2617 enc_class call_epilog %{
2615 2618 if( VerifyStackAtCalls ) {
2616 2619 MacroAssembler _masm(&cbuf);
2617 2620 int framesize = ra_->C->frame_size_in_bytes();
2618 2621 Register temp_reg = G3;
2619 2622 __ add(SP, framesize, temp_reg);
2620 2623 __ cmp(temp_reg, FP);
2621 2624 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
2622 2625 }
2623 2626 %}
2624 2627
2625 2628 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
2626 2629 // to G1 so the register allocator will not have to deal with the misaligned register
2627 2630 // pair.
2628 2631 enc_class adjust_long_from_native_call %{
2629 2632 #ifndef _LP64
2630 2633 if (returns_long()) {
// G1 = (O0 << 32) | zero-extend(O1); 0x1020 encodes shift count 32 with the
// x-bit set for the 64-bit sllx form.
2631 2634 // sllx O0,32,O0
2632 2635 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
2633 2636 // srl O1,0,O1
2634 2637 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
2635 2638 // or O0,O1,G1
2636 2639 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
2637 2640 }
2638 2641 #endif
2639 2642 %}
2640 2643
2641 2644 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
2642 2645 // CALL directly to the runtime
2643 2646 // The user of this is responsible for ensuring that R_L7 is empty (killed).
2644 2647 emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec(), /*preserve_g2=*/true);
2645 2648 %}
2646 2649
// Save/restore SP in L7 around method-handle invokes; pairs with
// Matcher::method_handle_invoke_SP_save_mask above.
2647 2650 enc_class preserve_SP %{
2648 2651 MacroAssembler _masm(&cbuf);
2649 2652 __ mov(SP, L7_mh_SP_save);
2650 2653 %}
2651 2654
2652 2655 enc_class restore_SP %{
2653 2656 MacroAssembler _masm(&cbuf);
2654 2657 __ mov(L7_mh_SP_save, SP);
2655 2658 %}
2656 2659
2657 2660 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
2658 2661 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
2659 2662 // who we intended to call.
// _method == NULL means a runtime stub call; otherwise emit a resolvable
// static/opt-virtual call plus its to-interpreter stub.
2660 2663 if (!_method) {
2661 2664 emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec());
2662 2665 } else {
2663 2666 int method_index = resolved_method_index(cbuf);
2664 2667 RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
2665 2668 : static_call_Relocation::spec(method_index);
2666 2669 emit_call_reloc(cbuf, $meth$$method, rspec);
2667 2670
2668 2671 // Emit stub for static call.
2669 2672 address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
// Stub allocation can fail when the code cache is full; bail out of the
// whole compile rather than emit a call with no stub.
2670 2673 if (stub == NULL) {
2671 2674 ciEnv::current()->record_failure("CodeCache is full");
2672 2675 return;
2673 2676 }
2674 2677 }
2675 2678 %}
2676 2679
// Dynamic (virtual/interface) call: inline-cache call when no vtable index
// is known, otherwise a direct vtable dispatch through the receiver's klass.
2677 2680 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
2678 2681 MacroAssembler _masm(&cbuf);
2679 2682 __ set_inst_mark();
2680 2683 int vtable_index = this->_vtable_index;
2681 2684 // MachCallDynamicJavaNode::ret_addr_offset uses this same test
2682 2685 if (vtable_index < 0) {
2683 2686 // must be invalid_vtable_index, not nonvirtual_vtable_index
2684 2687 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
2685 2688 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2686 2689 assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
2687 2690 assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
2688 2691 __ ic_call((address)$meth$$method, /*emit_delay=*/true, resolved_method_index(cbuf));
2689 2692 } else {
2690 2693 assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
2691 2694 // Just go thru the vtable
2692 2695 // get receiver klass (receiver already checked for non-null)
2693 2696 // If we end up going thru a c2i adapter interpreter expects method in G5
2694 2697 int off = __ offset();
2695 2698 __ load_klass(O0, G3_scratch);
// klass_load_size tracks the instruction count of load_klass so the size
// assert below (used by ret_addr_offset) stays in sync.
2696 2699 int klass_load_size;
2697 2700 if (UseCompressedClassPointers) {
2698 2701 assert(Universe::heap() != NULL, "java heap should be initialized");
2699 2702 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
2700 2703 } else {
2701 2704 klass_load_size = 1*BytesPerInstWord;
2702 2705 }
2703 2706 int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
2704 2707 int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
2705 2708 if (Assembler::is_simm13(v_off)) {
2706 2709 __ ld_ptr(G3, v_off, G5_method);
2707 2710 } else {
2708 2711 // Generate 2 instructions
2709 2712 __ Assembler::sethi(v_off & ~0x3ff, G5_method);
2710 2713 __ or3(G5_method, v_off & 0x3ff, G5_method);
2711 2714 // ld_ptr, set_hi, set
2712 2715 assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
2713 2716 "Unexpected instruction size(s)");
2714 2717 __ ld_ptr(G3, G5_method, G5_method);
2715 2718 }
2716 2719 // NOTE: for vtable dispatches, the vtable entry will never be null.
2717 2720 // However it may very well end up in handle_wrong_method if the
2718 2721 // method is abstract for the particular class.
2719 2722 __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
2720 2723 // jump to target (either compiled code or c2iadapter)
2721 2724 __ jmpl(G3_scratch, G0, O7);
2722 2725 __ delayed()->nop();
2723 2726 }
2724 2727 %}
2725 2728
// Call compiled Java code: load the entry point from the Method* in G5 and
// call indirectly through G3.
2726 2729 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL
2727 2730 MacroAssembler _masm(&cbuf);
2728 2731
2729 2732 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2730 2733 Register temp_reg = G3; // caller must kill G3! We cannot reuse G5_ic_reg here because
2731 2734 // we might be calling a C2I adapter which needs it.
2732 2735
2733 2736 assert(temp_reg != G5_ic_reg, "conflicting registers");
2734 2737 // Load nmethod
2735 2738 __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);
2736 2739
2737 2740 // CALL to compiled java, indirect the contents of G3
2738 2741 __ set_inst_mark();
2739 2742 __ callr(temp_reg, G0);
2740 2743 __ delayed()->nop();
2741 2744 %}
2742 2745
2743 2746 enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
2744 2747 MacroAssembler _masm(&cbuf);
2745 2748 Register Rdividend = reg_to_register_object($src1$$reg);
2746 2749 Register Rdivisor = reg_to_register_object($src2$$reg);
2747 2750 Register Rresult = reg_to_register_object($dst$$reg);
2748 2751
2749 2752 __ sra(Rdivisor, 0, Rdivisor);
2750 2753 __ sra(Rdividend, 0, Rdividend);
2751 2754 __ sdivx(Rdividend, Rdivisor, Rresult);
2752 2755 %}
2753 2756
2754 2757 enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
2755 2758 MacroAssembler _masm(&cbuf);
2756 2759
2757 2760 Register Rdividend = reg_to_register_object($src1$$reg);
2758 2761 int divisor = $imm$$constant;
2759 2762 Register Rresult = reg_to_register_object($dst$$reg);
2760 2763
2761 2764 __ sra(Rdividend, 0, Rdividend);
2762 2765 __ sdivx(Rdividend, divisor, Rresult);
2763 2766 %}
2764 2767
2765 2768 enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
2766 2769 MacroAssembler _masm(&cbuf);
2767 2770 Register Rsrc1 = reg_to_register_object($src1$$reg);
2768 2771 Register Rsrc2 = reg_to_register_object($src2$$reg);
2769 2772 Register Rdst = reg_to_register_object($dst$$reg);
2770 2773
2771 2774 __ sra( Rsrc1, 0, Rsrc1 );
2772 2775 __ sra( Rsrc2, 0, Rsrc2 );
2773 2776 __ mulx( Rsrc1, Rsrc2, Rdst );
2774 2777 __ srlx( Rdst, 32, Rdst );
2775 2778 %}
2776 2779
2777 2780 enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
2778 2781 MacroAssembler _masm(&cbuf);
2779 2782 Register Rdividend = reg_to_register_object($src1$$reg);
2780 2783 Register Rdivisor = reg_to_register_object($src2$$reg);
2781 2784 Register Rresult = reg_to_register_object($dst$$reg);
2782 2785 Register Rscratch = reg_to_register_object($scratch$$reg);
2783 2786
2784 2787 assert(Rdividend != Rscratch, "");
2785 2788 assert(Rdivisor != Rscratch, "");
2786 2789
2787 2790 __ sra(Rdividend, 0, Rdividend);
2788 2791 __ sra(Rdivisor, 0, Rdivisor);
2789 2792 __ sdivx(Rdividend, Rdivisor, Rscratch);
2790 2793 __ mulx(Rscratch, Rdivisor, Rscratch);
2791 2794 __ sub(Rdividend, Rscratch, Rresult);
2792 2795 %}
2793 2796
2794 2797 enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
2795 2798 MacroAssembler _masm(&cbuf);
2796 2799
2797 2800 Register Rdividend = reg_to_register_object($src1$$reg);
2798 2801 int divisor = $imm$$constant;
2799 2802 Register Rresult = reg_to_register_object($dst$$reg);
2800 2803 Register Rscratch = reg_to_register_object($scratch$$reg);
2801 2804
2802 2805 assert(Rdividend != Rscratch, "");
2803 2806
2804 2807 __ sra(Rdividend, 0, Rdividend);
2805 2808 __ sdivx(Rdividend, divisor, Rscratch);
2806 2809 __ mulx(Rscratch, divisor, Rscratch);
2807 2810 __ sub(Rdividend, Rscratch, Rresult);
2808 2811 %}
2809 2812
2810 2813 enc_class fabss (sflt_reg dst, sflt_reg src) %{
2811 2814 MacroAssembler _masm(&cbuf);
2812 2815
2813 2816 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2814 2817 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2815 2818
2816 2819 __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
2817 2820 %}
2818 2821
2819 2822 enc_class fabsd (dflt_reg dst, dflt_reg src) %{
2820 2823 MacroAssembler _masm(&cbuf);
2821 2824
2822 2825 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2823 2826 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2824 2827
2825 2828 __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
2826 2829 %}
2827 2830
2828 2831 enc_class fnegd (dflt_reg dst, dflt_reg src) %{
2829 2832 MacroAssembler _masm(&cbuf);
2830 2833
2831 2834 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2832 2835 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2833 2836
2834 2837 __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
2835 2838 %}
2836 2839
2837 2840 enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
2838 2841 MacroAssembler _masm(&cbuf);
2839 2842
2840 2843 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2841 2844 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2842 2845
2843 2846 __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
2844 2847 %}
2845 2848
2846 2849 enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
2847 2850 MacroAssembler _masm(&cbuf);
2848 2851
2849 2852 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2850 2853 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2851 2854
2852 2855 __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
2853 2856 %}
2854 2857
2855 2858 enc_class fmovs (dflt_reg dst, dflt_reg src) %{
2856 2859 MacroAssembler _masm(&cbuf);
2857 2860
2858 2861 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2859 2862 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2860 2863
2861 2864 __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
2862 2865 %}
2863 2866
2864 2867 enc_class fmovd (dflt_reg dst, dflt_reg src) %{
2865 2868 MacroAssembler _masm(&cbuf);
2866 2869
2867 2870 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2868 2871 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2869 2872
2870 2873 __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
2871 2874 %}
2872 2875
2873 2876 enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2874 2877 MacroAssembler _masm(&cbuf);
2875 2878
2876 2879 Register Roop = reg_to_register_object($oop$$reg);
2877 2880 Register Rbox = reg_to_register_object($box$$reg);
2878 2881 Register Rscratch = reg_to_register_object($scratch$$reg);
2879 2882 Register Rmark = reg_to_register_object($scratch2$$reg);
2880 2883
2881 2884 assert(Roop != Rscratch, "");
2882 2885 assert(Roop != Rmark, "");
2883 2886 assert(Rbox != Rscratch, "");
2884 2887 assert(Rbox != Rmark, "");
2885 2888
2886 2889 __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
2887 2890 %}
2888 2891
2889 2892 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2890 2893 MacroAssembler _masm(&cbuf);
2891 2894
2892 2895 Register Roop = reg_to_register_object($oop$$reg);
2893 2896 Register Rbox = reg_to_register_object($box$$reg);
2894 2897 Register Rscratch = reg_to_register_object($scratch$$reg);
2895 2898 Register Rmark = reg_to_register_object($scratch2$$reg);
2896 2899
2897 2900 assert(Roop != Rscratch, "");
2898 2901 assert(Roop != Rmark, "");
2899 2902 assert(Rbox != Rscratch, "");
2900 2903 assert(Rbox != Rmark, "");
2901 2904
2902 2905 __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
2903 2906 %}
2904 2907
2905 2908 enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
2906 2909 MacroAssembler _masm(&cbuf);
2907 2910 Register Rmem = reg_to_register_object($mem$$reg);
2908 2911 Register Rold = reg_to_register_object($old$$reg);
2909 2912 Register Rnew = reg_to_register_object($new$$reg);
2910 2913
2911 2914 __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
2912 2915 __ cmp( Rold, Rnew );
2913 2916 %}
2914 2917
2915 2918 enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
2916 2919 Register Rmem = reg_to_register_object($mem$$reg);
2917 2920 Register Rold = reg_to_register_object($old$$reg);
2918 2921 Register Rnew = reg_to_register_object($new$$reg);
2919 2922
2920 2923 MacroAssembler _masm(&cbuf);
2921 2924 __ mov(Rnew, O7);
2922 2925 __ casx(Rmem, Rold, O7);
2923 2926 __ cmp( Rold, O7 );
2924 2927 %}
2925 2928
2926 2929 // raw int cas, used for compareAndSwap
2927 2930 enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
2928 2931 Register Rmem = reg_to_register_object($mem$$reg);
2929 2932 Register Rold = reg_to_register_object($old$$reg);
2930 2933 Register Rnew = reg_to_register_object($new$$reg);
2931 2934
2932 2935 MacroAssembler _masm(&cbuf);
2933 2936 __ mov(Rnew, O7);
2934 2937 __ cas(Rmem, Rold, O7);
2935 2938 __ cmp( Rold, O7 );
2936 2939 %}
2937 2940
2938 2941 // raw int cas without using tmp register for compareAndExchange
2939 2942 enc_class enc_casi_exch( iRegP mem, iRegL old, iRegL new) %{
2940 2943 Register Rmem = reg_to_register_object($mem$$reg);
2941 2944 Register Rold = reg_to_register_object($old$$reg);
2942 2945 Register Rnew = reg_to_register_object($new$$reg);
2943 2946
2944 2947 MacroAssembler _masm(&cbuf);
2945 2948 __ cas(Rmem, Rold, Rnew);
2946 2949 %}
2947 2950
2948 2951 // 64-bit cas without using tmp register for compareAndExchange
2949 2952 enc_class enc_casx_exch( iRegP mem, iRegL old, iRegL new) %{
2950 2953 Register Rmem = reg_to_register_object($mem$$reg);
2951 2954 Register Rold = reg_to_register_object($old$$reg);
2952 2955 Register Rnew = reg_to_register_object($new$$reg);
2953 2956
2954 2957 MacroAssembler _masm(&cbuf);
2955 2958 __ casx(Rmem, Rold, Rnew);
2956 2959 %}
2957 2960
2958 2961 enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
2959 2962 Register Rres = reg_to_register_object($res$$reg);
2960 2963
2961 2964 MacroAssembler _masm(&cbuf);
2962 2965 __ mov(1, Rres);
2963 2966 __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
2964 2967 %}
2965 2968
2966 2969 enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
2967 2970 Register Rres = reg_to_register_object($res$$reg);
2968 2971
2969 2972 MacroAssembler _masm(&cbuf);
2970 2973 __ mov(1, Rres);
2971 2974 __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
2972 2975 %}
2973 2976
2974 2977 enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
2975 2978 MacroAssembler _masm(&cbuf);
2976 2979 Register Rdst = reg_to_register_object($dst$$reg);
2977 2980 FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
2978 2981 : reg_to_DoubleFloatRegister_object($src1$$reg);
2979 2982 FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
2980 2983 : reg_to_DoubleFloatRegister_object($src2$$reg);
2981 2984
2982 2985 // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
2983 2986 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
2984 2987 %}
2985 2988
2986 2989 enc_class enc_rethrow() %{
2987 2990 cbuf.set_insts_mark();
2988 2991 Register temp_reg = G3;
2989 2992 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
2990 2993 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
2991 2994 MacroAssembler _masm(&cbuf);
2992 2995 #ifdef ASSERT
2993 2996 __ save_frame(0);
2994 2997 AddressLiteral last_rethrow_addrlit(&last_rethrow);
2995 2998 __ sethi(last_rethrow_addrlit, L1);
2996 2999 Address addr(L1, last_rethrow_addrlit.low10());
2997 3000 __ rdpc(L2);
2998 3001 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
2999 3002 __ st_ptr(L2, addr);
3000 3003 __ restore();
3001 3004 #endif
3002 3005 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
3003 3006 __ delayed()->nop();
3004 3007 %}
3005 3008
3006 3009 enc_class emit_mem_nop() %{
3007 3010 // Generates the instruction LDUXA [o6,g0],#0x82,g0
3008 3011 cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
3009 3012 %}
3010 3013
3011 3014 enc_class emit_fadd_nop() %{
3012 3015 // Generates the instruction FMOVS f31,f31
3013 3016 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
3014 3017 %}
3015 3018
3016 3019 enc_class emit_br_nop() %{
3017 3020 // Generates the instruction BPN,PN .
3018 3021 cbuf.insts()->emit_int32((unsigned int) 0x00400000);
3019 3022 %}
3020 3023
3021 3024 enc_class enc_membar_acquire %{
3022 3025 MacroAssembler _masm(&cbuf);
3023 3026 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
3024 3027 %}
3025 3028
3026 3029 enc_class enc_membar_release %{
3027 3030 MacroAssembler _masm(&cbuf);
3028 3031 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
3029 3032 %}
3030 3033
3031 3034 enc_class enc_membar_volatile %{
3032 3035 MacroAssembler _masm(&cbuf);
3033 3036 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3034 3037 %}
3035 3038
3036 3039 %}
3037 3040
3038 3041 //----------FRAME--------------------------------------------------------------
3039 3042 // Definition of frame structure and management information.
3040 3043 //
3041 3044 // S T A C K L A Y O U T Allocators stack-slot number
3042 3045 // | (to get allocators register number
3043 3046 // G Owned by | | v add VMRegImpl::stack0)
3044 3047 // r CALLER | |
3045 3048 // o | +--------+ pad to even-align allocators stack-slot
3046 3049 // w V | pad0 | numbers; owned by CALLER
3047 3050 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3048 3051 // h ^ | in | 5
3049 3052 // | | args | 4 Holes in incoming args owned by SELF
3050 3053 // | | | | 3
3051 3054 // | | +--------+
3052 3055 // V | | old out| Empty on Intel, window on Sparc
3053 3056 // | old |preserve| Must be even aligned.
3054 3057 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
3055 3058 // | | in | 3 area for Intel ret address
3056 3059 // Owned by |preserve| Empty on Sparc.
3057 3060 // SELF +--------+
3058 3061 // | | pad2 | 2 pad to align old SP
3059 3062 // | +--------+ 1
3060 3063 // | | locks | 0
3061 3064 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
3062 3065 // | | pad1 | 11 pad to align new SP
3063 3066 // | +--------+
3064 3067 // | | | 10
3065 3068 // | | spills | 9 spills
3066 3069 // V | | 8 (pad0 slot for callee)
3067 3070 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3068 3071 // ^ | out | 7
3069 3072 // | | args | 6 Holes in outgoing args owned by CALLEE
3070 3073 // Owned by +--------+
3071 3074 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3072 3075 // | new |preserve| Must be even-aligned.
3073 3076 // | SP-+--------+----> Matcher::_new_SP, even aligned
3074 3077 // | | |
3075 3078 //
3076 3079 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3077 3080 // known from SELF's arguments and the Java calling convention.
3078 3081 // Region 6-7 is determined per call site.
3079 3082 // Note 2: If the calling convention leaves holes in the incoming argument
3080 3083 // area, those holes are owned by SELF. Holes in the outgoing area
3081 3084 //         are owned by the CALLEE.  Holes should not be necessary in the
3082 3085 // incoming area, as the Java calling convention is completely under
3083 3086 // the control of the AD file. Doubles can be sorted and packed to
3084 3087 // avoid holes. Holes in the outgoing arguments may be necessary for
3085 3088 // varargs C calling conventions.
3086 3089 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3087 3090 // even aligned with pad0 as needed.
3088 3091 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3089 3092 // region 6-11 is even aligned; it may be padded out more so that
3090 3093 // the region from SP to FP meets the minimum stack alignment.
3091 3094
3092 3095 frame %{
3093 3096 // What direction does stack grow in (assumed to be same for native & Java)
3094 3097 stack_direction(TOWARDS_LOW);
3095 3098
3096 3099 // These two registers define part of the calling convention
3097 3100 // between compiled code and the interpreter.
3098 3101 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C
3099 3102 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter
3100 3103
3101 3104 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3102 3105 cisc_spilling_operand_name(indOffset);
3103 3106
3104 3107 // Number of stack slots consumed by a Monitor enter
3105 3108 #ifdef _LP64
3106 3109 sync_stack_slots(2);
3107 3110 #else
3108 3111 sync_stack_slots(1);
3109 3112 #endif
3110 3113
3111 3114 // Compiled code's Frame Pointer
3112 3115 frame_pointer(R_SP);
3113 3116
3114 3117 // Stack alignment requirement
3115 3118 stack_alignment(StackAlignmentInBytes);
3116 3119 // LP64: Alignment size in bytes (128-bit -> 16 bytes)
3117 3120 // !LP64: Alignment size in bytes (64-bit -> 8 bytes)
3118 3121
3119 3122 // Number of stack slots between incoming argument block and the start of
3120 3123 // a new frame. The PROLOG must add this many slots to the stack. The
3121 3124 // EPILOG must remove this many slots.
3122 3125 in_preserve_stack_slots(0);
3123 3126
3124 3127 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3125 3128 // for calls to C. Supports the var-args backing area for register parms.
3126 3129 // ADLC doesn't support parsing expressions, so I folded the math by hand.
3127 3130 #ifdef _LP64
3128 3131 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
3129 3132 varargs_C_out_slots_killed(12);
3130 3133 #else
3131 3134 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
3132 3135 varargs_C_out_slots_killed( 7);
3133 3136 #endif
3134 3137
3135 3138 // The after-PROLOG location of the return address. Location of
3136 3139 // return address specifies a type (REG or STACK) and a number
3137 3140 // representing the register number (i.e. - use a register name) or
3138 3141 // stack slot.
3139 3142 return_addr(REG R_I7); // Ret Addr is in register I7
3140 3143
3141 3144 // Body of function which returns an OptoRegs array locating
3142 3145 // arguments either in registers or in stack slots for calling
3143 3146 // java
3144 3147 calling_convention %{
3145 3148 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
3146 3149
3147 3150 %}
3148 3151
3149 3152 // Body of function which returns an OptoRegs array locating
3150 3153 // arguments either in registers or in stack slots for calling
3151 3154 // C.
3152 3155 c_calling_convention %{
3153 3156 // This is obviously always outgoing
3154 3157 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3155 3158 %}
3156 3159
3157 3160 // Location of native (C/C++) and interpreter return values. This is specified to
3158 3161 // be the same as Java. In the 32-bit VM, long values are actually returned from
3159 3162 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
3160 3163 // to and from the register pairs is done by the appropriate call and epilog
3161 3164 // opcodes. This simplifies the register allocator.
3162 3165 c_return_value %{
3163 3166 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3164 3167 #ifdef _LP64
3165 3168 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3166 3169 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3167 3170 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3168 3171 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3169 3172 #else // !_LP64
3170 3173 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3171 3174 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3172 3175 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3173 3176 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3174 3177 #endif
3175 3178 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3176 3179 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3177 3180 %}
3178 3181
3179 3182 // Location of compiled Java return values. Same as C
3180 3183 return_value %{
3181 3184 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3182 3185 #ifdef _LP64
3183 3186 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3184 3187 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3185 3188 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3186 3189 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3187 3190 #else // !_LP64
3188 3191 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3189 3192 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3190 3193 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3191 3194 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3192 3195 #endif
3193 3196 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3194 3197 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3195 3198 %}
3196 3199
3197 3200 %}
3198 3201
3199 3202
3200 3203 //----------ATTRIBUTES---------------------------------------------------------
3201 3204 //----------Operand Attributes-------------------------------------------------
3202 3205 op_attrib op_cost(1); // Required cost attribute
3203 3206
3204 3207 //----------Instruction Attributes---------------------------------------------
3205 3208 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
3206 3209 ins_attrib ins_size(32); // Required size attribute (in bits)
3207 3210
3208 3211 // avoid_back_to_back attribute is an expression that must return
3209 3212 // one of the following values defined in MachNode:
3210 3213 // AVOID_NONE - instruction can be placed anywhere
3211 3214 // AVOID_BEFORE - instruction cannot be placed after an
3212 3215 // instruction with MachNode::AVOID_AFTER
3213 3216 // AVOID_AFTER - the next instruction cannot be the one
3214 3217 // with MachNode::AVOID_BEFORE
3215 3218 // AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
3216 3219 // the same time
3217 3220 ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
3218 3221
3219 3222 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3220 3223 // non-matching short branch variant of some
3221 3224 // long branch?
3222 3225
3223 3226 //----------OPERANDS-----------------------------------------------------------
3224 3227 // Operand definitions must precede instruction definitions for correct parsing
3225 3228 // in the ADLC because operands constitute user defined types which are used in
3226 3229 // instruction definitions.
3227 3230
3228 3231 //----------Simple Operands----------------------------------------------------
3229 3232 // Immediate Operands
3230 3233 // Integer Immediate: 32-bit
3231 3234 operand immI() %{
3232 3235 match(ConI);
3233 3236
3234 3237 op_cost(0);
3235 3238 // formats are generated automatically for constants and base registers
3236 3239 format %{ %}
3237 3240 interface(CONST_INTER);
3238 3241 %}
3239 3242
3240 3243 // Integer Immediate: 0-bit
3241 3244 operand immI0() %{
3242 3245 predicate(n->get_int() == 0);
3243 3246 match(ConI);
3244 3247 op_cost(0);
3245 3248
3246 3249 format %{ %}
3247 3250 interface(CONST_INTER);
3248 3251 %}
3249 3252
3250 3253 // Integer Immediate: 5-bit
3251 3254 operand immI5() %{
3252 3255 predicate(Assembler::is_simm5(n->get_int()));
3253 3256 match(ConI);
3254 3257 op_cost(0);
3255 3258 format %{ %}
3256 3259 interface(CONST_INTER);
3257 3260 %}
3258 3261
3259 3262 // Integer Immediate: 8-bit
3260 3263 operand immI8() %{
3261 3264 predicate(Assembler::is_simm8(n->get_int()));
3262 3265 match(ConI);
3263 3266 op_cost(0);
3264 3267 format %{ %}
3265 3268 interface(CONST_INTER);
3266 3269 %}
3267 3270
3268 3271 // Integer Immediate: the value 10
3269 3272 operand immI10() %{
3270 3273 predicate(n->get_int() == 10);
3271 3274 match(ConI);
3272 3275 op_cost(0);
3273 3276
3274 3277 format %{ %}
3275 3278 interface(CONST_INTER);
3276 3279 %}
3277 3280
3278 3281 // Integer Immediate: 11-bit
3279 3282 operand immI11() %{
3280 3283 predicate(Assembler::is_simm11(n->get_int()));
3281 3284 match(ConI);
3282 3285 op_cost(0);
3283 3286 format %{ %}
3284 3287 interface(CONST_INTER);
3285 3288 %}
3286 3289
3287 3290 // Integer Immediate: 13-bit
3288 3291 operand immI13() %{
3289 3292 predicate(Assembler::is_simm13(n->get_int()));
3290 3293 match(ConI);
3291 3294 op_cost(0);
3292 3295
3293 3296 format %{ %}
3294 3297 interface(CONST_INTER);
3295 3298 %}
3296 3299
3297 3300 // Integer Immediate: 13-bit minus 7
3298 3301 operand immI13m7() %{
3299 3302 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
3300 3303 match(ConI);
3301 3304 op_cost(0);
3302 3305
3303 3306 format %{ %}
3304 3307 interface(CONST_INTER);
3305 3308 %}
3306 3309
3307 3310 // Integer Immediate: 16-bit
3308 3311 operand immI16() %{
3309 3312 predicate(Assembler::is_simm16(n->get_int()));
3310 3313 match(ConI);
3311 3314 op_cost(0);
3312 3315 format %{ %}
3313 3316 interface(CONST_INTER);
3314 3317 %}
3315 3318
3316 3319 // Integer Immediate: the values 1-31
3317 3320 operand immI_1_31() %{
3318 3321 predicate(n->get_int() >= 1 && n->get_int() <= 31);
3319 3322 match(ConI);
3320 3323 op_cost(0);
3321 3324
3322 3325 format %{ %}
3323 3326 interface(CONST_INTER);
3324 3327 %}
3325 3328
3326 3329 // Integer Immediate: the values 32-63
3327 3330 operand immI_32_63() %{
3328 3331 predicate(n->get_int() >= 32 && n->get_int() <= 63);
3329 3332 match(ConI);
3330 3333 op_cost(0);
3331 3334
3332 3335 format %{ %}
3333 3336 interface(CONST_INTER);
3334 3337 %}
3335 3338
3336 3339 // Immediates for special shifts (sign extend)
3337 3340
3338 3341 // Integer Immediate: the value 16
3339 3342 operand immI_16() %{
3340 3343 predicate(n->get_int() == 16);
3341 3344 match(ConI);
3342 3345 op_cost(0);
3343 3346
3344 3347 format %{ %}
3345 3348 interface(CONST_INTER);
3346 3349 %}
3347 3350
3348 3351 // Integer Immediate: the value 24
3349 3352 operand immI_24() %{
3350 3353 predicate(n->get_int() == 24);
3351 3354 match(ConI);
3352 3355 op_cost(0);
3353 3356
3354 3357 format %{ %}
3355 3358 interface(CONST_INTER);
3356 3359 %}
3357 3360 // Integer Immediate: the value 255
3358 3361 operand immI_255() %{
3359 3362 predicate( n->get_int() == 255 );
3360 3363 match(ConI);
3361 3364 op_cost(0);
3362 3365
3363 3366 format %{ %}
3364 3367 interface(CONST_INTER);
3365 3368 %}
3366 3369
3367 3370 // Integer Immediate: the value 65535
3368 3371 operand immI_65535() %{
3369 3372 predicate(n->get_int() == 65535);
3370 3373 match(ConI);
3371 3374 op_cost(0);
3372 3375
3373 3376 format %{ %}
3374 3377 interface(CONST_INTER);
3375 3378 %}
3376 3379
3377 3380 // Integer Immediate: the values 0-31
3378 3381 operand immU5() %{
3379 3382 predicate(n->get_int() >= 0 && n->get_int() <= 31);
3380 3383 match(ConI);
3381 3384 op_cost(0);
3382 3385
3383 3386 format %{ %}
3384 3387 interface(CONST_INTER);
3385 3388 %}
3386 3389
3387 3390 // Integer Immediate: 6-bit
3388 3391 operand immU6() %{
3389 3392 predicate(n->get_int() >= 0 && n->get_int() <= 63);
3390 3393 match(ConI);
3391 3394 op_cost(0);
3392 3395 format %{ %}
3393 3396 interface(CONST_INTER);
3394 3397 %}
3395 3398
3396 3399 // Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13)
3397 3400 operand immU12() %{
3398 3401 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
3399 3402 match(ConI);
3400 3403 op_cost(0);
3401 3404
3402 3405 format %{ %}
3403 3406 interface(CONST_INTER);
3404 3407 %}
3405 3408
3406 3409 // Integer Immediate non-negative
3407 3410 operand immU31()
3408 3411 %{
3409 3412 predicate(n->get_int() >= 0);
3410 3413 match(ConI);
3411 3414
3412 3415 op_cost(0);
3413 3416 format %{ %}
3414 3417 interface(CONST_INTER);
3415 3418 %}
3416 3419
3417 3420 // Long Immediate: the value FF
3418 3421 operand immL_FF() %{
3419 3422 predicate( n->get_long() == 0xFFL );
3420 3423 match(ConL);
3421 3424 op_cost(0);
3422 3425
3423 3426 format %{ %}
3424 3427 interface(CONST_INTER);
3425 3428 %}
3426 3429
3427 3430 // Long Immediate: the value FFFF
3428 3431 operand immL_FFFF() %{
3429 3432 predicate( n->get_long() == 0xFFFFL );
3430 3433 match(ConL);
3431 3434 op_cost(0);
3432 3435
3433 3436 format %{ %}
3434 3437 interface(CONST_INTER);
3435 3438 %}
3436 3439
3437 3440 // Pointer Immediate: 32 or 64-bit
3438 3441 operand immP() %{
3439 3442 match(ConP);
3440 3443
3441 3444 op_cost(5);
3442 3445 // formats are generated automatically for constants and base registers
3443 3446 format %{ %}
3444 3447 interface(CONST_INTER);
3445 3448 %}
3446 3449
3447 3450 #ifdef _LP64
3448 3451 // Pointer Immediate: 64-bit
3449 3452 operand immP_set() %{
3450 3453 predicate(!VM_Version::is_niagara_plus());
3451 3454 match(ConP);
3452 3455
3453 3456 op_cost(5);
3454 3457 // formats are generated automatically for constants and base registers
3455 3458 format %{ %}
3456 3459 interface(CONST_INTER);
3457 3460 %}
3458 3461
3459 3462 // Pointer Immediate: 64-bit
3460 3463 // From Niagara2 processors on a load should be better than materializing.
3461 3464 operand immP_load() %{
3462 3465 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
3463 3466 match(ConP);
3464 3467
3465 3468 op_cost(5);
3466 3469 // formats are generated automatically for constants and base registers
3467 3470 format %{ %}
3468 3471 interface(CONST_INTER);
3469 3472 %}
3470 3473
3471 3474 // Pointer Immediate: 64-bit
3472 3475 operand immP_no_oop_cheap() %{
3473 3476 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
3474 3477 match(ConP);
3475 3478
3476 3479 op_cost(5);
3477 3480 // formats are generated automatically for constants and base registers
3478 3481 format %{ %}
3479 3482 interface(CONST_INTER);
3480 3483 %}
3481 3484 #endif
3482 3485
3483 3486 operand immP13() %{
3484 3487 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
3485 3488 match(ConP);
3486 3489 op_cost(0);
3487 3490
3488 3491 format %{ %}
3489 3492 interface(CONST_INTER);
3490 3493 %}
3491 3494
3492 3495 operand immP0() %{
3493 3496 predicate(n->get_ptr() == 0);
3494 3497 match(ConP);
3495 3498 op_cost(0);
3496 3499
3497 3500 format %{ %}
3498 3501 interface(CONST_INTER);
3499 3502 %}
3500 3503
3501 3504 operand immP_poll() %{
3502 3505 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3503 3506 match(ConP);
3504 3507
3505 3508 // formats are generated automatically for constants and base registers
3506 3509 format %{ %}
3507 3510 interface(CONST_INTER);
3508 3511 %}
3509 3512
3510 3513 // Pointer Immediate
// Compressed (narrow) oop constant.
3511 3514 operand immN()
3512 3515 %{
3513 3516 match(ConN);
3514 3517 
3515 3518 op_cost(10);
3516 3519 format %{ %}
3517 3520 interface(CONST_INTER);
3518 3521 %}
3519 3522 
// Compressed (narrow) klass pointer constant.
3520 3523 operand immNKlass()
3521 3524 %{
3522 3525 match(ConNKlass);
3523 3526 
3524 3527 op_cost(10);
3525 3528 format %{ %}
3526 3529 interface(CONST_INTER);
3527 3530 %}
3528 3531 
3529 3532 // NULL Pointer Immediate
// Narrow-oop zero (compressed null); cost 0 so it is preferred over immN.
3530 3533 operand immN0()
3531 3534 %{
3532 3535 predicate(n->get_narrowcon() == 0);
3533 3536 match(ConN);
3534 3537 
3535 3538 op_cost(0);
3536 3539 format %{ %}
3537 3540 interface(CONST_INTER);
3538 3541 %}
3539 3542
// Long Immediate: any 64-bit constant (most expensive long immediate form).
3540 3543 operand immL() %{
3541 3544 match(ConL);
3542 3545 op_cost(40);
3543 3546 // formats are generated automatically for constants and base registers
3544 3547 format %{ %}
3545 3548 interface(CONST_INTER);
3546 3549 %}
3547 3550 
// Long Immediate: zero.
3548 3551 operand immL0() %{
3549 3552 predicate(n->get_long() == 0L);
3550 3553 match(ConL);
3551 3554 op_cost(0);
3552 3555 // formats are generated automatically for constants and base registers
3553 3556 format %{ %}
3554 3557 interface(CONST_INTER);
3555 3558 %}
3556 3559 
3557 3560 // Integer Immediate: 5-bit
// The first clause ensures the long round-trips through int, guarding the
// narrowing cast handed to is_simm5().
3558 3561 operand immL5() %{
3559 3562 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
3560 3563 match(ConL);
3561 3564 op_cost(0);
3562 3565 format %{ %}
3563 3566 interface(CONST_INTER);
3564 3567 %}
3565 3568 
3566 3569 // Long Immediate: 13-bit
// NOTE(review): lower bound is exclusive, so -4096 is not matched — see the
// identical pattern on immP13; confirm before changing.
3567 3570 operand immL13() %{
3568 3571 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
3569 3572 match(ConL);
3570 3573 op_cost(0);
3571 3574 
3572 3575 format %{ %}
3573 3576 interface(CONST_INTER);
3574 3577 %}
3575 3578 
3576 3579 // Long Immediate: 13-bit minus 7
// Matches values where (value + 7) still fits the simm13 upper bound; used
// by addressing forms that add a bias of 7 to the displacement.
3577 3580 operand immL13m7() %{
3578 3581 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
3579 3582 match(ConL);
3580 3583 op_cost(0);
3581 3584 
3582 3585 format %{ %}
3583 3586 interface(CONST_INTER);
3584 3587 %}
3585 3588 
3586 3589 // Long Immediate: low 32-bit mask
3587 3590 operand immL_32bits() %{
3588 3591 predicate(n->get_long() == 0xFFFFFFFFL);
3589 3592 match(ConL);
3590 3593 op_cost(0);
3591 3594 
3592 3595 format %{ %}
3593 3596 interface(CONST_INTER);
3594 3597 %}
3595 3598 
3596 3599 // Long Immediate: cheap (materialize in <= 3 instructions)
// On pre-Niagara2 CPUs every long constant is classified as cheap.
3597 3600 operand immL_cheap() %{
3598 3601 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3599 3602 match(ConL);
3600 3603 op_cost(0);
3601 3604 
3602 3605 format %{ %}
3603 3606 interface(CONST_INTER);
3604 3607 %}
3605 3608 
3606 3609 // Long Immediate: expensive (materialize in > 3 instructions)
// Exact logical complement of immL_cheap.
3607 3610 operand immL_expensive() %{
3608 3611 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
3609 3612 match(ConL);
3610 3613 op_cost(0);
3611 3614 
3612 3615 format %{ %}
3613 3616 interface(CONST_INTER);
3614 3617 %}
3615 3618
3616 3619 // Double Immediate
3617 3620 operand immD() %{
3618 3621 match(ConD);
3619 3622 
3620 3623 op_cost(40);
3621 3624 format %{ %}
3622 3625 interface(CONST_INTER);
3623 3626 %}
3624 3627 
3625 3628 // Double Immediate: +0.0d
// Bit-pattern compare: matches +0.0 only, not -0.0 (whose sign bit differs).
3626 3629 operand immD0() %{
3627 3630 predicate(jlong_cast(n->getd()) == 0);
3628 3631 match(ConD);
3629 3632 
3630 3633 op_cost(0);
3631 3634 format %{ %}
3632 3635 interface(CONST_INTER);
3633 3636 %}
3634 3637 
3635 3638 // Float Immediate
3636 3639 operand immF() %{
3637 3640 match(ConF);
3638 3641 
3639 3642 op_cost(20);
3640 3643 format %{ %}
3641 3644 interface(CONST_INTER);
3642 3645 %}
3643 3646 
3644 3647 // Float Immediate: +0.0f
// Bit-pattern compare: matches +0.0f only, not -0.0f.
3645 3648 operand immF0() %{
3646 3649 predicate(jint_cast(n->getf()) == 0);
3647 3650 match(ConF);
3648 3651 
3649 3652 op_cost(0);
3650 3653 format %{ %}
3651 3654 interface(CONST_INTER);
3652 3655 %}
3653 3656
3654 3657 // Integer Register Operands
3655 3658 // Integer Register
// General int register. The extra match() clauses let the more restricted
// operand types listed below be accepted wherever an iRegI is expected.
3656 3659 operand iRegI() %{
3657 3660 constraint(ALLOC_IN_RC(int_reg));
3658 3661 match(RegI);
3659 3662 
3660 3663 match(notemp_iRegI);
3661 3664 match(g1RegI);
3662 3665 match(o0RegI);
3663 3666 match(iRegIsafe);
3664 3667 
3665 3668 format %{ %}
3666 3669 interface(REG_INTER);
3667 3670 %}
3668 3671 
// Int register drawn from the notemp_int_reg class (register classes are
// defined earlier in this file).
3669 3672 operand notemp_iRegI() %{
3670 3673 constraint(ALLOC_IN_RC(notemp_int_reg));
3671 3674 match(RegI);
3672 3675 
3673 3676 match(o0RegI);
3674 3677 
3675 3678 format %{ %}
3676 3679 interface(REG_INTER);
3677 3680 %}
3678 3681 
// The operands below are each pinned to one specific machine register via a
// single-register allocation class (o0, g1, g2, ...).
3679 3682 operand o0RegI() %{
3680 3683 constraint(ALLOC_IN_RC(o0_regI));
3681 3684 match(iRegI);
3682 3685 
3683 3686 format %{ %}
3684 3687 interface(REG_INTER);
3685 3688 %}
3686 3689 
3687 3690 // Pointer Register
3688 3691 operand iRegP() %{
3689 3692 constraint(ALLOC_IN_RC(ptr_reg));
3690 3693 match(RegP);
3691 3694 
3692 3695 match(lock_ptr_RegP);
3693 3696 match(g1RegP);
3694 3697 match(g2RegP);
3695 3698 match(g3RegP);
3696 3699 match(g4RegP);
3697 3700 match(i0RegP);
3698 3701 match(o0RegP);
3699 3702 match(o1RegP);
3700 3703 match(l7RegP);
3701 3704 
3702 3705 format %{ %}
3703 3706 interface(REG_INTER);
3704 3707 %}
3705 3708 
// Pointer register class that also admits SP-relative addressing bases.
3706 3709 operand sp_ptr_RegP() %{
3707 3710 constraint(ALLOC_IN_RC(sp_ptr_reg));
3708 3711 match(RegP);
3709 3712 match(iRegP);
3710 3713 
3711 3714 format %{ %}
3712 3715 interface(REG_INTER);
3713 3716 %}
3714 3717 
3715 3718 operand lock_ptr_RegP() %{
3716 3719 constraint(ALLOC_IN_RC(lock_ptr_reg));
3717 3720 match(RegP);
3718 3721 match(i0RegP);
3719 3722 match(o0RegP);
3720 3723 match(o1RegP);
3721 3724 match(l7RegP);
3722 3725 
3723 3726 format %{ %}
3724 3727 interface(REG_INTER);
3725 3728 %}
3726 3729 
3727 3730 operand g1RegP() %{
3728 3731 constraint(ALLOC_IN_RC(g1_regP));
3729 3732 match(iRegP);
3730 3733 
3731 3734 format %{ %}
3732 3735 interface(REG_INTER);
3733 3736 %}
3734 3737 
3735 3738 operand g2RegP() %{
3736 3739 constraint(ALLOC_IN_RC(g2_regP));
3737 3740 match(iRegP);
3738 3741 
3739 3742 format %{ %}
3740 3743 interface(REG_INTER);
3741 3744 %}
3742 3745 
3743 3746 operand g3RegP() %{
3744 3747 constraint(ALLOC_IN_RC(g3_regP));
3745 3748 match(iRegP);
3746 3749 
3747 3750 format %{ %}
3748 3751 interface(REG_INTER);
3749 3752 %}
3750 3753 
3751 3754 operand g1RegI() %{
3752 3755 constraint(ALLOC_IN_RC(g1_regI));
3753 3756 match(iRegI);
3754 3757 
3755 3758 format %{ %}
3756 3759 interface(REG_INTER);
3757 3760 %}
3758 3761 
3759 3762 operand g3RegI() %{
3760 3763 constraint(ALLOC_IN_RC(g3_regI));
3761 3764 match(iRegI);
3762 3765 
3763 3766 format %{ %}
3764 3767 interface(REG_INTER);
3765 3768 %}
3766 3769 
3767 3770 operand g4RegI() %{
3768 3771 constraint(ALLOC_IN_RC(g4_regI));
3769 3772 match(iRegI);
3770 3773 
3771 3774 format %{ %}
3772 3775 interface(REG_INTER);
3773 3776 %}
3774 3777 
3775 3778 operand g4RegP() %{
3776 3779 constraint(ALLOC_IN_RC(g4_regP));
3777 3780 match(iRegP);
3778 3781 
3779 3782 format %{ %}
3780 3783 interface(REG_INTER);
3781 3784 %}
3782 3785 
3783 3786 operand i0RegP() %{
3784 3787 constraint(ALLOC_IN_RC(i0_regP));
3785 3788 match(iRegP);
3786 3789 
3787 3790 format %{ %}
3788 3791 interface(REG_INTER);
3789 3792 %}
3790 3793 
3791 3794 operand o0RegP() %{
3792 3795 constraint(ALLOC_IN_RC(o0_regP));
3793 3796 match(iRegP);
3794 3797 
3795 3798 format %{ %}
3796 3799 interface(REG_INTER);
3797 3800 %}
3798 3801 
3799 3802 operand o1RegP() %{
3800 3803 constraint(ALLOC_IN_RC(o1_regP));
3801 3804 match(iRegP);
3802 3805 
3803 3806 format %{ %}
3804 3807 interface(REG_INTER);
3805 3808 %}
3806 3809 
3807 3810 operand o2RegP() %{
3808 3811 constraint(ALLOC_IN_RC(o2_regP));
3809 3812 match(iRegP);
3810 3813 
3811 3814 format %{ %}
3812 3815 interface(REG_INTER);
3813 3816 %}
3814 3817 
3815 3818 operand o7RegP() %{
3816 3819 constraint(ALLOC_IN_RC(o7_regP));
3817 3820 match(iRegP);
3818 3821 
3819 3822 format %{ %}
3820 3823 interface(REG_INTER);
3821 3824 %}
3822 3825 
3823 3826 operand l7RegP() %{
3824 3827 constraint(ALLOC_IN_RC(l7_regP));
3825 3828 match(iRegP);
3826 3829 
3827 3830 format %{ %}
3828 3831 interface(REG_INTER);
3829 3832 %}
3830 3833 
3831 3834 operand o7RegI() %{
3832 3835 constraint(ALLOC_IN_RC(o7_regI));
3833 3836 match(iRegI);
3834 3837 
3835 3838 format %{ %}
3836 3839 interface(REG_INTER);
3837 3840 %}
3838 3841 
// Narrow (compressed) oop register; allocated from the same int_reg class
// as iRegI.
3839 3842 operand iRegN() %{
3840 3843 constraint(ALLOC_IN_RC(int_reg));
3841 3844 match(RegN);
3842 3845 
3843 3846 format %{ %}
3844 3847 interface(REG_INTER);
3845 3848 %}
3846 3849
3847 3850 // Long Register
3848 3851 operand iRegL() %{
3849 3852 constraint(ALLOC_IN_RC(long_reg));
3850 3853 match(RegL);
3851 3854 
3852 3855 format %{ %}
3853 3856 interface(REG_INTER);
3854 3857 %}
3855 3858 
// Single-register long operands, each pinned to one machine register.
3856 3859 operand o2RegL() %{
3857 3860 constraint(ALLOC_IN_RC(o2_regL));
3858 3861 match(iRegL);
3859 3862 
3860 3863 format %{ %}
3861 3864 interface(REG_INTER);
3862 3865 %}
3863 3866 
3864 3867 operand o7RegL() %{
3865 3868 constraint(ALLOC_IN_RC(o7_regL));
3866 3869 match(iRegL);
3867 3870 
3868 3871 format %{ %}
3869 3872 interface(REG_INTER);
3870 3873 %}
3871 3874 
3872 3875 operand g1RegL() %{
3873 3876 constraint(ALLOC_IN_RC(g1_regL));
3874 3877 match(iRegL);
3875 3878 
3876 3879 format %{ %}
3877 3880 interface(REG_INTER);
3878 3881 %}
3879 3882 
3880 3883 operand g3RegL() %{
3881 3884 constraint(ALLOC_IN_RC(g3_regL));
3882 3885 match(iRegL);
3883 3886 
3884 3887 format %{ %}
3885 3888 interface(REG_INTER);
3886 3889 %}
3887 3890 
3888 3891 // Int Register safe
3889 3892 // This is 64bit safe
// An int value placed in the long_reg class, so it lives in a register whose
// allocation treats all 64 bits as significant.
3890 3893 operand iRegIsafe() %{
3891 3894 constraint(ALLOC_IN_RC(long_reg));
3892 3895 
3893 3896 match(iRegI);
3894 3897 
3895 3898 format %{ %}
3896 3899 interface(REG_INTER);
3897 3900 %}
3898 3901
3899 3902 // Condition Code Flag Register
3900 3903 operand flagsReg() %{
3901 3904 constraint(ALLOC_IN_RC(int_flags));
3902 3905 match(RegFlags);
3903 3906 
3904 3907 format %{ "ccr" %} // both ICC and XCC
3905 3908 interface(REG_INTER);
3906 3909 %}
3907 3910 
3908 3911 // Condition Code Register, unsigned comparisons.
// Same physical flags; the distinct operand type records that the producing
// compare was unsigned, so only unsigned-condition consumers match it.
3909 3912 operand flagsRegU() %{
3910 3913 constraint(ALLOC_IN_RC(int_flags));
3911 3914 match(RegFlags);
3912 3915 
3913 3916 format %{ "icc_U" %}
3914 3917 interface(REG_INTER);
3915 3918 %}
3916 3919 
3917 3920 // Condition Code Register, pointer comparisons.
3918 3921 operand flagsRegP() %{
3919 3922 constraint(ALLOC_IN_RC(int_flags));
3920 3923 match(RegFlags);
3921 3924 
3922 3925 #ifdef _LP64
3923 3926 format %{ "xcc_P" %}
3924 3927 #else
3925 3928 format %{ "icc_P" %}
3926 3929 #endif
3927 3930 interface(REG_INTER);
3928 3931 %}
3929 3932 
3930 3933 // Condition Code Register, long comparisons.
3931 3934 operand flagsRegL() %{
3932 3935 constraint(ALLOC_IN_RC(int_flags));
3933 3936 match(RegFlags);
3934 3937 
3935 3938 format %{ "xcc_L" %}
3936 3939 interface(REG_INTER);
3937 3940 %}
3938 3941 
3939 3942 // Condition Code Register, floating comparisons, unordered same as "less".
3940 3943 operand flagsRegF() %{
3941 3944 constraint(ALLOC_IN_RC(float_flags));
3942 3945 match(RegFlags);
3943 3946 match(flagsRegF0);
3944 3947 
3945 3948 format %{ %}
3946 3949 interface(REG_INTER);
3947 3950 %}
3948 3951 
// Floating condition codes restricted to fcc0 (the float_flag0 class).
3949 3952 operand flagsRegF0() %{
3950 3953 constraint(ALLOC_IN_RC(float_flag0));
3951 3954 match(RegFlags);
3952 3955 
3953 3956 format %{ %}
3954 3957 interface(REG_INTER);
3955 3958 %}
3956 3959 
3957 3960 
3958 3961 // Condition Code Flag Register used by long compare
// The three variants below differ only in their format name; presumably the
// separate operand types keep the three long-compare matching schemes
// (LT/GE, EQ/NE, LE/GT) from being mixed by the matcher.
3959 3962 operand flagsReg_long_LTGE() %{
3960 3963 constraint(ALLOC_IN_RC(int_flags));
3961 3964 match(RegFlags);
3962 3965 format %{ "icc_LTGE" %}
3963 3966 interface(REG_INTER);
3964 3967 %}
3965 3968 operand flagsReg_long_EQNE() %{
3966 3969 constraint(ALLOC_IN_RC(int_flags));
3967 3970 match(RegFlags);
3968 3971 format %{ "icc_EQNE" %}
3969 3972 interface(REG_INTER);
3970 3973 %}
3971 3974 operand flagsReg_long_LEGT() %{
3972 3975 constraint(ALLOC_IN_RC(int_flags));
3973 3976 match(RegFlags);
3974 3977 format %{ "icc_LEGT" %}
3975 3978 interface(REG_INTER);
3976 3979 %}
3977 3980
3978 3981
// Double-precision FP register.
3979 3982 operand regD() %{
3980 3983 constraint(ALLOC_IN_RC(dflt_reg));
3981 3984 match(RegD);
3982 3985 
3983 3986 match(regD_low);
3984 3987 
3985 3988 format %{ %}
3986 3989 interface(REG_INTER);
3987 3990 %}
3988 3991 
// Single-precision FP register.
3989 3992 operand regF() %{
3990 3993 constraint(ALLOC_IN_RC(sflt_reg));
3991 3994 match(RegF);
3992 3995 
3993 3996 format %{ %}
3994 3997 interface(REG_INTER);
3995 3998 %}
3996 3999 
// Double register restricted to the dflt_low_reg class (the low portion of
// the FP register file).
3997 4000 operand regD_low() %{
3998 4001 constraint(ALLOC_IN_RC(dflt_low_reg));
3999 4002 match(regD);
4000 4003 
4001 4004 format %{ %}
4002 4005 interface(REG_INTER);
4003 4006 %}
4004 4007 
4005 4008 // Special Registers
4006 4009 
4007 4010 // Method Register
4008 4011 operand inline_cache_regP(iRegP reg) %{
4009 4012 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
4010 4013 match(reg);
4011 4014 format %{ %}
4012 4015 interface(REG_INTER);
4013 4016 %}
4014 4017 
4015 4018 operand interpreter_method_oop_regP(iRegP reg) %{
4016 4019 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
4017 4020 match(reg);
4018 4021 format %{ %}
4019 4022 interface(REG_INTER);
4020 4023 %}
4021 4024
4022 4025
4023 4026 //----------Complex Operands---------------------------------------------------
4024 4027 // Indirect Memory Reference
// [reg] with zero displacement and no index.
4025 4028 operand indirect(sp_ptr_RegP reg) %{
4026 4029 constraint(ALLOC_IN_RC(sp_ptr_reg));
4027 4030 match(reg);
4028 4031 
4029 4032 op_cost(100);
4030 4033 format %{ "[$reg]" %}
4031 4034 interface(MEMORY_INTER) %{
4032 4035 base($reg);
4033 4036 index(0x0);
4034 4037 scale(0x0);
4035 4038 disp(0x0);
4036 4039 %}
4037 4040 %}
4038 4041 
4039 4042 // Indirect with simm13 Offset
4040 4043 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
4041 4044 constraint(ALLOC_IN_RC(sp_ptr_reg));
4042 4045 match(AddP reg offset);
4043 4046 
4044 4047 op_cost(100);
4045 4048 format %{ "[$reg + $offset]" %}
4046 4049 interface(MEMORY_INTER) %{
4047 4050 base($reg);
4048 4051 index(0x0);
4049 4052 scale(0x0);
4050 4053 disp($offset);
4051 4054 %}
4052 4055 %}
4053 4056 
4054 4057 // Indirect with simm13 Offset minus 7
// Same shape as indOffset13 but the offset operand (immX13m7) requires
// value+7 to still fit simm13.
4055 4058 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
4056 4059 constraint(ALLOC_IN_RC(sp_ptr_reg));
4057 4060 match(AddP reg offset);
4058 4061 
4059 4062 op_cost(100);
4060 4063 format %{ "[$reg + $offset]" %}
4061 4064 interface(MEMORY_INTER) %{
4062 4065 base($reg);
4063 4066 index(0x0);
4064 4067 scale(0x0);
4065 4068 disp($offset);
4066 4069 %}
4067 4070 %}
4068 4071 
4069 4072 // Note: Intel has a swapped version also, like this:
4070 4073 //operand indOffsetX(iRegI reg, immP offset) %{
4071 4074 // constraint(ALLOC_IN_RC(int_reg));
4072 4075 // match(AddP offset reg);
4073 4076 //
4074 4077 // op_cost(100);
4075 4078 // format %{ "[$reg + $offset]" %}
4076 4079 // interface(MEMORY_INTER) %{
4077 4080 // base($reg);
4078 4081 // index(0x0);
4079 4082 // scale(0x0);
4080 4083 // disp($offset);
4081 4084 // %}
4082 4085 //%}
4083 4086 //// However, it doesn't make sense for SPARC, since
4084 4087 // we have no particularly good way to embed oops in
4085 4088 // single instructions.
4086 4089 
4087 4090 // Indirect with Register Index
// [addr + index] register-plus-register addressing; no displacement allowed
// in this form.
4088 4091 operand indIndex(iRegP addr, iRegX index) %{
4089 4092 constraint(ALLOC_IN_RC(ptr_reg));
4090 4093 match(AddP addr index);
4091 4094 
4092 4095 op_cost(100);
4093 4096 format %{ "[$addr + $index]" %}
4094 4097 interface(MEMORY_INTER) %{
4095 4098 base($addr);
4096 4099 index($index);
4097 4100 scale(0x0);
4098 4101 disp(0x0);
4099 4102 %}
4100 4103 %}
4101 4104
4102 4105 //----------Special Memory Operands--------------------------------------------
4103 4106 // Stack Slot Operand - This operand is used for loading and storing temporary
4104 4107 // values on the stack where a match requires a value to
4105 4108 // flow through memory.
// All five stackSlot operands below address [R_SP + disp], where disp is the
// assigned stack-slot offset; they differ only in the value type spilled.
4106 4109 operand stackSlotI(sRegI reg) %{
4107 4110 constraint(ALLOC_IN_RC(stack_slots));
4108 4111 op_cost(100);
4109 4112 //match(RegI);
4110 4113 format %{ "[$reg]" %}
4111 4114 interface(MEMORY_INTER) %{
4112 4115 base(0xE); // R_SP
4113 4116 index(0x0);
4114 4117 scale(0x0);
4115 4118 disp($reg); // Stack Offset
4116 4119 %}
4117 4120 %}
4118 4121 
4119 4122 operand stackSlotP(sRegP reg) %{
4120 4123 constraint(ALLOC_IN_RC(stack_slots));
4121 4124 op_cost(100);
4122 4125 //match(RegP);
4123 4126 format %{ "[$reg]" %}
4124 4127 interface(MEMORY_INTER) %{
4125 4128 base(0xE); // R_SP
4126 4129 index(0x0);
4127 4130 scale(0x0);
4128 4131 disp($reg); // Stack Offset
4129 4132 %}
4130 4133 %}
4131 4134 
4132 4135 operand stackSlotF(sRegF reg) %{
4133 4136 constraint(ALLOC_IN_RC(stack_slots));
4134 4137 op_cost(100);
4135 4138 //match(RegF);
4136 4139 format %{ "[$reg]" %}
4137 4140 interface(MEMORY_INTER) %{
4138 4141 base(0xE); // R_SP
4139 4142 index(0x0);
4140 4143 scale(0x0);
4141 4144 disp($reg); // Stack Offset
4142 4145 %}
4143 4146 %}
4144 4147 operand stackSlotD(sRegD reg) %{
4145 4148 constraint(ALLOC_IN_RC(stack_slots));
4146 4149 op_cost(100);
4147 4150 //match(RegD);
4148 4151 format %{ "[$reg]" %}
4149 4152 interface(MEMORY_INTER) %{
4150 4153 base(0xE); // R_SP
4151 4154 index(0x0);
4152 4155 scale(0x0);
4153 4156 disp($reg); // Stack Offset
4154 4157 %}
4155 4158 %}
4156 4159 operand stackSlotL(sRegL reg) %{
4157 4160 constraint(ALLOC_IN_RC(stack_slots));
4158 4161 op_cost(100);
4159 4162 //match(RegL);
4160 4163 format %{ "[$reg]" %}
4161 4164 interface(MEMORY_INTER) %{
4162 4165 base(0xE); // R_SP
4163 4166 index(0x0);
4164 4167 scale(0x0);
4165 4168 disp($reg); // Stack Offset
4166 4169 %}
4167 4170 %}
4168 4171
4169 4172 // Operands for expressing Control Flow
4170 4173 // NOTE: Label is a predefined operand which should not be redefined in
4171 4174 // the AD file. It is generically handled within the ADLC.
4172 4175 
4173 4176 //----------Conditional Branch Operands----------------------------------------
4174 4177 // Comparison Op - This is the operation of the comparison, and is limited to
4175 4178 // the following set of codes:
4176 4179 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4177 4180 //
4178 4181 // Other attributes of the comparison, such as unsignedness, are specified
4179 4182 // by the comparison instruction that sets a condition code flags register.
4180 4183 // That result is represented by a flags operand whose subtype is appropriate
4181 4184 // to the unsignedness (etc.) of the comparison.
4182 4185 //
4183 4186 // Later, the instruction which matches both the Comparison Op (a Bool) and
4184 4187 // the flags (produced by the Cmp) specifies the coding of the comparison op
4185 4188 // by matching a specific subtype of Bool operand below, such as cmpOpU.
4186 4189 
// Signed compare. The values are SPARC Bicc/BPcc 4-bit cond-field encodings:
// e=0x1, ne=0x9, l=0x3, ge=0xB, le=0x2, g=0xA, vs=0x7, vc=0xF.
// Note this is the only cmpOp variant without a predicate excluding the
// overflow/no_overflow tests.
4187 4190 operand cmpOp() %{
4188 4191 match(Bool);
4189 4192 
4190 4193 format %{ "" %}
4191 4194 interface(COND_INTER) %{
4192 4195 equal(0x1);
4193 4196 not_equal(0x9);
4194 4197 less(0x3);
4195 4198 greater_equal(0xB);
4196 4199 less_equal(0x2);
4197 4200 greater(0xA);
4198 4201 overflow(0x7);
4199 4202 no_overflow(0xF);
4200 4203 %}
4201 4204 %}
4202 4205 
4203 4206 // Comparison Op, unsigned
// Unsigned cond encodings: cs/lu=0x5, cc/geu=0xD, leu=0x4, gu=0xC.
4204 4207 operand cmpOpU() %{
4205 4208 match(Bool);
4206 4209 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4207 4210 n->as_Bool()->_test._test != BoolTest::no_overflow);
4208 4211 
4209 4212 format %{ "u" %}
4210 4213 interface(COND_INTER) %{
4211 4214 equal(0x1);
4212 4215 not_equal(0x9);
4213 4216 less(0x5);
4214 4217 greater_equal(0xD);
4215 4218 less_equal(0x4);
4216 4219 greater(0xC);
4217 4220 overflow(0x7);
4218 4221 no_overflow(0xF);
4219 4222 %}
4220 4223 %}
4221 4224 
4222 4225 // Comparison Op, pointer (same as unsigned)
4223 4226 operand cmpOpP() %{
4224 4227 match(Bool);
4225 4228 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4226 4229 n->as_Bool()->_test._test != BoolTest::no_overflow);
4227 4230 
4228 4231 format %{ "p" %}
4229 4232 interface(COND_INTER) %{
4230 4233 equal(0x1);
4231 4234 not_equal(0x9);
4232 4235 less(0x5);
4233 4236 greater_equal(0xD);
4234 4237 less_equal(0x4);
4235 4238 greater(0xC);
4236 4239 overflow(0x7);
4237 4240 no_overflow(0xF);
4238 4241 %}
4239 4242 %}
4240 4243 
4241 4244 // Comparison Op, branch-register encoding
// Values here are the 3-bit rcond encodings of the SPARC V9
// branch-on-register (BPr) instructions: rz=1, rlez=2, rlz=3, rnz=5,
// rgz=6, rgez=7.
4242 4245 operand cmpOp_reg() %{
4243 4246 match(Bool);
4244 4247 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4245 4248 n->as_Bool()->_test._test != BoolTest::no_overflow);
4246 4249 
4247 4250 format %{ "" %}
4248 4251 interface(COND_INTER) %{
4249 4252 equal (0x1);
4250 4253 not_equal (0x5);
4251 4254 less (0x3);
4252 4255 greater_equal(0x7);
4253 4256 less_equal (0x2);
4254 4257 greater (0x6);
4255 4258 overflow(0x7); // not supported
4256 4259 no_overflow(0xF); // not supported
4257 4260 %}
4258 4261 %}
4259 4262 
4260 4263 // Comparison Code, floating, unordered same as less
4261 4264 operand cmpOpF() %{
4262 4265 match(Bool);
4263 4266 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4264 4267 n->as_Bool()->_test._test != BoolTest::no_overflow);
4265 4268 
4266 4269 format %{ "fl" %}
4267 4270 interface(COND_INTER) %{
4268 4271 equal(0x9);
4269 4272 not_equal(0x1);
4270 4273 less(0x3);
4271 4274 greater_equal(0xB);
4272 4275 less_equal(0xE);
4273 4276 greater(0x6);
4274 4277 
4275 4278 overflow(0x7); // not supported
4276 4279 no_overflow(0xF); // not supported
4277 4280 %}
4278 4281 %}
4279 4282 
4280 4283 // Used by long compare
// Encodings with less/greater (and their _equal forms) swapped relative to
// cmpOp, for branches whose compare operands have been commuted.
4281 4284 operand cmpOp_commute() %{
4282 4285 match(Bool);
4283 4286 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4284 4287 n->as_Bool()->_test._test != BoolTest::no_overflow);
4285 4288 
4286 4289 format %{ "" %}
4287 4290 interface(COND_INTER) %{
4288 4291 equal(0x1);
4289 4292 not_equal(0x9);
4290 4293 less(0xA);
4291 4294 greater_equal(0x2);
4292 4295 less_equal(0xB);
4293 4296 greater(0x3);
4294 4297 overflow(0x7);
4295 4298 no_overflow(0xF);
4296 4299 %}
4297 4300 %}
4298 4301
4299 4302 //----------OPERAND CLASSES----------------------------------------------------
4300 4303 // Operand Classes are groups of operands that are used to simplify
4301 4304 // instruction definitions by not requiring the AD writer to specify separate
4302 4305 // instructions for every form of operand when the instruction accepts
4303 4306 // multiple operand types with the same basic encoding and format. The classic
4304 4307 // case of this is memory operands.
// NOTE(review): indOffset13m7 is not part of the generic 'memory' class;
// instructions needing the biased displacement must name it explicitly —
// presumably intentional, confirm against its users.
4305 4308 opclass memory( indirect, indOffset13, indIndex );
4306 4309 opclass indIndexMemory( indIndex );
4307 4310
4308 4311 //----------PIPELINE-----------------------------------------------------------
4309 4312 pipeline %{
4310 4313
4311 4314 //----------ATTRIBUTES---------------------------------------------------------
4312 4315 attributes %{
4313 4316 fixed_size_instructions; // Fixed size instructions
4314 4317 branch_has_delay_slot; // Branch has delay slot following
4315 4318 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle
4316 4319 instruction_unit_size = 4; // An instruction is 4 bytes long
4317 4320 instruction_fetch_unit_size = 16; // The processor fetches one line
4318 4321 instruction_fetch_units = 1; // of 16 bytes
4319 4322 
4320 4323 // List of nop instructions
4321 4324 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
4322 4325 %}
4323 4326 
4324 4327 //----------RESOURCES----------------------------------------------------------
4325 4328 // Resources are the functional units available to the machine
// IALU is a composite resource: work requiring IALU may be scheduled on
// either of the A0/A1 integer units.
4326 4329 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);
4327 4330 
4328 4331 //----------PIPELINE DESCRIPTION-----------------------------------------------
4329 4332 // Pipeline Description specifies the stages in the machine's pipeline
4330 4333 
4331 4334 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
4332 4335
4333 4336 //----------PIPELINE CLASSES---------------------------------------------------
4334 4337 // Pipeline Classes describe the stages in which input and output are
4335 4338 // referenced by the hardware pipeline.
// Stage names (A..D) come from the pipe_desc above; "dst : E(write)" means
// the result is produced in stage E, "src : R(read)" that the operand is
// read in stage R, and "IALU : R" that an integer unit is busy in stage R.
4336 4339 
4337 4340 // Integer ALU reg-reg operation
4338 4341 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4339 4342 single_instruction;
4340 4343 dst : E(write);
4341 4344 src1 : R(read);
4342 4345 src2 : R(read);
4343 4346 IALU : R;
4344 4347 %}
4345 4348 
4346 4349 // Integer ALU reg-reg long operation
4347 4350 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
4348 4351 instruction_count(2);
4349 4352 dst : E(write);
4350 4353 src1 : R(read);
4351 4354 src2 : R(read);
4352 4355 IALU : R;
4353 4356 IALU : R;
4354 4357 %}
4355 4358 
4356 4359 // Integer ALU reg-reg long dependent operation
4357 4360 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
4358 4361 instruction_count(1); multiple_bundles;
4359 4362 dst : E(write);
4360 4363 src1 : R(read);
4361 4364 src2 : R(read);
4362 4365 cr : E(write);
4363 4366 IALU : R(2);
4364 4367 %}
4365 4368 
4366 4369 // Integer ALU reg-imm operation
4367 4370 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4368 4371 single_instruction;
4369 4372 dst : E(write);
4370 4373 src1 : R(read);
4371 4374 IALU : R;
4372 4375 %}
4373 4376 
4374 4377 // Integer ALU reg-reg operation with condition code
4375 4378 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
4376 4379 single_instruction;
4377 4380 dst : E(write);
4378 4381 cr : E(write);
4379 4382 src1 : R(read);
4380 4383 src2 : R(read);
4381 4384 IALU : R;
4382 4385 %}
4383 4386 
4384 4387 // Integer ALU reg-imm operation with condition code
4385 4388 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
4386 4389 single_instruction;
4387 4390 dst : E(write);
4388 4391 cr : E(write);
4389 4392 src1 : R(read);
4390 4393 IALU : R;
4391 4394 %}
4392 4395 
4393 4396 // Integer ALU zero-reg operation
4394 4397 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
4395 4398 single_instruction;
4396 4399 dst : E(write);
4397 4400 src2 : R(read);
4398 4401 IALU : R;
4399 4402 %}
4400 4403 
4401 4404 // Integer ALU zero-reg operation with condition code only
4402 4405 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
4403 4406 single_instruction;
4404 4407 cr : E(write);
4405 4408 src : R(read);
4406 4409 IALU : R;
4407 4410 %}
4408 4411 
4409 4412 // Integer ALU reg-reg operation with condition code only
4410 4413 pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4411 4414 single_instruction;
4412 4415 cr : E(write);
4413 4416 src1 : R(read);
4414 4417 src2 : R(read);
4415 4418 IALU : R;
4416 4419 %}
4417 4420 
4418 4421 // Integer ALU reg-imm operation with condition code only
4419 4422 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4420 4423 single_instruction;
4421 4424 cr : E(write);
4422 4425 src1 : R(read);
4423 4426 IALU : R;
4424 4427 %}
4425 4428 
4426 4429 // Integer ALU reg-reg-zero operation with condition code only
4427 4430 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
4428 4431 single_instruction;
4429 4432 cr : E(write);
4430 4433 src1 : R(read);
4431 4434 src2 : R(read);
4432 4435 IALU : R;
4433 4436 %}
4434 4437 
4435 4438 // Integer ALU reg-imm-zero operation with condition code only
4436 4439 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
4437 4440 single_instruction;
4438 4441 cr : E(write);
4439 4442 src1 : R(read);
4440 4443 IALU : R;
4441 4444 %}
4442 4445 
4443 4446 // Integer ALU reg-reg operation with condition code, src1 modified
// src1 appears twice: read in stage R and overwritten in stage E.
4444 4447 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4445 4448 single_instruction;
4446 4449 cr : E(write);
4447 4450 src1 : E(write);
4448 4451 src1 : R(read);
4449 4452 src2 : R(read);
4450 4453 IALU : R;
4451 4454 %}
4452 4455 
4453 4456 // Integer ALU reg-imm operation with condition code, src1 modified
4454 4457 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4455 4458 single_instruction;
4456 4459 cr : E(write);
4457 4460 src1 : E(write);
4458 4461 src1 : R(read);
4459 4462 IALU : R;
4460 4463 %}
4461 4464
// Long compare producing an int result; occupies the integer units for 3
// cycles and the branch unit for 2, with the result 4 cycles after stage E.
4462 4465 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
4463 4466 multiple_bundles;
4464 4467 dst : E(write)+4;
4465 4468 cr : E(write);
4466 4469 src1 : R(read);
4467 4470 src2 : R(read);
4468 4471 IALU : R(3);
4469 4472 BR : R(2);
4470 4473 %}
4471 4474 
4472 4475 // Integer ALU operation
4473 4476 pipe_class ialu_none(iRegI dst) %{
4474 4477 single_instruction;
4475 4478 dst : E(write);
4476 4479 IALU : R;
4477 4480 %}
4478 4481 
4479 4482 // Integer ALU reg operation
4480 4483 pipe_class ialu_reg(iRegI dst, iRegI src) %{
4481 4484 single_instruction; may_have_no_code;
4482 4485 dst : E(write);
4483 4486 src : R(read);
4484 4487 IALU : R;
4485 4488 %}
4486 4489 
4487 4490 // Integer ALU reg conditional operation
4488 4491 // This instruction has a 1 cycle stall, and cannot execute
4489 4492 // in the same cycle as the instruction setting the condition
4490 4493 // code. We kludge this by pretending to read the condition code
4491 4494 // 1 cycle earlier, and by marking the functional units as busy
4492 4495 // for 2 cycles with the result available 1 cycle later than
4493 4496 // is really the case.
4494 4497 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
4495 4498 single_instruction;
4496 4499 op2_out : C(write);
4497 4500 op1 : R(read);
4498 4501 cr : R(read); // This is really E, with a 1 cycle stall
4499 4502 BR : R(2);
4500 4503 MS : R(2);
4501 4504 %}
4502 4505 
4503 4506 #ifdef _LP64
// 64-bit only: clear-and-conditional-move sequence.
4504 4507 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
4505 4508 instruction_count(1); multiple_bundles;
4506 4509 dst : C(write)+1;
4507 4510 src : R(read)+1;
4508 4511 IALU : R(1);
4509 4512 BR : E(2);
4510 4513 MS : E(2);
4511 4514 %}
4512 4515 #endif
4513 4516
4514 4517 // Integer ALU reg operation
// Moves between long and int views of a register; may_have_no_code allows
// the move to be elided when source and destination coincide.
4515 4518 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
4516 4519 single_instruction; may_have_no_code;
4517 4520 dst : E(write);
4518 4521 src : R(read);
4519 4522 IALU : R;
4520 4523 %}
4521 4524 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
4522 4525 single_instruction; may_have_no_code;
4523 4526 dst : E(write);
4524 4527 src : R(read);
4525 4528 IALU : R;
4526 4529 %}
4527 4530 
4528 4531 // Two integer ALU reg operations
// Uses both A0 and A1 explicitly rather than the shared IALU resource.
4529 4532 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4530 4533 instruction_count(2);
4531 4534 dst : E(write);
4532 4535 src : R(read);
4533 4536 A0 : R;
4534 4537 A1 : R;
4535 4538 %}
4536 4539 
4537 4540 // Two integer ALU reg operations
4538 4541 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
4539 4542 instruction_count(2); may_have_no_code;
4540 4543 dst : E(write);
4541 4544 src : R(read);
4542 4545 A0 : R;
4543 4546 A1 : R;
4544 4547 %}
4545 4548 
4546 4549 // Integer ALU imm operation
4547 4550 pipe_class ialu_imm(iRegI dst, immI13 src) %{
4548 4551 single_instruction;
4549 4552 dst : E(write);
4550 4553 IALU : R;
4551 4554 %}
4552 4555 
4553 4556 // Integer ALU reg-reg with carry operation
4554 4557 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
4555 4558 single_instruction;
4556 4559 dst : E(write);
4557 4560 src1 : R(read);
4558 4561 src2 : R(read);
4559 4562 IALU : R;
4560 4563 %}
4561 4564 
4562 4565 // Integer ALU cc operation
4563 4566 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
4564 4567 single_instruction;
4565 4568 dst : E(write);
4566 4569 cc : R(read);
4567 4570 IALU : R;
4568 4571 %}
4569 4572 
4570 4573 // Integer ALU cc / second IALU operation
4571 4574 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
4572 4575 instruction_count(1); multiple_bundles;
4573 4576 dst : E(write)+1;
4574 4577 src : R(read);
4575 4578 IALU : R;
4576 4579 %}
4577 4580 
4578 4581 // Integer ALU cc / second IALU operation
4579 4582 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
4580 4583 instruction_count(1); multiple_bundles;
4581 4584 dst : E(write)+1;
4582 4585 p : R(read);
4583 4586 q : R(read);
4584 4587 IALU : R;
4585 4588 %}
4586 4589 
4587 4590 // Integer ALU hi-lo-reg operation
// Two-instruction constant materialization (sethi + or style): IALU busy for
// 2 cycles, result available one cycle after stage E.
4588 4591 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
4589 4592 instruction_count(1); multiple_bundles;
4590 4593 dst : E(write)+1;
4591 4594 IALU : R(2);
4592 4595 %}
4593 4596 
4594 4597 // Float ALU hi-lo-reg operation (with temp)
4595 4598 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
4596 4599 instruction_count(1); multiple_bundles;
4597 4600 dst : E(write)+1;
4598 4601 IALU : R(2);
4599 4602 %}
4600 4603
4601 4604 // Long Constant
4602 4605 pipe_class loadConL( iRegL dst, immL src ) %{
4603 4606 instruction_count(2); multiple_bundles;
4604 4607 dst : E(write)+1;
4605 4608 IALU : R(2);
4606 4609 IALU : R(2);
4607 4610 %}
4608 4611
4609 4612 // Pointer Constant
4610 4613 pipe_class loadConP( iRegP dst, immP src ) %{
4611 4614 instruction_count(0); multiple_bundles;
4612 4615 fixed_latency(6);
4613 4616 %}
4614 4617
// Polling Address
// Materialize the safepoint polling-page address constant into dst.
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
    // 64-bit: multi-instruction constant sequence; modeled purely as
    // 6 cycles of fixed latency across multiple bundles.
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
#else
    // 32-bit: dst is written in stage E using one IALU slot.
    dst   : E(write);
    IALU  : R;
#endif
%}
4625 4628
4626 4629 // Long Constant small
4627 4630 pipe_class loadConLlo( iRegL dst, immL src ) %{
4628 4631 instruction_count(2);
4629 4632 dst : E(write);
4630 4633 IALU : R;
4631 4634 IALU : R;
4632 4635 %}
4633 4636
4634 4637 // [PHH] This is wrong for 64-bit. See LdImmF/D.
4635 4638 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
4636 4639 instruction_count(1); multiple_bundles;
4637 4640 src : R(read);
4638 4641 dst : M(write)+1;
4639 4642 IALU : R;
4640 4643 MS : E;
4641 4644 %}
4642 4645
4643 4646 // Integer ALU nop operation
4644 4647 pipe_class ialu_nop() %{
4645 4648 single_instruction;
4646 4649 IALU : R;
4647 4650 %}
4648 4651
4649 4652 // Integer ALU nop operation
4650 4653 pipe_class ialu_nop_A0() %{
4651 4654 single_instruction;
4652 4655 A0 : R;
4653 4656 %}
4654 4657
4655 4658 // Integer ALU nop operation
4656 4659 pipe_class ialu_nop_A1() %{
4657 4660 single_instruction;
4658 4661 A1 : R;
4659 4662 %}
4660 4663
4661 4664 // Integer Multiply reg-reg operation
4662 4665 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4663 4666 single_instruction;
4664 4667 dst : E(write);
4665 4668 src1 : R(read);
4666 4669 src2 : R(read);
4667 4670 MS : R(5);
4668 4671 %}
4669 4672
4670 4673 // Integer Multiply reg-imm operation
4671 4674 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4672 4675 single_instruction;
4673 4676 dst : E(write);
4674 4677 src1 : R(read);
4675 4678 MS : R(5);
4676 4679 %}
4677 4680
4678 4681 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4679 4682 single_instruction;
4680 4683 dst : E(write)+4;
4681 4684 src1 : R(read);
4682 4685 src2 : R(read);
4683 4686 MS : R(6);
4684 4687 %}
4685 4688
4686 4689 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4687 4690 single_instruction;
4688 4691 dst : E(write)+4;
4689 4692 src1 : R(read);
4690 4693 MS : R(6);
4691 4694 %}
4692 4695
4693 4696 // Integer Divide reg-reg
4694 4697 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
4695 4698 instruction_count(1); multiple_bundles;
4696 4699 dst : E(write);
4697 4700 temp : E(write);
4698 4701 src1 : R(read);
4699 4702 src2 : R(read);
4700 4703 temp : R(read);
4701 4704 MS : R(38);
4702 4705 %}
4703 4706
4704 4707 // Integer Divide reg-imm
4705 4708 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
4706 4709 instruction_count(1); multiple_bundles;
4707 4710 dst : E(write);
4708 4711 temp : E(write);
4709 4712 src1 : R(read);
4710 4713 temp : R(read);
4711 4714 MS : R(38);
4712 4715 %}
4713 4716
4714 4717 // Long Divide
4715 4718 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4716 4719 dst : E(write)+71;
4717 4720 src1 : R(read);
4718 4721 src2 : R(read)+1;
4719 4722 MS : R(70);
4720 4723 %}
4721 4724
4722 4725 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4723 4726 dst : E(write)+71;
4724 4727 src1 : R(read);
4725 4728 MS : R(70);
4726 4729 %}
4727 4730
4728 4731 // Floating Point Add Float
4729 4732 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
4730 4733 single_instruction;
4731 4734 dst : X(write);
4732 4735 src1 : E(read);
4733 4736 src2 : E(read);
4734 4737 FA : R;
4735 4738 %}
4736 4739
4737 4740 // Floating Point Add Double
4738 4741 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
4739 4742 single_instruction;
4740 4743 dst : X(write);
4741 4744 src1 : E(read);
4742 4745 src2 : E(read);
4743 4746 FA : R;
4744 4747 %}
4745 4748
4746 4749 // Floating Point Conditional Move based on integer flags
4747 4750 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
4748 4751 single_instruction;
4749 4752 dst : X(write);
4750 4753 src : E(read);
4751 4754 cr : R(read);
4752 4755 FA : R(2);
4753 4756 BR : R(2);
4754 4757 %}
4755 4758
4756 4759 // Floating Point Conditional Move based on integer flags
4757 4760 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
4758 4761 single_instruction;
4759 4762 dst : X(write);
4760 4763 src : E(read);
4761 4764 cr : R(read);
4762 4765 FA : R(2);
4763 4766 BR : R(2);
4764 4767 %}
4765 4768
4766 4769 // Floating Point Multiply Float
4767 4770 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
4768 4771 single_instruction;
4769 4772 dst : X(write);
4770 4773 src1 : E(read);
4771 4774 src2 : E(read);
4772 4775 FM : R;
4773 4776 %}
4774 4777
4775 4778 // Floating Point Multiply Double
4776 4779 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
4777 4780 single_instruction;
4778 4781 dst : X(write);
4779 4782 src1 : E(read);
4780 4783 src2 : E(read);
4781 4784 FM : R;
4782 4785 %}
4783 4786
4784 4787 // Floating Point Divide Float
4785 4788 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
4786 4789 single_instruction;
4787 4790 dst : X(write);
4788 4791 src1 : E(read);
4789 4792 src2 : E(read);
4790 4793 FM : R;
4791 4794 FDIV : C(14);
4792 4795 %}
4793 4796
4794 4797 // Floating Point Divide Double
4795 4798 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
4796 4799 single_instruction;
4797 4800 dst : X(write);
4798 4801 src1 : E(read);
4799 4802 src2 : E(read);
4800 4803 FM : R;
4801 4804 FDIV : C(17);
4802 4805 %}
4803 4806
4804 4807 // Floating Point Move/Negate/Abs Float
4805 4808 pipe_class faddF_reg(regF dst, regF src) %{
4806 4809 single_instruction;
4807 4810 dst : W(write);
4808 4811 src : E(read);
4809 4812 FA : R(1);
4810 4813 %}
4811 4814
4812 4815 // Floating Point Move/Negate/Abs Double
4813 4816 pipe_class faddD_reg(regD dst, regD src) %{
4814 4817 single_instruction;
4815 4818 dst : W(write);
4816 4819 src : E(read);
4817 4820 FA : R;
4818 4821 %}
4819 4822
4820 4823 // Floating Point Convert F->D
4821 4824 pipe_class fcvtF2D(regD dst, regF src) %{
4822 4825 single_instruction;
4823 4826 dst : X(write);
4824 4827 src : E(read);
4825 4828 FA : R;
4826 4829 %}
4827 4830
4828 4831 // Floating Point Convert I->D
4829 4832 pipe_class fcvtI2D(regD dst, regF src) %{
4830 4833 single_instruction;
4831 4834 dst : X(write);
4832 4835 src : E(read);
4833 4836 FA : R;
4834 4837 %}
4835 4838
4836 4839 // Floating Point Convert LHi->D
4837 4840 pipe_class fcvtLHi2D(regD dst, regD src) %{
4838 4841 single_instruction;
4839 4842 dst : X(write);
4840 4843 src : E(read);
4841 4844 FA : R;
4842 4845 %}
4843 4846
4844 4847 // Floating Point Convert L->D
4845 4848 pipe_class fcvtL2D(regD dst, regF src) %{
4846 4849 single_instruction;
4847 4850 dst : X(write);
4848 4851 src : E(read);
4849 4852 FA : R;
4850 4853 %}
4851 4854
4852 4855 // Floating Point Convert L->F
4853 4856 pipe_class fcvtL2F(regD dst, regF src) %{
4854 4857 single_instruction;
4855 4858 dst : X(write);
4856 4859 src : E(read);
4857 4860 FA : R;
4858 4861 %}
4859 4862
4860 4863 // Floating Point Convert D->F
4861 4864 pipe_class fcvtD2F(regD dst, regF src) %{
4862 4865 single_instruction;
4863 4866 dst : X(write);
4864 4867 src : E(read);
4865 4868 FA : R;
4866 4869 %}
4867 4870
4868 4871 // Floating Point Convert I->L
4869 4872 pipe_class fcvtI2L(regD dst, regF src) %{
4870 4873 single_instruction;
4871 4874 dst : X(write);
4872 4875 src : E(read);
4873 4876 FA : R;
4874 4877 %}
4875 4878
// Floating Point Convert D->I
4877 4880 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
4878 4881 instruction_count(1); multiple_bundles;
4879 4882 dst : X(write)+6;
4880 4883 src : E(read);
4881 4884 FA : R;
4882 4885 %}
4883 4886
4884 4887 // Floating Point Convert D->L
4885 4888 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
4886 4889 instruction_count(1); multiple_bundles;
4887 4890 dst : X(write)+6;
4888 4891 src : E(read);
4889 4892 FA : R;
4890 4893 %}
4891 4894
4892 4895 // Floating Point Convert F->I
4893 4896 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
4894 4897 instruction_count(1); multiple_bundles;
4895 4898 dst : X(write)+6;
4896 4899 src : E(read);
4897 4900 FA : R;
4898 4901 %}
4899 4902
4900 4903 // Floating Point Convert F->L
4901 4904 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
4902 4905 instruction_count(1); multiple_bundles;
4903 4906 dst : X(write)+6;
4904 4907 src : E(read);
4905 4908 FA : R;
4906 4909 %}
4907 4910
4908 4911 // Floating Point Convert I->F
4909 4912 pipe_class fcvtI2F(regF dst, regF src) %{
4910 4913 single_instruction;
4911 4914 dst : X(write);
4912 4915 src : E(read);
4913 4916 FA : R;
4914 4917 %}
4915 4918
4916 4919 // Floating Point Compare
4917 4920 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
4918 4921 single_instruction;
4919 4922 cr : X(write);
4920 4923 src1 : E(read);
4921 4924 src2 : E(read);
4922 4925 FA : R;
4923 4926 %}
4924 4927
4925 4928 // Floating Point Compare
4926 4929 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
4927 4930 single_instruction;
4928 4931 cr : X(write);
4929 4932 src1 : E(read);
4930 4933 src2 : E(read);
4931 4934 FA : R;
4932 4935 %}
4933 4936
4934 4937 // Floating Add Nop
4935 4938 pipe_class fadd_nop() %{
4936 4939 single_instruction;
4937 4940 FA : R;
4938 4941 %}
4939 4942
4940 4943 // Integer Store to Memory
4941 4944 pipe_class istore_mem_reg(memory mem, iRegI src) %{
4942 4945 single_instruction;
4943 4946 mem : R(read);
4944 4947 src : C(read);
4945 4948 MS : R;
4946 4949 %}
4947 4950
// Integer Store to Memory (stack-pointer-or-pointer-register source)
4949 4952 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
4950 4953 single_instruction;
4951 4954 mem : R(read);
4952 4955 src : C(read);
4953 4956 MS : R;
4954 4957 %}
4955 4958
4956 4959 // Integer Store Zero to Memory
4957 4960 pipe_class istore_mem_zero(memory mem, immI0 src) %{
4958 4961 single_instruction;
4959 4962 mem : R(read);
4960 4963 MS : R;
4961 4964 %}
4962 4965
4963 4966 // Special Stack Slot Store
4964 4967 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
4965 4968 single_instruction;
4966 4969 stkSlot : R(read);
4967 4970 src : C(read);
4968 4971 MS : R;
4969 4972 %}
4970 4973
4971 4974 // Special Stack Slot Store
4972 4975 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
4973 4976 instruction_count(2); multiple_bundles;
4974 4977 stkSlot : R(read);
4975 4978 src : C(read);
4976 4979 MS : R(2);
4977 4980 %}
4978 4981
4979 4982 // Float Store
4980 4983 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
4981 4984 single_instruction;
4982 4985 mem : R(read);
4983 4986 src : C(read);
4984 4987 MS : R;
4985 4988 %}
4986 4989
4987 4990 // Float Store
4988 4991 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
4989 4992 single_instruction;
4990 4993 mem : R(read);
4991 4994 MS : R;
4992 4995 %}
4993 4996
4994 4997 // Double Store
4995 4998 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
4996 4999 instruction_count(1);
4997 5000 mem : R(read);
4998 5001 src : C(read);
4999 5002 MS : R;
5000 5003 %}
5001 5004
5002 5005 // Double Store
5003 5006 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
5004 5007 single_instruction;
5005 5008 mem : R(read);
5006 5009 MS : R;
5007 5010 %}
5008 5011
5009 5012 // Special Stack Slot Float Store
5010 5013 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
5011 5014 single_instruction;
5012 5015 stkSlot : R(read);
5013 5016 src : C(read);
5014 5017 MS : R;
5015 5018 %}
5016 5019
5017 5020 // Special Stack Slot Double Store
5018 5021 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
5019 5022 single_instruction;
5020 5023 stkSlot : R(read);
5021 5024 src : C(read);
5022 5025 MS : R;
5023 5026 %}
5024 5027
5025 5028 // Integer Load (when sign bit propagation not needed)
5026 5029 pipe_class iload_mem(iRegI dst, memory mem) %{
5027 5030 single_instruction;
5028 5031 mem : R(read);
5029 5032 dst : C(write);
5030 5033 MS : R;
5031 5034 %}
5032 5035
5033 5036 // Integer Load from stack operand
5034 5037 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
5035 5038 single_instruction;
5036 5039 mem : R(read);
5037 5040 dst : C(write);
5038 5041 MS : R;
5039 5042 %}
5040 5043
5041 5044 // Integer Load (when sign bit propagation or masking is needed)
5042 5045 pipe_class iload_mask_mem(iRegI dst, memory mem) %{
5043 5046 single_instruction;
5044 5047 mem : R(read);
5045 5048 dst : M(write);
5046 5049 MS : R;
5047 5050 %}
5048 5051
5049 5052 // Float Load
5050 5053 pipe_class floadF_mem(regF dst, memory mem) %{
5051 5054 single_instruction;
5052 5055 mem : R(read);
5053 5056 dst : M(write);
5054 5057 MS : R;
5055 5058 %}
5056 5059
5057 5060 // Float Load
5058 5061 pipe_class floadD_mem(regD dst, memory mem) %{
5059 5062 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
5060 5063 mem : R(read);
5061 5064 dst : M(write);
5062 5065 MS : R;
5063 5066 %}
5064 5067
5065 5068 // Float Load
5066 5069 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
5067 5070 single_instruction;
5068 5071 stkSlot : R(read);
5069 5072 dst : M(write);
5070 5073 MS : R;
5071 5074 %}
5072 5075
5073 5076 // Float Load
5074 5077 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
5075 5078 single_instruction;
5076 5079 stkSlot : R(read);
5077 5080 dst : M(write);
5078 5081 MS : R;
5079 5082 %}
5080 5083
5081 5084 // Memory Nop
5082 5085 pipe_class mem_nop() %{
5083 5086 single_instruction;
5084 5087 MS : R;
5085 5088 %}
5086 5089
5087 5090 pipe_class sethi(iRegP dst, immI src) %{
5088 5091 single_instruction;
5089 5092 dst : E(write);
5090 5093 IALU : R;
5091 5094 %}
5092 5095
5093 5096 pipe_class loadPollP(iRegP poll) %{
5094 5097 single_instruction;
5095 5098 poll : R(read);
5096 5099 MS : R;
5097 5100 %}
5098 5101
5099 5102 pipe_class br(Universe br, label labl) %{
5100 5103 single_instruction_with_delay_slot;
5101 5104 BR : R;
5102 5105 %}
5103 5106
5104 5107 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
5105 5108 single_instruction_with_delay_slot;
5106 5109 cr : E(read);
5107 5110 BR : R;
5108 5111 %}
5109 5112
5110 5113 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
5111 5114 single_instruction_with_delay_slot;
5112 5115 op1 : E(read);
5113 5116 BR : R;
5114 5117 MS : R;
5115 5118 %}
5116 5119
5117 5120 // Compare and branch
5118 5121 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
5119 5122 instruction_count(2); has_delay_slot;
5120 5123 cr : E(write);
5121 5124 src1 : R(read);
5122 5125 src2 : R(read);
5123 5126 IALU : R;
5124 5127 BR : R;
5125 5128 %}
5126 5129
5127 5130 // Compare and branch
5128 5131 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
5129 5132 instruction_count(2); has_delay_slot;
5130 5133 cr : E(write);
5131 5134 src1 : R(read);
5132 5135 IALU : R;
5133 5136 BR : R;
5134 5137 %}
5135 5138
5136 5139 // Compare and branch using cbcond
5137 5140 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
5138 5141 single_instruction;
5139 5142 src1 : E(read);
5140 5143 src2 : E(read);
5141 5144 IALU : R;
5142 5145 BR : R;
5143 5146 %}
5144 5147
5145 5148 // Compare and branch using cbcond
5146 5149 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
5147 5150 single_instruction;
5148 5151 src1 : E(read);
5149 5152 IALU : R;
5150 5153 BR : R;
5151 5154 %}
5152 5155
5153 5156 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
5154 5157 single_instruction_with_delay_slot;
5155 5158 cr : E(read);
5156 5159 BR : R;
5157 5160 %}
5158 5161
5159 5162 pipe_class br_nop() %{
5160 5163 single_instruction;
5161 5164 BR : R;
5162 5165 %}
5163 5166
5164 5167 pipe_class simple_call(method meth) %{
5165 5168 instruction_count(2); multiple_bundles; force_serialization;
5166 5169 fixed_latency(100);
5167 5170 BR : R(1);
5168 5171 MS : R(1);
5169 5172 A0 : R(1);
5170 5173 %}
5171 5174
5172 5175 pipe_class compiled_call(method meth) %{
5173 5176 instruction_count(1); multiple_bundles; force_serialization;
5174 5177 fixed_latency(100);
5175 5178 MS : R(1);
5176 5179 %}
5177 5180
5178 5181 pipe_class call(method meth) %{
5179 5182 instruction_count(0); multiple_bundles; force_serialization;
5180 5183 fixed_latency(100);
5181 5184 %}
5182 5185
5183 5186 pipe_class tail_call(Universe ignore, label labl) %{
5184 5187 single_instruction; has_delay_slot;
5185 5188 fixed_latency(100);
5186 5189 BR : R(1);
5187 5190 MS : R(1);
5188 5191 %}
5189 5192
5190 5193 pipe_class ret(Universe ignore) %{
5191 5194 single_instruction; has_delay_slot;
5192 5195 BR : R(1);
5193 5196 MS : R(1);
5194 5197 %}
5195 5198
5196 5199 pipe_class ret_poll(g3RegP poll) %{
5197 5200 instruction_count(3); has_delay_slot;
5198 5201 poll : E(read);
5199 5202 MS : R;
5200 5203 %}
5201 5204
5202 5205 // The real do-nothing guy
5203 5206 pipe_class empty( ) %{
5204 5207 instruction_count(0);
5205 5208 %}
5206 5209
5207 5210 pipe_class long_memory_op() %{
5208 5211 instruction_count(0); multiple_bundles; force_serialization;
5209 5212 fixed_latency(25);
5210 5213 MS : R(1);
5211 5214 %}
5212 5215
5213 5216 // Check-cast
5214 5217 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
5215 5218 array : R(read);
5216 5219 match : R(read);
5217 5220 IALU : R(2);
5218 5221 BR : R(2);
5219 5222 MS : R;
5220 5223 %}
5221 5224
// Convert FPU flags into +1,0,-1
// Multi-unit sequence: the FP-add unit (FA) is used in cycle R for the
// compare, then the memory (MS) and branch (BR) units are occupied for
// the following two cycles while the integer result is produced in dst.
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1  : E(read);
    src2  : E(read);
    dst   : E(write);
    FA    : R;
    MS    : R(2);
    BR    : R(2);
%}
5231 5234
// Compare for p < q, and conditionally add y
// All three operands are read in stage E; the integer ALU is occupied
// for three cycles starting at R.
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p     : E(read);
    q     : E(read);
    y     : E(read);
    IALU  : R(3);   // terminated like every other resource-usage statement
%}
5239 5242
5240 5243 // Perform a compare, then move conditionally in a branch delay slot.
5241 5244 pipe_class min_max( iRegI src2, iRegI srcdst ) %{
5242 5245 src2 : E(read);
5243 5246 srcdst : E(read);
5244 5247 IALU : R;
5245 5248 BR : R;
5246 5249 %}
5247 5250
5248 5251 // Define the class for the Nop node
5249 5252 define %{
5250 5253 MachNop = ialu_nop;
5251 5254 %}
5252 5255
5253 5256 %}
5254 5257
5255 5258 //----------INSTRUCTIONS-------------------------------------------------------
5256 5259
5257 5260 //------------Special Stack Slot instructions - no match rules-----------------
5258 5261 instruct stkI_to_regF(regF dst, stackSlotI src) %{
5259 5262 // No match rule to avoid chain rule match.
5260 5263 effect(DEF dst, USE src);
5261 5264 ins_cost(MEMORY_REF_COST);
5262 5265 format %{ "LDF $src,$dst\t! stkI to regF" %}
5263 5266 opcode(Assembler::ldf_op3);
5264 5267 ins_encode(simple_form3_mem_reg(src, dst));
5265 5268 ins_pipe(floadF_stk);
5266 5269 %}
5267 5270
5268 5271 instruct stkL_to_regD(regD dst, stackSlotL src) %{
5269 5272 // No match rule to avoid chain rule match.
5270 5273 effect(DEF dst, USE src);
5271 5274 ins_cost(MEMORY_REF_COST);
5272 5275 format %{ "LDDF $src,$dst\t! stkL to regD" %}
5273 5276 opcode(Assembler::lddf_op3);
5274 5277 ins_encode(simple_form3_mem_reg(src, dst));
5275 5278 ins_pipe(floadD_stk);
5276 5279 %}
5277 5280
5278 5281 instruct regF_to_stkI(stackSlotI dst, regF src) %{
5279 5282 // No match rule to avoid chain rule match.
5280 5283 effect(DEF dst, USE src);
5281 5284 ins_cost(MEMORY_REF_COST);
5282 5285 format %{ "STF $src,$dst\t! regF to stkI" %}
5283 5286 opcode(Assembler::stf_op3);
5284 5287 ins_encode(simple_form3_mem_reg(dst, src));
5285 5288 ins_pipe(fstoreF_stk_reg);
5286 5289 %}
5287 5290
5288 5291 instruct regD_to_stkL(stackSlotL dst, regD src) %{
5289 5292 // No match rule to avoid chain rule match.
5290 5293 effect(DEF dst, USE src);
5291 5294 ins_cost(MEMORY_REF_COST);
5292 5295 format %{ "STDF $src,$dst\t! regD to stkL" %}
5293 5296 opcode(Assembler::stdf_op3);
5294 5297 ins_encode(simple_form3_mem_reg(dst, src));
5295 5298 ins_pipe(fstoreD_stk_reg);
5296 5299 %}
5297 5300
5298 5301 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
5299 5302 effect(DEF dst, USE src);
5300 5303 ins_cost(MEMORY_REF_COST*2);
5301 5304 format %{ "STW $src,$dst.hi\t! long\n\t"
5302 5305 "STW R_G0,$dst.lo" %}
5303 5306 opcode(Assembler::stw_op3);
5304 5307 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
5305 5308 ins_pipe(lstoreI_stk_reg);
5306 5309 %}
5307 5310
5308 5311 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
5309 5312 // No match rule to avoid chain rule match.
5310 5313 effect(DEF dst, USE src);
5311 5314 ins_cost(MEMORY_REF_COST);
5312 5315 format %{ "STX $src,$dst\t! regL to stkD" %}
5313 5316 opcode(Assembler::stx_op3);
5314 5317 ins_encode(simple_form3_mem_reg( dst, src ) );
5315 5318 ins_pipe(istore_stk_reg);
5316 5319 %}
5317 5320
5318 5321 //---------- Chain stack slots between similar types --------
5319 5322
5320 5323 // Load integer from stack slot
5321 5324 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
5322 5325 match(Set dst src);
5323 5326 ins_cost(MEMORY_REF_COST);
5324 5327
5325 5328 format %{ "LDUW $src,$dst\t!stk" %}
5326 5329 opcode(Assembler::lduw_op3);
5327 5330 ins_encode(simple_form3_mem_reg( src, dst ) );
5328 5331 ins_pipe(iload_mem);
5329 5332 %}
5330 5333
5331 5334 // Store integer to stack slot
5332 5335 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
5333 5336 match(Set dst src);
5334 5337 ins_cost(MEMORY_REF_COST);
5335 5338
5336 5339 format %{ "STW $src,$dst\t!stk" %}
5337 5340 opcode(Assembler::stw_op3);
5338 5341 ins_encode(simple_form3_mem_reg( dst, src ) );
5339 5342 ins_pipe(istore_mem_reg);
5340 5343 %}
5341 5344
5342 5345 // Load long from stack slot
5343 5346 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
5344 5347 match(Set dst src);
5345 5348
5346 5349 ins_cost(MEMORY_REF_COST);
5347 5350 format %{ "LDX $src,$dst\t! long" %}
5348 5351 opcode(Assembler::ldx_op3);
5349 5352 ins_encode(simple_form3_mem_reg( src, dst ) );
5350 5353 ins_pipe(iload_mem);
5351 5354 %}
5352 5355
5353 5356 // Store long to stack slot
5354 5357 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
5355 5358 match(Set dst src);
5356 5359
5357 5360 ins_cost(MEMORY_REF_COST);
5358 5361 format %{ "STX $src,$dst\t! long" %}
5359 5362 opcode(Assembler::stx_op3);
5360 5363 ins_encode(simple_form3_mem_reg( dst, src ) );
5361 5364 ins_pipe(istore_mem_reg);
5362 5365 %}
5363 5366
5364 5367 #ifdef _LP64
5365 5368 // Load pointer from stack slot, 64-bit encoding
5366 5369 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5367 5370 match(Set dst src);
5368 5371 ins_cost(MEMORY_REF_COST);
5369 5372 format %{ "LDX $src,$dst\t!ptr" %}
5370 5373 opcode(Assembler::ldx_op3);
5371 5374 ins_encode(simple_form3_mem_reg( src, dst ) );
5372 5375 ins_pipe(iload_mem);
5373 5376 %}
5374 5377
5375 5378 // Store pointer to stack slot
5376 5379 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5377 5380 match(Set dst src);
5378 5381 ins_cost(MEMORY_REF_COST);
5379 5382 format %{ "STX $src,$dst\t!ptr" %}
5380 5383 opcode(Assembler::stx_op3);
5381 5384 ins_encode(simple_form3_mem_reg( dst, src ) );
5382 5385 ins_pipe(istore_mem_reg);
5383 5386 %}
5384 5387 #else // _LP64
5385 5388 // Load pointer from stack slot, 32-bit encoding
5386 5389 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5387 5390 match(Set dst src);
5388 5391 ins_cost(MEMORY_REF_COST);
5389 5392 format %{ "LDUW $src,$dst\t!ptr" %}
5390 5393 opcode(Assembler::lduw_op3, Assembler::ldst_op);
5391 5394 ins_encode(simple_form3_mem_reg( src, dst ) );
5392 5395 ins_pipe(iload_mem);
5393 5396 %}
5394 5397
5395 5398 // Store pointer to stack slot
5396 5399 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5397 5400 match(Set dst src);
5398 5401 ins_cost(MEMORY_REF_COST);
5399 5402 format %{ "STW $src,$dst\t!ptr" %}
5400 5403 opcode(Assembler::stw_op3, Assembler::ldst_op);
5401 5404 ins_encode(simple_form3_mem_reg( dst, src ) );
5402 5405 ins_pipe(istore_mem_reg);
5403 5406 %}
5404 5407 #endif // _LP64
5405 5408
5406 5409 //------------Special Nop instructions for bundling - no match rules-----------
5407 5410 // Nop using the A0 functional unit
5408 5411 instruct Nop_A0() %{
5409 5412 ins_cost(0);
5410 5413
5411 5414 format %{ "NOP ! Alu Pipeline" %}
5412 5415 opcode(Assembler::or_op3, Assembler::arith_op);
5413 5416 ins_encode( form2_nop() );
5414 5417 ins_pipe(ialu_nop_A0);
5415 5418 %}
5416 5419
5417 5420 // Nop using the A1 functional unit
5418 5421 instruct Nop_A1( ) %{
5419 5422 ins_cost(0);
5420 5423
5421 5424 format %{ "NOP ! Alu Pipeline" %}
5422 5425 opcode(Assembler::or_op3, Assembler::arith_op);
5423 5426 ins_encode( form2_nop() );
5424 5427 ins_pipe(ialu_nop_A1);
5425 5428 %}
5426 5429
5427 5430 // Nop using the memory functional unit
5428 5431 instruct Nop_MS( ) %{
5429 5432 ins_cost(0);
5430 5433
5431 5434 format %{ "NOP ! Memory Pipeline" %}
5432 5435 ins_encode( emit_mem_nop );
5433 5436 ins_pipe(mem_nop);
5434 5437 %}
5435 5438
5436 5439 // Nop using the floating add functional unit
5437 5440 instruct Nop_FA( ) %{
5438 5441 ins_cost(0);
5439 5442
5440 5443 format %{ "NOP ! Floating Add Pipeline" %}
5441 5444 ins_encode( emit_fadd_nop );
5442 5445 ins_pipe(fadd_nop);
5443 5446 %}
5444 5447
5445 5448 // Nop using the branch functional unit
5446 5449 instruct Nop_BR( ) %{
5447 5450 ins_cost(0);
5448 5451
5449 5452 format %{ "NOP ! Branch Pipeline" %}
5450 5453 ins_encode( emit_br_nop );
5451 5454 ins_pipe(br_nop);
5452 5455 %}
5453 5456
5454 5457 //----------Load/Store/Move Instructions---------------------------------------
5455 5458 //----------Load Instructions--------------------------------------------------
5456 5459 // Load Byte (8bit signed)
5457 5460 instruct loadB(iRegI dst, memory mem) %{
5458 5461 match(Set dst (LoadB mem));
5459 5462 ins_cost(MEMORY_REF_COST);
5460 5463
5461 5464 size(4);
5462 5465 format %{ "LDSB $mem,$dst\t! byte" %}
5463 5466 ins_encode %{
5464 5467 __ ldsb($mem$$Address, $dst$$Register);
5465 5468 %}
5466 5469 ins_pipe(iload_mask_mem);
5467 5470 %}
5468 5471
5469 5472 // Load Byte (8bit signed) into a Long Register
5470 5473 instruct loadB2L(iRegL dst, memory mem) %{
5471 5474 match(Set dst (ConvI2L (LoadB mem)));
5472 5475 ins_cost(MEMORY_REF_COST);
5473 5476
5474 5477 size(4);
5475 5478 format %{ "LDSB $mem,$dst\t! byte -> long" %}
5476 5479 ins_encode %{
5477 5480 __ ldsb($mem$$Address, $dst$$Register);
5478 5481 %}
5479 5482 ins_pipe(iload_mask_mem);
5480 5483 %}
5481 5484
5482 5485 // Load Unsigned Byte (8bit UNsigned) into an int reg
5483 5486 instruct loadUB(iRegI dst, memory mem) %{
5484 5487 match(Set dst (LoadUB mem));
5485 5488 ins_cost(MEMORY_REF_COST);
5486 5489
5487 5490 size(4);
5488 5491 format %{ "LDUB $mem,$dst\t! ubyte" %}
5489 5492 ins_encode %{
5490 5493 __ ldub($mem$$Address, $dst$$Register);
5491 5494 %}
5492 5495 ins_pipe(iload_mem);
5493 5496 %}
5494 5497
5495 5498 // Load Unsigned Byte (8bit UNsigned) into a Long Register
5496 5499 instruct loadUB2L(iRegL dst, memory mem) %{
5497 5500 match(Set dst (ConvI2L (LoadUB mem)));
5498 5501 ins_cost(MEMORY_REF_COST);
5499 5502
5500 5503 size(4);
5501 5504 format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
5502 5505 ins_encode %{
5503 5506 __ ldub($mem$$Address, $dst$$Register);
5504 5507 %}
5505 5508 ins_pipe(iload_mem);
5506 5509 %}
5507 5510
5508 5511 // Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register
5509 5512 instruct loadUB2L_immI(iRegL dst, memory mem, immI mask) %{
5510 5513 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5511 5514 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5512 5515
5513 5516 size(2*4);
5514 5517 format %{ "LDUB $mem,$dst\t# ubyte & 32-bit mask -> long\n\t"
5515 5518 "AND $dst,right_n_bits($mask, 8),$dst" %}
5516 5519 ins_encode %{
5517 5520 __ ldub($mem$$Address, $dst$$Register);
5518 5521 __ and3($dst$$Register, $mask$$constant & right_n_bits(8), $dst$$Register);
5519 5522 %}
5520 5523 ins_pipe(iload_mem);
5521 5524 %}
5522 5525
5523 5526 // Load Short (16bit signed)
5524 5527 instruct loadS(iRegI dst, memory mem) %{
5525 5528 match(Set dst (LoadS mem));
5526 5529 ins_cost(MEMORY_REF_COST);
5527 5530
5528 5531 size(4);
5529 5532 format %{ "LDSH $mem,$dst\t! short" %}
5530 5533 ins_encode %{
5531 5534 __ ldsh($mem$$Address, $dst$$Register);
5532 5535 %}
5533 5536 ins_pipe(iload_mask_mem);
5534 5537 %}
5535 5538
// Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (LoadS << 24) >> 24, i.e. sign-extension of the short's low-order
// byte, and implements it as a single LDSB of just that byte.  On big-endian
// SPARC the low byte of a 16-bit datum lives at offset +1 (same trick as
// loadUS2L_immI_255).
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);  // load only the sign-carrying low byte
  %}
  ins_pipe(iload_mask_mem);
%}
5549 5552
5550 5553 // Load Short (16bit signed) into a Long Register
5551 5554 instruct loadS2L(iRegL dst, memory mem) %{
5552 5555 match(Set dst (ConvI2L (LoadS mem)));
5553 5556 ins_cost(MEMORY_REF_COST);
5554 5557
5555 5558 size(4);
5556 5559 format %{ "LDSH $mem,$dst\t! short -> long" %}
5557 5560 ins_encode %{
5558 5561 __ ldsh($mem$$Address, $dst$$Register);
5559 5562 %}
5560 5563 ins_pipe(iload_mask_mem);
5561 5564 %}
5562 5565
5563 5566 // Load Unsigned Short/Char (16bit UNsigned)
5564 5567 instruct loadUS(iRegI dst, memory mem) %{
5565 5568 match(Set dst (LoadUS mem));
5566 5569 ins_cost(MEMORY_REF_COST);
5567 5570
5568 5571 size(4);
5569 5572 format %{ "LDUH $mem,$dst\t! ushort/char" %}
5570 5573 ins_encode %{
5571 5574 __ lduh($mem$$Address, $dst$$Register);
5572 5575 %}
5573 5576 ins_pipe(iload_mem);
5574 5577 %}
5575 5578
5576 5579 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5577 5580 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5578 5581 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5579 5582 ins_cost(MEMORY_REF_COST);
5580 5583
5581 5584 size(4);
5582 5585 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
5583 5586 ins_encode %{
5584 5587 __ ldsb($mem$$Address, $dst$$Register, 1);
5585 5588 %}
5586 5589 ins_pipe(iload_mask_mem);
5587 5590 %}
5588 5591
5589 5592 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register
5590 5593 instruct loadUS2L(iRegL dst, memory mem) %{
5591 5594 match(Set dst (ConvI2L (LoadUS mem)));
5592 5595 ins_cost(MEMORY_REF_COST);
5593 5596
5594 5597 size(4);
5595 5598 format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
5596 5599 ins_encode %{
5597 5600 __ lduh($mem$$Address, $dst$$Register);
5598 5601 %}
5599 5602 ins_pipe(iload_mem);
5600 5603 %}
5601 5604
5602 5605 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
// NOTE: only the low byte survives the 0xFF mask, so load just that byte
// (unsigned) instead of load+and.
5603 5606 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5604 5607 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5605 5608 ins_cost(MEMORY_REF_COST);
5606 5609
5607 5610 size(4);
5608 5611 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
5609 5612 ins_encode %{
5610 5613 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
5611 5614 %}
5612 5615 ins_pipe(iload_mem);
5613 5616 %}
5614 5617
5615 5618 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
// NOTE: two fixed instructions — LDUH plus one AND whose mask fits the
// immediate form of and3 (immI13 operand guarantees simm13 range).
5616 5619 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5617 5620 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5618 5621 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5619 5622
5620 5623 size(2*4);
5621 5624 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
5622 5625 "AND $dst,$mask,$dst" %}
5623 5626 ins_encode %{
5624 5627 Register Rdst = $dst$$Register;
5625 5628 __ lduh($mem$$Address, Rdst);
5626 5629 __ and3(Rdst, $mask$$constant, Rdst);
5627 5630 %}
5628 5631 ins_pipe(iload_mem);
5629 5632 %}
5630 5633
5631 5634 // Load Unsigned Short/Char (16bit UNsigned) with a 32-bit mask into a Long Register
// NOTE: the loaded value has only 16 significant bits, so the arbitrary 32-bit
// mask is pre-truncated with right_n_bits(16) before being materialized into
// tmp — the SET then never needs more than a 16-bit constant. No size() is
// declared because set() emits a variable-length sequence.
5632 5635 instruct loadUS2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
5633 5636 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5634 5637 effect(TEMP dst, TEMP tmp);
5635 5638 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5636 5639
5637 5640 format %{ "LDUH $mem,$dst\t! ushort/char & 32-bit mask -> long\n\t"
5638 5641 "SET right_n_bits($mask, 16),$tmp\n\t"
5639 5642 "AND $dst,$tmp,$dst" %}
5640 5643 ins_encode %{
5641 5644 Register Rdst = $dst$$Register;
5642 5645 Register Rtmp = $tmp$$Register;
5643 5646 __ lduh($mem$$Address, Rdst);
5644 5647 __ set($mask$$constant & right_n_bits(16), Rtmp);
5645 5648 __ and3(Rdst, Rtmp, Rdst);
5646 5649 %}
5647 5650 ins_pipe(iload_mem);
5648 5651 %}
5649 5652
5650 5653 // Load Integer
// NOTE: plain 32-bit word load (unsigned form, LDUW).
5651 5654 instruct loadI(iRegI dst, memory mem) %{
5652 5655 match(Set dst (LoadI mem));
5653 5656 ins_cost(MEMORY_REF_COST);
5654 5657
5655 5658 size(4);
5656 5659 format %{ "LDUW $mem,$dst\t! int" %}
5657 5660 ins_encode %{
5658 5661 __ lduw($mem$$Address, $dst$$Register);
5659 5662 %}
5660 5663 ins_pipe(iload_mem);
5661 5664 %}
5662 5665
5663 5666 // Load Integer to Byte (8 bit signed)
// NOTE: folds (LoadI << 24) >> 24 into one signed byte load of the word's
// low-order byte; on big-endian SPARC that byte sits at offset +3.
5664 5667 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5665 5668 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5666 5669 ins_cost(MEMORY_REF_COST);
5667 5670
5668 5671 size(4);
5669 5672
5670 5673 format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
5671 5674 ins_encode %{
5672 5675 __ ldsb($mem$$Address, $dst$$Register, 3);
5673 5676 %}
5674 5677 ins_pipe(iload_mask_mem);
5675 5678 %}
5676 5679
5677 5680 // Load Integer to Unsigned Byte (8 bit UNsigned)
// NOTE: mask 0xFF folded into a single unsigned byte load at offset +3
// (low-order byte of the word on big-endian SPARC).
5678 5681 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
5679 5682 match(Set dst (AndI (LoadI mem) mask));
5680 5683 ins_cost(MEMORY_REF_COST);
5681 5684
5682 5685 size(4);
5683 5686
5684 5687 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
5685 5688 ins_encode %{
5686 5689 __ ldub($mem$$Address, $dst$$Register, 3);
5687 5690 %}
5688 5691 ins_pipe(iload_mask_mem);
5689 5692 %}
5690 5693
5691 5694 // Load Integer to Short (16 bit signed)
// NOTE: folds (LoadI << 16) >> 16 into one signed halfword load; the low
// halfword of the word is at offset +2 on big-endian SPARC.
5692 5695 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
5693 5696 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5694 5697 ins_cost(MEMORY_REF_COST);
5695 5698
5696 5699 size(4);
5697 5700
5698 5701 format %{ "LDSH $mem+2,$dst\t! int -> short" %}
5699 5702 ins_encode %{
5700 5703 __ ldsh($mem$$Address, $dst$$Register, 2);
5701 5704 %}
5702 5705 ins_pipe(iload_mask_mem);
5703 5706 %}
5704 5707
5705 5708 // Load Integer to Unsigned Short (16 bit UNsigned)
// NOTE: mask 0xFFFF folded into a single unsigned halfword load at offset +2.
5706 5709 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
5707 5710 match(Set dst (AndI (LoadI mem) mask));
5708 5711 ins_cost(MEMORY_REF_COST);
5709 5712
5710 5713 size(4);
5711 5714
5712 5715 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
5713 5716 ins_encode %{
5714 5717 __ lduh($mem$$Address, $dst$$Register, 2);
5715 5718 %}
5716 5719 ins_pipe(iload_mask_mem);
5717 5720 %}
5718 5721
5719 5722 // Load Integer into a Long Register
// NOTE: LDSW sign-extends the 32-bit value, implementing ConvI2L in the load itself.
5720 5723 instruct loadI2L(iRegL dst, memory mem) %{
5721 5724 match(Set dst (ConvI2L (LoadI mem)));
5722 5725 ins_cost(MEMORY_REF_COST);
5723 5726
5724 5727 size(4);
5725 5728 format %{ "LDSW $mem,$dst\t! int -> long" %}
5726 5729 ins_encode %{
5727 5730 __ ldsw($mem$$Address, $dst$$Register);
5728 5731 %}
5729 5732 ins_pipe(iload_mask_mem);
5730 5733 %}
5731 5734
5732 5735 // Load Integer with mask 0xFF into a Long Register
// NOTE: single narrow unsigned load replaces load+mask+extend.
5733 5736 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5734 5737 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5735 5738 ins_cost(MEMORY_REF_COST);
5736 5739
5737 5740 size(4);
5738 5741 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
5739 5742 ins_encode %{
5740 5743 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
5741 5744 %}
5742 5745 ins_pipe(iload_mem);
5743 5746 %}
5744 5747
5745 5748 // Load Integer with mask 0xFFFF into a Long Register
// NOTE: single unsigned halfword load replaces load+mask+extend.
5746 5749 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
5747 5750 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5748 5751 ins_cost(MEMORY_REF_COST);
5749 5752
5750 5753 size(4);
5751 5754 format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
5752 5755 ins_encode %{
5753 5756 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
5754 5757 %}
5755 5758 ins_pipe(iload_mem);
5756 5759 %}
5757 5760
5758 5761 // Load Integer with a 12-bit mask into a Long Register
// NOTE: immU12 keeps the mask non-negative and within and3's simm13
// immediate field, so the AND is a single instruction (fixed size 2*4).
5759 5762 instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{
5760 5763 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5761 5764 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5762 5765
5763 5766 size(2*4);
5764 5767 format %{ "LDUW $mem,$dst\t! int & 12-bit mask -> long\n\t"
5765 5768 "AND $dst,$mask,$dst" %}
5766 5769 ins_encode %{
5767 5770 Register Rdst = $dst$$Register;
5768 5771 __ lduw($mem$$Address, Rdst);
5769 5772 __ and3(Rdst, $mask$$constant, Rdst);
5770 5773 %}
5771 5774 ins_pipe(iload_mem);
5772 5775 %}
5773 5776
5774 5777 // Load Integer with a 31-bit mask into a Long Register
// NOTE: mask too wide for an immediate — materialized into tmp with set(),
// which emits a variable-length sequence, hence no size() declaration.
// TEMP dst allows dst to be written before the last use of mem.
5775 5778 instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{
5776 5779 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5777 5780 effect(TEMP dst, TEMP tmp);
5778 5781 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5779 5782
5780 5783 format %{ "LDUW $mem,$dst\t! int & 31-bit mask -> long\n\t"
5781 5784 "SET $mask,$tmp\n\t"
5782 5785 "AND $dst,$tmp,$dst" %}
5783 5786 ins_encode %{
5784 5787 Register Rdst = $dst$$Register;
5785 5788 Register Rtmp = $tmp$$Register;
5786 5789 __ lduw($mem$$Address, Rdst);
5787 5790 __ set($mask$$constant, Rtmp);
5788 5791 __ and3(Rdst, Rtmp, Rdst);
5789 5792 %}
5790 5793 ins_pipe(iload_mem);
5791 5794 %}
5792 5795
5793 5796 // Load Unsigned Integer into a Long Register
// NOTE: (ConvI2L x) & 0xFFFFFFFF collapses to one LDUW — the unsigned word
// load already leaves the upper 32 bits zero.
5794 5797 instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
5795 5798 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
5796 5799 ins_cost(MEMORY_REF_COST);
5797 5800
5798 5801 size(4);
5799 5802 format %{ "LDUW $mem,$dst\t! uint -> long" %}
5800 5803 ins_encode %{
5801 5804 __ lduw($mem$$Address, $dst$$Register);
5802 5805 %}
5803 5806 ins_pipe(iload_mem);
5804 5807 %}
5805 5808
5806 5809 // Load Long - aligned
// NOTE: a single 64-bit LDX; alignment is a precondition (see loadL_unaligned below).
5807 5810 instruct loadL(iRegL dst, memory mem ) %{
5808 5811 match(Set dst (LoadL mem));
5809 5812 ins_cost(MEMORY_REF_COST);
5810 5813
5811 5814 size(4);
5812 5815 format %{ "LDX $mem,$dst\t! long" %}
5813 5816 ins_encode %{
5814 5817 __ ldx($mem$$Address, $dst$$Register);
5815 5818 %}
5816 5819 ins_pipe(iload_mem);
5817 5820 %}
5818 5821
5819 5822 // Load Long - UNaligned
// NOTE: assembles the long from two 32-bit word loads: low word into O7
// (declared clobbered via KILL tmp), high word into dst, then dst = (dst << 32) | O7,
// per the format string. Encoding is generated by form3_mem_reg_long_unaligned_marshal.
5820 5823 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
5821 5824 match(Set dst (LoadL_unaligned mem));
5822 5825 effect(KILL tmp);
5823 5826 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
5824 5827 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
5825 5828 "\tLDUW $mem ,$dst\n"
5826 5829 "\tSLLX #32, $dst, $dst\n"
5827 5830 "\tOR $dst, R_O7, $dst" %}
5828 5831 opcode(Assembler::lduw_op3);
5829 5832 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
5830 5833 ins_pipe(iload_mem);
5831 5834 %}
5832 5835
5833 5836 // Load Range
// NOTE: loads the array-range (length) word with an unsigned 32-bit load.
5834 5837 instruct loadRange(iRegI dst, memory mem) %{
5835 5838 match(Set dst (LoadRange mem));
5836 5839 ins_cost(MEMORY_REF_COST);
5837 5840
5838 5841 format %{ "LDUW $mem,$dst\t! range" %}
5839 5842 opcode(Assembler::lduw_op3);
5840 5843 ins_encode(simple_form3_mem_reg( mem, dst ) );
5841 5844 ins_pipe(iload_mem);
5842 5845 %}
5843 5846
5844 5847 // Load Integer into %f register (for fitos/fitod)
// NOTE: same LoadI ideal node as loadI above, but matched into a float
// register (LDF) so a following int->float conversion can consume it directly.
5845 5848 instruct loadI_freg(regF dst, memory mem) %{
5846 5849 match(Set dst (LoadI mem));
5847 5850 ins_cost(MEMORY_REF_COST);
5848 5851
5849 5852 format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
5850 5853 opcode(Assembler::ldf_op3);
5851 5854 ins_encode(simple_form3_mem_reg( mem, dst ) );
5852 5855 ins_pipe(floadF_mem);
5853 5856 %}
5854 5857
5855 5858 // Load Pointer
// NOTE: pointer width chosen at build time — 32-bit LDUW for !_LP64,
// 64-bit LDX for _LP64; both forms are one instruction (size(4)).
5856 5859 instruct loadP(iRegP dst, memory mem) %{
5857 5860 match(Set dst (LoadP mem));
5858 5861 ins_cost(MEMORY_REF_COST);
5859 5862 size(4);
5860 5863
5861 5864 #ifndef _LP64
5862 5865 format %{ "LDUW $mem,$dst\t! ptr" %}
5863 5866 ins_encode %{
5864 5867 __ lduw($mem$$Address, $dst$$Register);
5865 5868 %}
5866 5869 #else
5867 5870 format %{ "LDX $mem,$dst\t! ptr" %}
5868 5871 ins_encode %{
5869 5872 __ ldx($mem$$Address, $dst$$Register);
5870 5873 %}
5871 5874 #endif
5872 5875 ins_pipe(iload_mem);
5873 5876 %}
5874 5877
5875 5878 // Load Compressed Pointer
// NOTE: a compressed oop is a 32-bit word, so LDUW suffices regardless of _LP64.
5876 5879 instruct loadN(iRegN dst, memory mem) %{
5877 5880 match(Set dst (LoadN mem));
5878 5881 ins_cost(MEMORY_REF_COST);
5879 5882 size(4);
5880 5883
5881 5884 format %{ "LDUW $mem,$dst\t! compressed ptr" %}
5882 5885 ins_encode %{
5883 5886 __ lduw($mem$$Address, $dst$$Register);
5884 5887 %}
5885 5888 ins_pipe(iload_mem);
5886 5889 %}
5887 5890
5888 5891 // Load Klass Pointer
// NOTE: same build-time LDUW/LDX split as loadP, applied to the klass field.
5889 5892 instruct loadKlass(iRegP dst, memory mem) %{
5890 5893 match(Set dst (LoadKlass mem));
5891 5894 ins_cost(MEMORY_REF_COST);
5892 5895 size(4);
5893 5896
5894 5897 #ifndef _LP64
5895 5898 format %{ "LDUW $mem,$dst\t! klass ptr" %}
5896 5899 ins_encode %{
5897 5900 __ lduw($mem$$Address, $dst$$Register);
5898 5901 %}
5899 5902 #else
5900 5903 format %{ "LDX $mem,$dst\t! klass ptr" %}
5901 5904 ins_encode %{
5902 5905 __ ldx($mem$$Address, $dst$$Register);
5903 5906 %}
5904 5907 #endif
5905 5908 ins_pipe(iload_mem);
5906 5909 %}
5907 5910
5908 5911 // Load narrow Klass Pointer
// NOTE: narrow klass is a 32-bit word, loaded unsigned like a compressed oop.
5909 5912 instruct loadNKlass(iRegN dst, memory mem) %{
5910 5913 match(Set dst (LoadNKlass mem));
5911 5914 ins_cost(MEMORY_REF_COST);
5912 5915 size(4);
5913 5916
5914 5917 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
5915 5918 ins_encode %{
5916 5919 __ lduw($mem$$Address, $dst$$Register);
5917 5920 %}
5918 5921 ins_pipe(iload_mem);
5919 5922 %}
5920 5923
5921 5924 // Load Double
// NOTE: 64-bit float load (LDDF) into a double register pair.
5922 5925 instruct loadD(regD dst, memory mem) %{
5923 5926 match(Set dst (LoadD mem));
5924 5927 ins_cost(MEMORY_REF_COST);
5925 5928
5926 5929 format %{ "LDDF $mem,$dst" %}
5927 5930 opcode(Assembler::lddf_op3);
5928 5931 ins_encode(simple_form3_mem_reg( mem, dst ) );
5929 5932 ins_pipe(floadD_mem);
5930 5933 %}
5931 5934
5932 5935 // Load Double - UNaligned
// NOTE: split into two single-precision LDFs targeting the .hi and .lo halves
// of dst (see format). NOTE(review): the regD_low operand class presumably
// restricts dst to double registers whose 32-bit halves are individually
// addressable — confirm against the register definitions earlier in this file.
5933 5936 instruct loadD_unaligned(regD_low dst, memory mem ) %{
5934 5937 match(Set dst (LoadD_unaligned mem));
5935 5938 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
5936 5939 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
5937 5940 "\tLDF $mem+4,$dst.lo\t!" %}
5938 5941 opcode(Assembler::ldf_op3);
5939 5942 ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
5940 5943 ins_pipe(iload_mem);
5941 5944 %}
5942 5945
5943 5946 // Load Float
// NOTE: 32-bit float load (LDF) into a single-precision register.
5944 5947 instruct loadF(regF dst, memory mem) %{
5945 5948 match(Set dst (LoadF mem));
5946 5949 ins_cost(MEMORY_REF_COST);
5947 5950
5948 5951 format %{ "LDF $mem,$dst" %}
5949 5952 opcode(Assembler::ldf_op3);
5950 5953 ins_encode(simple_form3_mem_reg( mem, dst ) );
5951 5954 ins_pipe(floadF_mem);
5952 5955 %}
5953 5956
5954 5957 // Load Constant
// NOTE: loadConI materializes an arbitrary 32-bit constant (Set32, hi/lo pair,
// cost 3/2); loadConI13 covers constants in simm13 range with a single
// instruction (Set13) and so wins when its operand predicate matches.
5955 5958 instruct loadConI( iRegI dst, immI src ) %{
5956 5959 match(Set dst src);
5957 5960 ins_cost(DEFAULT_COST * 3/2);
5958 5961 format %{ "SET $src,$dst" %}
5959 5962 ins_encode( Set32(src, dst) );
5960 5963 ins_pipe(ialu_hi_lo_reg);
5961 5964 %}
5962 5965
5963 5966 instruct loadConI13( iRegI dst, immI13 src ) %{
5964 5967 match(Set dst src);
5965 5968
5966 5969 size(4);
5967 5970 format %{ "MOV $src,$dst" %}
5968 5971 ins_encode( Set13( src, dst ) );
5969 5972 ins_pipe(ialu_imm);
5970 5973 %}
5971 5974
// NOTE: pointer-constant materialization. 32-bit builds use one generic
// loadConP that dispatches on the operand's reloc type (oop / metadata /
// plain). 64-bit builds split by cost into three rules selected via the
// immP_set / immP_load / immP_no_oop_cheap operand predicates:
// SET sequence, constant-table load, and cheap non-oop SET respectively.
5972 5975 #ifndef _LP64
5973 5976 instruct loadConP(iRegP dst, immP con) %{
5974 5977 match(Set dst con);
5975 5978 ins_cost(DEFAULT_COST * 3/2);
5976 5979 format %{ "SET $con,$dst\t!ptr" %}
5977 5980 ins_encode %{
// Oops and metadata need relocation records so the GC/runtime can patch them.
5978 5981 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
5979 5982 intptr_t val = $con$$constant;
5980 5983 if (constant_reloc == relocInfo::oop_type) {
5981 5984 __ set_oop_constant((jobject) val, $dst$$Register);
5982 5985 } else if (constant_reloc == relocInfo::metadata_type) {
5983 5986 __ set_metadata_constant((Metadata*)val, $dst$$Register);
5984 5987 } else { // non-oop pointers, e.g. card mark base, heap top
5985 5988 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
5986 5989 __ set(val, $dst$$Register);
5987 5990 }
5988 5991 %}
5989 5992 ins_pipe(loadConP);
5990 5993 %}
5991 5994 #else
5992 5995 instruct loadConP_set(iRegP dst, immP_set con) %{
5993 5996 match(Set dst con);
5994 5997 ins_cost(DEFAULT_COST * 3/2);
5995 5998 format %{ "SET $con,$dst\t! ptr" %}
5996 5999 ins_encode %{
5997 6000 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
5998 6001 intptr_t val = $con$$constant;
5999 6002 if (constant_reloc == relocInfo::oop_type) {
6000 6003 __ set_oop_constant((jobject) val, $dst$$Register);
6001 6004 } else if (constant_reloc == relocInfo::metadata_type) {
6002 6005 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6003 6006 } else { // non-oop pointers, e.g. card mark base, heap top
6004 6007 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6005 6008 __ set(val, $dst$$Register);
6006 6009 }
6007 6010 %}
6008 6011 ins_pipe(loadConP);
6009 6012 %}
6010 6013
// Pointer constant fetched from the nmethod constant table; dst doubles as the
// scratch register for an offset that does not fit simm13.
6011 6014 instruct loadConP_load(iRegP dst, immP_load con) %{
6012 6015 match(Set dst con);
6013 6016 ins_cost(MEMORY_REF_COST);
6014 6017 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
6015 6018 ins_encode %{
6016 6019 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6017 6020 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
6018 6021 %}
6019 6022 ins_pipe(loadConP);
6020 6023 %}
6021 6024
6022 6025 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
6023 6026 match(Set dst con);
6024 6027 ins_cost(DEFAULT_COST * 3/2);
6025 6028 format %{ "SET $con,$dst\t! non-oop ptr" %}
6026 6029 ins_encode %{
6027 6030 if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
6028 6031 __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
6029 6032 } else {
6030 6033 __ set($con$$constant, $dst$$Register);
6031 6034 }
6032 6035 %}
6033 6036 ins_pipe(loadConP);
6034 6037 %}
6035 6038 #endif // _LP64
6036 6039
// NOTE: loadConP0 materializes a null pointer with a single CLR.
// loadConP_poll builds the polling-page address with just a SETHI;
// NOTE(review): this relies on the low address bits not mattering for the
// polling page (page-aligned, any offset within it traps) — confirm against
// os::get_polling_page() and the safepoint polling code.
6037 6040 instruct loadConP0(iRegP dst, immP0 src) %{
6038 6041 match(Set dst src);
6039 6042
6040 6043 size(4);
6041 6044 format %{ "CLR $dst\t!ptr" %}
6042 6045 ins_encode %{
6043 6046 __ clr($dst$$Register);
6044 6047 %}
6045 6048 ins_pipe(ialu_imm);
6046 6049 %}
6047 6050
6048 6051 instruct loadConP_poll(iRegP dst, immP_poll src) %{
6049 6052 match(Set dst src);
6050 6053 ins_cost(DEFAULT_COST);
6051 6054 format %{ "SET $src,$dst\t!ptr" %}
6052 6055 ins_encode %{
6053 6056 AddressLiteral polling_page(os::get_polling_page());
6054 6057 __ sethi(polling_page, reg_to_register_object($dst$$reg));
6055 6058 %}
6056 6059 ins_pipe(loadConP_poll);
6057 6060 %}
6058 6061
// NOTE: compressed-constant materialization. Null narrow oop is a plain CLR;
// non-null narrow oops and narrow klass pointers go through set_narrow_oop /
// set_narrow_klass, which emit the relocatable hi/lo sequence.
6059 6062 instruct loadConN0(iRegN dst, immN0 src) %{
6060 6063 match(Set dst src);
6061 6064
6062 6065 size(4);
6063 6066 format %{ "CLR $dst\t! compressed NULL ptr" %}
6064 6067 ins_encode %{
6065 6068 __ clr($dst$$Register);
6066 6069 %}
6067 6070 ins_pipe(ialu_imm);
6068 6071 %}
6069 6072
6070 6073 instruct loadConN(iRegN dst, immN src) %{
6071 6074 match(Set dst src);
6072 6075 ins_cost(DEFAULT_COST * 3/2);
6073 6076 format %{ "SET $src,$dst\t! compressed ptr" %}
6074 6077 ins_encode %{
6075 6078 Register dst = $dst$$Register;
6076 6079 __ set_narrow_oop((jobject)$src$$constant, dst);
6077 6080 %}
6078 6081 ins_pipe(ialu_hi_lo_reg);
6079 6082 %}
6080 6083
6081 6084 instruct loadConNKlass(iRegN dst, immNKlass src) %{
6082 6085 match(Set dst src);
6083 6086 ins_cost(DEFAULT_COST * 3/2);
6084 6087 format %{ "SET $src,$dst\t! compressed klass ptr" %}
6085 6088 ins_encode %{
6086 6089 Register dst = $dst$$Register;
6087 6090 __ set_narrow_klass((Klass*)$src$$constant, dst);
6088 6091 %}
6089 6092 ins_pipe(ialu_hi_lo_reg);
6090 6093 %}
6091 6094
// NOTE: long-constant materialization, split by cost via the immL_* operand
// predicates: set64 inline sequence for "cheap" values (clobbers O7 as scratch),
// constant-table LDX for "expensive" ones, and single-instruction forms for
// zero (CLR) and simm13 (MOV) constants.
6092 6095 // Materialize long value (predicated by immL_cheap).
6093 6096 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
6094 6097 match(Set dst con);
6095 6098 effect(KILL tmp);
6096 6099 ins_cost(DEFAULT_COST * 3);
6097 6100 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
6098 6101 ins_encode %{
6099 6102 __ set64($con$$constant, $dst$$Register, $tmp$$Register);
6100 6103 %}
6101 6104 ins_pipe(loadConL);
6102 6105 %}
6103 6106
6104 6107 // Load long value from constant table (predicated by immL_expensive).
6105 6108 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
6106 6109 match(Set dst con);
6107 6110 ins_cost(MEMORY_REF_COST);
6108 6111 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
6109 6112 ins_encode %{
6110 6113 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6111 6114 __ ldx($constanttablebase, con_offset, $dst$$Register);
6112 6115 %}
6113 6116 ins_pipe(loadConL);
6114 6117 %}
6115 6118
6116 6119 instruct loadConL0( iRegL dst, immL0 src ) %{
6117 6120 match(Set dst src);
6118 6121 ins_cost(DEFAULT_COST);
6119 6122 size(4);
6120 6123 format %{ "CLR $dst\t! long" %}
6121 6124 ins_encode( Set13( src, dst ) );
6122 6125 ins_pipe(ialu_imm);
6123 6126 %}
6124 6127
6125 6128 instruct loadConL13( iRegL dst, immL13 src ) %{
6126 6129 match(Set dst src);
6127 6130 ins_cost(DEFAULT_COST * 2);
6128 6131
6129 6132 size(4);
6130 6133 format %{ "MOV $src,$dst\t! long" %}
6131 6134 ins_encode( Set13( src, dst ) );
6132 6135 ins_pipe(ialu_imm);
6133 6136 %}
6134 6137
// NOTE: float/double constants are always fetched from the constant table;
// O7 (KILLed tmp) holds the table offset when it exceeds simm13 range.
6135 6138 instruct loadConF(regF dst, immF con, o7RegI tmp) %{
6136 6139 match(Set dst con);
6137 6140 effect(KILL tmp);
6138 6141 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
6139 6142 ins_encode %{
6140 6143 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6141 6144 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
6142 6145 %}
6143 6146 ins_pipe(loadConFD);
6144 6147 %}
6145 6148
6146 6149 instruct loadConD(regD dst, immD con, o7RegI tmp) %{
6147 6150 match(Set dst con);
6148 6151 effect(KILL tmp);
6149 6152 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
6150 6153 ins_encode %{
6151 6154 // XXX This is a quick fix for 6833573.
6152 6155 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
6153 6156 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6154 6157 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
6155 6158 %}
6156 6159 ins_pipe(loadConFD);
6157 6160 %}
6158 6161
6159 6162 // Prefetch instructions for allocation.
6160 6163 // Must be safe to execute with invalid address (cannot fault).
// NOTE: variant selected by the AllocatePrefetchInstr runtime flag:
// 0 = PREFETCH-for-write (non-faulting), 1 = block-init store (BIS) via STXA.
6161 6164
6162 6165 instruct prefetchAlloc( memory mem ) %{
6163 6166 predicate(AllocatePrefetchInstr == 0);
6164 6167 match( PrefetchAllocation mem );
6165 6168 ins_cost(MEMORY_REF_COST);
6166 6169
6167 6170 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
6168 6171 opcode(Assembler::prefetch_op3);
6169 6172 ins_encode( form3_mem_prefetch_write( mem ) );
6170 6173 ins_pipe(iload_mem);
6171 6174 %}
6172 6175
6173 6176 // Use BIS instruction to prefetch for allocation.
6174 6177 // Could fault, need space at the end of TLAB.
6175 6178 instruct prefetchAlloc_bis( iRegP dst ) %{
6176 6179 predicate(AllocatePrefetchInstr == 1);
6177 6180 match( PrefetchAllocation dst );
6178 6181 ins_cost(MEMORY_REF_COST);
6179 6182 size(4);
6180 6183
6181 6184 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
6182 6185 ins_encode %{
// Store G0 through the block-init primary ASI at [dst + G0].
6183 6186 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
6184 6187 %}
6185 6188 ins_pipe(istore_mem_reg);
6186 6189 %}
6187 6190
6188 6191 // Next code is used for finding next cache line address to prefetch.
// NOTE: identical AND encoding in both builds; only the ideal node shape
// differs — AndI over 32-bit pointer bits vs AndL over 64-bit pointer bits.
6189 6192 #ifndef _LP64
6190 6193 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
6191 6194 match(Set dst (CastX2P (AndI (CastP2X src) mask)));
6192 6195 ins_cost(DEFAULT_COST);
6193 6196 size(4);
6194 6197
6195 6198 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6196 6199 ins_encode %{
6197 6200 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6198 6201 %}
6199 6202 ins_pipe(ialu_reg_imm);
6200 6203 %}
6201 6204 #else
6202 6205 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
6203 6206 match(Set dst (CastX2P (AndL (CastP2X src) mask)));
6204 6207 ins_cost(DEFAULT_COST);
6205 6208 size(4);
6206 6209
6207 6210 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6208 6211 ins_encode %{
6209 6212 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6210 6213 %}
6211 6214 ins_pipe(ialu_reg_imm);
6212 6215 %}
6213 6216 #endif
6214 6217
6215 6218 //----------Store Instructions-------------------------------------------------
6216 6219 // Store Byte
// NOTE: *0 variants match an immediate-zero source and store hardwired-zero
// register G0 instead of consuming an integer register. storeCM0 is the
// card-mark byte store used by the write barrier.
6217 6220 instruct storeB(memory mem, iRegI src) %{
6218 6221 match(Set mem (StoreB mem src));
6219 6222 ins_cost(MEMORY_REF_COST);
6220 6223
6221 6224 format %{ "STB $src,$mem\t! byte" %}
6222 6225 opcode(Assembler::stb_op3);
6223 6226 ins_encode(simple_form3_mem_reg( mem, src ) );
6224 6227 ins_pipe(istore_mem_reg);
6225 6228 %}
6226 6229
6227 6230 instruct storeB0(memory mem, immI0 src) %{
6228 6231 match(Set mem (StoreB mem src));
6229 6232 ins_cost(MEMORY_REF_COST);
6230 6233
6231 6234 format %{ "STB $src,$mem\t! byte" %}
6232 6235 opcode(Assembler::stb_op3);
6233 6236 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6234 6237 ins_pipe(istore_mem_zero);
6235 6238 %}
6236 6239
6237 6240 instruct storeCM0(memory mem, immI0 src) %{
6238 6241 match(Set mem (StoreCM mem src));
6239 6242 ins_cost(MEMORY_REF_COST);
6240 6243
6241 6244 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
6242 6245 opcode(Assembler::stb_op3);
6243 6246 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6244 6247 ins_pipe(istore_mem_zero);
6245 6248 %}
6246 6249
6247 6250 // Store Char/Short
// NOTE: halfword store; zero variant writes G0 (see storeB0 pattern above).
6248 6251 instruct storeC(memory mem, iRegI src) %{
6249 6252 match(Set mem (StoreC mem src));
6250 6253 ins_cost(MEMORY_REF_COST);
6251 6254
6252 6255 format %{ "STH $src,$mem\t! short" %}
6253 6256 opcode(Assembler::sth_op3);
6254 6257 ins_encode(simple_form3_mem_reg( mem, src ) );
6255 6258 ins_pipe(istore_mem_reg);
6256 6259 %}
6257 6260
6258 6261 instruct storeC0(memory mem, immI0 src) %{
6259 6262 match(Set mem (StoreC mem src));
6260 6263 ins_cost(MEMORY_REF_COST);
6261 6264
6262 6265 format %{ "STH $src,$mem\t! short" %}
6263 6266 opcode(Assembler::sth_op3);
6264 6267 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6265 6268 ins_pipe(istore_mem_zero);
6266 6269 %}
6267 6270
6268 6271 // Store Integer
// NOTE: STW for 32-bit ints, STX for 64-bit longs; the *0 variants store G0
// so a zero constant never occupies a register.
6269 6272 instruct storeI(memory mem, iRegI src) %{
6270 6273 match(Set mem (StoreI mem src));
6271 6274 ins_cost(MEMORY_REF_COST);
6272 6275
6273 6276 format %{ "STW $src,$mem" %}
6274 6277 opcode(Assembler::stw_op3);
6275 6278 ins_encode(simple_form3_mem_reg( mem, src ) );
6276 6279 ins_pipe(istore_mem_reg);
6277 6280 %}
6278 6281
6279 6282 // Store Long
6280 6283 instruct storeL(memory mem, iRegL src) %{
6281 6284 match(Set mem (StoreL mem src));
6282 6285 ins_cost(MEMORY_REF_COST);
6283 6286 format %{ "STX $src,$mem\t! long" %}
6284 6287 opcode(Assembler::stx_op3);
6285 6288 ins_encode(simple_form3_mem_reg( mem, src ) );
6286 6289 ins_pipe(istore_mem_reg);
6287 6290 %}
6288 6291
6289 6292 instruct storeI0(memory mem, immI0 src) %{
6290 6293 match(Set mem (StoreI mem src));
6291 6294 ins_cost(MEMORY_REF_COST);
6292 6295
6293 6296 format %{ "STW $src,$mem" %}
6294 6297 opcode(Assembler::stw_op3);
6295 6298 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6296 6299 ins_pipe(istore_mem_zero);
6297 6300 %}
6298 6301
6299 6302 instruct storeL0(memory mem, immL0 src) %{
6300 6303 match(Set mem (StoreL mem src));
6301 6304 ins_cost(MEMORY_REF_COST);
6302 6305
6303 6306 format %{ "STX $src,$mem" %}
6304 6307 opcode(Assembler::stx_op3);
6305 6308 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6306 6309 ins_pipe(istore_mem_zero);
6307 6310 %}
6308 6311
6309 6312 // Store Integer from float register (used after fstoi)
// NOTE: lets an int result produced in a float register (by fstoi/fdtoi)
// reach memory via STF without a float->int register move.
6310 6313 instruct storeI_Freg(memory mem, regF src) %{
6311 6314 match(Set mem (StoreI mem src));
6312 6315 ins_cost(MEMORY_REF_COST);
6313 6316
6314 6317 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
6315 6318 opcode(Assembler::stf_op3);
6316 6319 ins_encode(simple_form3_mem_reg( mem, src ) );
6317 6320 ins_pipe(fstoreF_mem_reg);
6318 6321 %}
6319 6322
6320 6323 // Store Pointer
// NOTE: pointer width (STW vs STX) selected at build time by _LP64, mirroring
// loadP; the sp_ptr_RegP source class also admits stack-pointer registers.
6321 6324 instruct storeP(memory dst, sp_ptr_RegP src) %{
6322 6325 match(Set dst (StoreP dst src));
6323 6326 ins_cost(MEMORY_REF_COST);
6324 6327
6325 6328 #ifndef _LP64
6326 6329 format %{ "STW $src,$dst\t! ptr" %}
6327 6330 opcode(Assembler::stw_op3, 0, REGP_OP);
6328 6331 #else
6329 6332 format %{ "STX $src,$dst\t! ptr" %}
6330 6333 opcode(Assembler::stx_op3, 0, REGP_OP);
6331 6334 #endif
6332 6335 ins_encode( form3_mem_reg( dst, src ) );
6333 6336 ins_pipe(istore_mem_spORreg);
6334 6337 %}
6335 6338
6336 6339 instruct storeP0(memory dst, immP0 src) %{
6337 6340 match(Set dst (StoreP dst src));
6338 6341 ins_cost(MEMORY_REF_COST);
6339 6342
6340 6343 #ifndef _LP64
6341 6344 format %{ "STW $src,$dst\t! ptr" %}
6342 6345 opcode(Assembler::stw_op3, 0, REGP_OP);
6343 6346 #else
6344 6347 format %{ "STX $src,$dst\t! ptr" %}
6345 6348 opcode(Assembler::stx_op3, 0, REGP_OP);
6346 6349 #endif
6347 6350 ins_encode( form3_mem_reg( dst, R_G0 ) );
6348 6351 ins_pipe(istore_mem_zero);
6349 6352 %}
6350 6353
6351 6354 // Store Compressed Pointer
// NOTE: compressed oop/klass stores are 32-bit STWs. The encode block picks
// the reg+reg form when the memory operand has an index register (!= G0),
// otherwise the reg+displacement form; storeN0 stores literal 0 for a null.
6352 6355 instruct storeN(memory dst, iRegN src) %{
6353 6356 match(Set dst (StoreN dst src));
6354 6357 ins_cost(MEMORY_REF_COST);
6355 6358 size(4);
6356 6359
6357 6360 format %{ "STW $src,$dst\t! compressed ptr" %}
6358 6361 ins_encode %{
6359 6362 Register base = as_Register($dst$$base);
6360 6363 Register index = as_Register($dst$$index);
6361 6364 Register src = $src$$Register;
6362 6365 if (index != G0) {
6363 6366 __ stw(src, base, index);
6364 6367 } else {
6365 6368 __ stw(src, base, $dst$$disp);
6366 6369 }
6367 6370 %}
6368 6371 ins_pipe(istore_mem_spORreg);
6369 6372 %}
6370 6373
6371 6374 instruct storeNKlass(memory dst, iRegN src) %{
6372 6375 match(Set dst (StoreNKlass dst src));
6373 6376 ins_cost(MEMORY_REF_COST);
6374 6377 size(4);
6375 6378
6376 6379 format %{ "STW $src,$dst\t! compressed klass ptr" %}
6377 6380 ins_encode %{
6378 6381 Register base = as_Register($dst$$base);
6379 6382 Register index = as_Register($dst$$index);
6380 6383 Register src = $src$$Register;
6381 6384 if (index != G0) {
6382 6385 __ stw(src, base, index);
6383 6386 } else {
6384 6387 __ stw(src, base, $dst$$disp);
6385 6388 }
6386 6389 %}
6387 6390 ins_pipe(istore_mem_spORreg);
6388 6391 %}
6389 6392
6390 6393 instruct storeN0(memory dst, immN0 src) %{
6391 6394 match(Set dst (StoreN dst src));
6392 6395 ins_cost(MEMORY_REF_COST);
6393 6396 size(4);
6394 6397
6395 6398 format %{ "STW $src,$dst\t! compressed ptr" %}
6396 6399 ins_encode %{
6397 6400 Register base = as_Register($dst$$base);
6398 6401 Register index = as_Register($dst$$index);
6399 6402 if (index != G0) {
6400 6403 __ stw(0, base, index);
6401 6404 } else {
6402 6405 __ stw(0, base, $dst$$disp);
6403 6406 }
6404 6407 %}
6405 6408 ins_pipe(istore_mem_zero);
6406 6409 %}
6407 6410
6408 6411 // Store Double
// NOTE: zero float/double stores reuse the integer store of G0 (STW/STX)
// rather than occupying a float register with 0.0.
6409 6412 instruct storeD( memory mem, regD src) %{
6410 6413 match(Set mem (StoreD mem src));
6411 6414 ins_cost(MEMORY_REF_COST);
6412 6415
6413 6416 format %{ "STDF $src,$mem" %}
6414 6417 opcode(Assembler::stdf_op3);
6415 6418 ins_encode(simple_form3_mem_reg( mem, src ) );
6416 6419 ins_pipe(fstoreD_mem_reg);
6417 6420 %}
6418 6421
6419 6422 instruct storeD0( memory mem, immD0 src) %{
6420 6423 match(Set mem (StoreD mem src));
6421 6424 ins_cost(MEMORY_REF_COST);
6422 6425
6423 6426 format %{ "STX $src,$mem" %}
6424 6427 opcode(Assembler::stx_op3);
6425 6428 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6426 6429 ins_pipe(fstoreD_mem_zero);
6427 6430 %}
6428 6431
6429 6432 // Store Float
6430 6433 instruct storeF( memory mem, regF src) %{
6431 6434 match(Set mem (StoreF mem src));
6432 6435 ins_cost(MEMORY_REF_COST);
6433 6436
6434 6437 format %{ "STF $src,$mem" %}
6435 6438 opcode(Assembler::stf_op3);
6436 6439 ins_encode(simple_form3_mem_reg( mem, src ) );
6437 6440 ins_pipe(fstoreF_mem_reg);
6438 6441 %}
6439 6442
6440 6443 instruct storeF0( memory mem, immF0 src) %{
6441 6444 match(Set mem (StoreF mem src));
6442 6445 ins_cost(MEMORY_REF_COST);
6443 6446
6444 6447 format %{ "STW $src,$mem\t! storeF0" %}
6445 6448 opcode(Assembler::stw_op3);
6446 6449 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6447 6450 ins_pipe(fstoreF_mem_zero);
6448 6451 %}
6449 6452
6450 6453 // Convert oop pointer into compressed form
6451 6454 instruct encodeHeapOop(iRegN dst, iRegP src) %{
6452 6455 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
6453 6456 match(Set dst (EncodeP src));
6454 6457 format %{ "encode_heap_oop $src, $dst" %}
6455 6458 ins_encode %{
6456 6459 __ encode_heap_oop($src$$Register, $dst$$Register);
6457 6460 %}
6458 6461 ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
6459 6462 ins_pipe(ialu_reg);
6460 6463 %}
6461 6464
6462 6465 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
6463 6466 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
6464 6467 match(Set dst (EncodeP src));
6465 6468 format %{ "encode_heap_oop_not_null $src, $dst" %}
6466 6469 ins_encode %{
6467 6470 __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
6468 6471 %}
6469 6472 ins_pipe(ialu_reg);
6470 6473 %}
6471 6474
6472 6475 instruct decodeHeapOop(iRegP dst, iRegN src) %{
6473 6476 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
6474 6477 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
6475 6478 match(Set dst (DecodeN src));
6476 6479 format %{ "decode_heap_oop $src, $dst" %}
6477 6480 ins_encode %{
6478 6481 __ decode_heap_oop($src$$Register, $dst$$Register);
6479 6482 %}
6480 6483 ins_pipe(ialu_reg);
6481 6484 %}
6482 6485
6483 6486 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
6484 6487 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
6485 6488 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
6486 6489 match(Set dst (DecodeN src));
6487 6490 format %{ "decode_heap_oop_not_null $src, $dst" %}
6488 6491 ins_encode %{
6489 6492 __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
6490 6493 %}
6491 6494 ins_pipe(ialu_reg);
6492 6495 %}
6493 6496
// Compress a klass (metadata) pointer; only the not-null variant is
// defined for klass pointers in this file.
6494 6497 instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
6495 6498 match(Set dst (EncodePKlass src));
6496 6499 format %{ "encode_klass_not_null $src, $dst" %}
6497 6500 ins_encode %{
6498 6501 __ encode_klass_not_null($src$$Register, $dst$$Register);
6499 6502 %}
6500 6503 ins_pipe(ialu_reg);
6501 6504 %}
6502 6505
// Decompress a narrow klass pointer; counterpart of encodeKlass_not_null.
6503 6506 instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
6504 6507 match(Set dst (DecodeNKlass src));
6505 6508 format %{ "decode_klass_not_null $src, $dst" %}
6506 6509 ins_encode %{
6507 6510 __ decode_klass_not_null($src$$Register, $dst$$Register);
6508 6511 %}
6509 6512 ins_pipe(ialu_reg);
6510 6513 %}
6511 6514
6512 6515 //----------MemBar Instructions-----------------------------------------------
6513 6516 // Memory barrier flavors
6514 6517
// Acquire-flavored memory barrier; also matches the LoadFence node.
// Emission is delegated to enc_membar_acquire.
6515 6518 instruct membar_acquire() %{
6516 6519 match(MemBarAcquire);
6517 6520 match(LoadFence);
6518 6521 ins_cost(4*MEMORY_REF_COST);
6519 6522
6520 6523 size(0);
6521 6524 format %{ "MEMBAR-acquire" %}
6522 6525 ins_encode( enc_membar_acquire );
6523 6526 ins_pipe(long_memory_op);
6524 6527 %}
6525 6528
// Acquire barrier paired with lock entry: emits nothing because, per the
// format string, the CAS in the prior FastLock already provides ordering.
6526 6529 instruct membar_acquire_lock() %{
6527 6530 match(MemBarAcquireLock);
6528 6531 ins_cost(0);
6529 6532
6530 6533 size(0);
6531 6534 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
6532 6535 ins_encode( );
6533 6536 ins_pipe(empty);
6534 6537 %}
6535 6538
// Release-flavored memory barrier; also matches the StoreFence node.
6536 6539 instruct membar_release() %{
6537 6540 match(MemBarRelease);
6538 6541 match(StoreFence);
6539 6542 ins_cost(4*MEMORY_REF_COST);
6540 6543
6541 6544 size(0);
6542 6545 format %{ "MEMBAR-release" %}
6543 6546 ins_encode( enc_membar_release );
6544 6547 ins_pipe(long_memory_op);
6545 6548 %}
6546 6549
// Release barrier paired with lock exit: emits nothing because, per the
// format string, the CAS in the succeeding FastUnlock provides ordering.
6547 6550 instruct membar_release_lock() %{
6548 6551 match(MemBarReleaseLock);
6549 6552 ins_cost(0);
6550 6553
6551 6554 size(0);
6552 6555 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
6553 6556 ins_encode( );
6554 6557 ins_pipe(empty);
6555 6558 %}
6556 6559
// Full barrier for volatile accesses; emits one 4-byte instruction via
// enc_membar_volatile.
6557 6560 instruct membar_volatile() %{
6558 6561 match(MemBarVolatile);
6559 6562 ins_cost(4*MEMORY_REF_COST);
6560 6563
6561 6564 size(4);
6562 6565 format %{ "MEMBAR-volatile" %}
6563 6566 ins_encode( enc_membar_volatile );
6564 6567 ins_pipe(long_memory_op);
6565 6568 %}
6566 6569
// Elides a MemBarVolatile when Matcher::post_store_load_barrier(n)
// reports it is already covered (see predicate); emits nothing.
6567 6570 instruct unnecessary_membar_volatile() %{
6568 6571 match(MemBarVolatile);
6569 6572 predicate(Matcher::post_store_load_barrier(n));
6570 6573 ins_cost(0);
6571 6574
6572 6575 size(0);
6573 6576 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
6574 6577 ins_encode( );
6575 6578 ins_pipe(empty);
6576 6579 %}
6577 6580
// StoreStore barrier: empty encoding (no instruction emitted).
6578 6581 instruct membar_storestore() %{
6579 6582 match(MemBarStoreStore);
6580 6583 ins_cost(0);
6581 6584
6582 6585 size(0);
6583 6586 format %{ "!MEMBAR-storestore (empty encoding)" %}
6584 6587 ins_encode( );
6585 6588 ins_pipe(empty);
6586 6589 %}
6587 6590
6588 6591 //----------Register Move Instructions-----------------------------------------
// RoundDouble is a no-op here: empty encoding, zero cost.
6589 6592 instruct roundDouble_nop(regD dst) %{
6590 6593 match(Set dst (RoundDouble dst));
6591 6594 ins_cost(0);
6592 6595 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6593 6596 ins_encode( );
6594 6597 ins_pipe(empty);
6595 6598 %}
6596 6599
6597 6600
// RoundFloat is a no-op here: empty encoding, zero cost.
6598 6601 instruct roundFloat_nop(regF dst) %{
6599 6602 match(Set dst (RoundFloat dst));
6600 6603 ins_cost(0);
6601 6604 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6602 6605 ins_encode( );
6603 6606 ins_pipe(empty);
6604 6607 %}
6605 6608
6606 6609
6607 6610 // Cast Index to Pointer for unsafe natives
// Reinterpret a machine-word integer as a pointer: a plain register move
// (MOV via form3_g0_rs2_rd_move).
6608 6611 instruct castX2P(iRegX src, iRegP dst) %{
6609 6612 match(Set dst (CastX2P src));
6610 6613
6611 6614 format %{ "MOV $src,$dst\t! IntX->Ptr" %}
6612 6615 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6613 6616 ins_pipe(ialu_reg);
6614 6617 %}
6615 6618
6616 6619 // Cast Pointer to Index for unsafe natives
// Reinterpret a pointer as a machine-word integer: a plain register move.
6617 6620 instruct castP2X(iRegP src, iRegX dst) %{
6618 6621 match(Set dst (CastP2X src));
6619 6622
6620 6623 format %{ "MOV $src,$dst\t! Ptr->IntX" %}
6621 6624 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6622 6625 ins_pipe(ialu_reg);
6623 6626 %}
6624 6627
// Spill a double FP register to a stack slot (STDF); chain rule so the
// allocator can use it as a copy.
6625 6628 instruct stfSSD(stackSlotD stkSlot, regD src) %{
6626 6629 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6627 6630 match(Set stkSlot src); // chain rule
6628 6631 ins_cost(MEMORY_REF_COST);
6629 6632 format %{ "STDF $src,$stkSlot\t!stk" %}
6630 6633 opcode(Assembler::stdf_op3);
6631 6634 ins_encode(simple_form3_mem_reg(stkSlot, src));
6632 6635 ins_pipe(fstoreD_stk_reg);
6633 6636 %}
6634 6637
// Reload a double FP register from a stack slot (LDDF); chain rule.
6635 6638 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
6636 6639 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6637 6640 match(Set dst stkSlot); // chain rule
6638 6641 ins_cost(MEMORY_REF_COST);
6639 6642 format %{ "LDDF $stkSlot,$dst\t!stk" %}
6640 6643 opcode(Assembler::lddf_op3);
6641 6644 ins_encode(simple_form3_mem_reg(stkSlot, dst));
6642 6645 ins_pipe(floadD_stk);
6643 6646 %}
6644 6647
// Spill a single FP register to a stack slot (STF); chain rule.
6645 6648 instruct stfSSF(stackSlotF stkSlot, regF src) %{
6646 6649 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6647 6650 match(Set stkSlot src); // chain rule
6648 6651 ins_cost(MEMORY_REF_COST);
6649 6652 format %{ "STF $src,$stkSlot\t!stk" %}
6650 6653 opcode(Assembler::stf_op3);
6651 6654 ins_encode(simple_form3_mem_reg(stkSlot, src));
6652 6655 ins_pipe(fstoreF_stk_reg);
6653 6656 %}
6654 6657
6655 6658 //----------Conditional Move---------------------------------------------------
6656 6659 // Conditional move
// Conditional move of an int register on pointer condition codes (ptr_cc).
6657 6660 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
6658 6661 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6659 6662 ins_cost(150);
6660 6663 format %{ "MOV$cmp $pcc,$src,$dst" %}
6661 6664 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6662 6665 ins_pipe(ialu_reg);
6663 6666 %}
6664 6667
// Conditional move of an 11-bit immediate into an int register on ptr_cc.
6665 6668 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
6666 6669 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6667 6670 ins_cost(140);
6668 6671 format %{ "MOV$cmp $pcc,$src,$dst" %}
6669 6672 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6670 6673 ins_pipe(ialu_imm);
6671 6674 %}
6672 6675
// Conditional move of an int register on signed int condition codes (icc).
6673 6676 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
6674 6677 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6675 6678 ins_cost(150);
6676 6679 size(4);
6677 6680 format %{ "MOV$cmp $icc,$src,$dst" %}
6678 6681 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6679 6682 ins_pipe(ialu_reg);
6680 6683 %}
6681 6684
// Conditional move of an 11-bit immediate into an int register on icc.
6682 6685 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
6683 6686 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6684 6687 ins_cost(140);
6685 6688 size(4);
6686 6689 format %{ "MOV$cmp $icc,$src,$dst" %}
6687 6690 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6688 6691 ins_pipe(ialu_imm);
6689 6692 %}
6690 6693
// Conditional move of an int register on unsigned int condition codes.
6691 6694 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
6692 6695 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6693 6696 ins_cost(150);
6694 6697 size(4);
6695 6698 format %{ "MOV$cmp $icc,$src,$dst" %}
6696 6699 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6697 6700 ins_pipe(ialu_reg);
6698 6701 %}
6699 6702
// Conditional move of an 11-bit immediate on unsigned int condition codes.
6700 6703 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
6701 6704 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6702 6705 ins_cost(140);
6703 6706 size(4);
6704 6707 format %{ "MOV$cmp $icc,$src,$dst" %}
6705 6708 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6706 6709 ins_pipe(ialu_imm);
6707 6710 %}
6708 6711
// Conditional move of an int register on float condition codes (fcc).
6709 6712 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
6710 6713 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6711 6714 ins_cost(150);
6712 6715 size(4);
6713 6716 format %{ "MOV$cmp $fcc,$src,$dst" %}
6714 6717 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6715 6718 ins_pipe(ialu_reg);
6716 6719 %}
6717 6720
// Conditional move of an 11-bit immediate into an int register on fcc.
6718 6721 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
6719 6722 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6720 6723 ins_cost(140);
6721 6724 size(4);
6722 6725 format %{ "MOV$cmp $fcc,$src,$dst" %}
6723 6726 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6724 6727 ins_pipe(ialu_imm);
6725 6728 %}
6726 6729
6727 6730 // Conditional move for RegN. Only cmov(reg,reg).
// Conditional move of a narrow-oop register on ptr_cc; reg-reg only
// (no immediate form for RegN, per the comment above).
6728 6731 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
6729 6732 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
6730 6733 ins_cost(150);
6731 6734 format %{ "MOV$cmp $pcc,$src,$dst" %}
6732 6735 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6733 6736 ins_pipe(ialu_reg);
6734 6737 %}
6735 6738
6736 6739 // This instruction also works with CmpN so we don't need cmovNN_reg.
// Conditional move of a narrow-oop register on icc (also used for CmpN).
6737 6740 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
6738 6741 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6739 6742 ins_cost(150);
6740 6743 size(4);
6741 6744 format %{ "MOV$cmp $icc,$src,$dst" %}
6742 6745 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6743 6746 ins_pipe(ialu_reg);
6744 6747 %}
6745 6748
6746 6749 // This instruction also works with CmpN so we don't need cmovNN_reg.
// Conditional move of a narrow-oop register on unsigned condition codes.
6747 6750 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
6748 6751 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6749 6752 ins_cost(150);
6750 6753 size(4);
6751 6754 format %{ "MOV$cmp $icc,$src,$dst" %}
6752 6755 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6753 6756 ins_pipe(ialu_reg);
6754 6757 %}
6755 6758
// Conditional move of a narrow-oop register on float condition codes.
6756 6759 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
6757 6760 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
6758 6761 ins_cost(150);
6759 6762 size(4);
6760 6763 format %{ "MOV$cmp $fcc,$src,$dst" %}
6761 6764 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6762 6765 ins_pipe(ialu_reg);
6763 6766 %}
6764 6767
6765 6768 // Conditional move
// Conditional move of a pointer register on ptr_cc.
6766 6769 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
6767 6770 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6768 6771 ins_cost(150);
6769 6772 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6770 6773 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6771 6774 ins_pipe(ialu_reg);
6772 6775 %}
6773 6776
// Conditional move of the null-pointer constant (immP0) on ptr_cc.
6774 6777 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
6775 6778 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6776 6779 ins_cost(140);
6777 6780 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6778 6781 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6779 6782 ins_pipe(ialu_imm);
6780 6783 %}
6781 6784
6782 6785 // This instruction also works with CmpN so we don't need cmovPN_reg.
// Conditional move of a pointer register on icc (also used with CmpN).
6783 6786 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
6784 6787 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6785 6788 ins_cost(150);
6786 6789
6787 6790 size(4);
6788 6791 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6789 6792 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6790 6793 ins_pipe(ialu_reg);
6791 6794 %}
6792 6795
// Conditional move of a pointer register on unsigned condition codes.
6793 6796 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
6794 6797 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6795 6798 ins_cost(150);
6796 6799
6797 6800 size(4);
6798 6801 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6799 6802 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6800 6803 ins_pipe(ialu_reg);
6801 6804 %}
6802 6805
// Conditional move of the null-pointer constant (immP0) on icc.
6803 6806 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
6804 6807 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6805 6808 ins_cost(140);
6806 6809
6807 6810 size(4);
6808 6811 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6809 6812 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6810 6813 ins_pipe(ialu_imm);
6811 6814 %}
6812 6815
// Conditional move of the null-pointer constant on unsigned condition codes.
6813 6816 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
6814 6817 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6815 6818 ins_cost(140);
6816 6819
6817 6820 size(4);
6818 6821 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6819 6822 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6820 6823 ins_pipe(ialu_imm);
6821 6824 %}
6822 6825
// Conditional move of a pointer register on float condition codes.
// NOTE(review): this register variant uses ins_pipe(ialu_imm) while the
// sibling cmovIF_reg uses ialu_reg — verify the pipe class is intended.
6823 6826 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
6824 6827 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6825 6828 ins_cost(150);
6826 6829 size(4);
6827 6830 format %{ "MOV$cmp $fcc,$src,$dst" %}
6828 6831 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6829 6832 ins_pipe(ialu_imm);
6830 6833 %}
6831 6834
// Conditional move of the null-pointer constant on float condition codes.
6832 6835 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
6833 6836 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6834 6837 ins_cost(140);
6835 6838 size(4);
6836 6839 format %{ "MOV$cmp $fcc,$src,$dst" %}
6837 6840 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6838 6841 ins_pipe(ialu_imm);
6839 6842 %}
6840 6843
6841 6844 // Conditional move
// Conditional FP move of a single (regF) on ptr_cc.
// NOTE(review): format prints "FMOVD" although operands are regF and the
// opcode (0x101) matches the FMOVS variants below — looks like a stale
// mnemonic in the format string; verify before relying on disassembly text.
6842 6845 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
6843 6846 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
6844 6847 ins_cost(150);
6845 6848 opcode(0x101);
6846 6849 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
6847 6850 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6848 6851 ins_pipe(int_conditional_float_move);
6849 6852 %}
6850 6853
// Conditional FP move of a single (FMOVS) on signed int condition codes.
6851 6854 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
6852 6855 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6853 6856 ins_cost(150);
6854 6857
6855 6858 size(4);
6856 6859 format %{ "FMOVS$cmp $icc,$src,$dst" %}
6857 6860 opcode(0x101);
6858 6861 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6859 6862 ins_pipe(int_conditional_float_move);
6860 6863 %}
6861 6864
// Conditional FP move of a single (FMOVS) on unsigned int condition codes.
6862 6865 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
6863 6866 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6864 6867 ins_cost(150);
6865 6868
6866 6869 size(4);
6867 6870 format %{ "FMOVS$cmp $icc,$src,$dst" %}
6868 6871 opcode(0x101);
6869 6872 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6870 6873 ins_pipe(int_conditional_float_move);
6871 6874 %}
6872 6875
6873 6876 // Conditional move on the float condition codes (fcc).
// Conditional FP move of a single on float condition codes (fcc-based
// encoder enc_cmovff_reg).
6874 6877 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
6875 6878 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
6876 6879 ins_cost(150);
6877 6880 size(4);
6878 6881 format %{ "FMOVF$cmp $fcc,$src,$dst" %}
6879 6882 opcode(0x1);
6880 6883 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
6881 6884 ins_pipe(int_conditional_double_move);
6882 6885 %}
6883 6886
6884 6887 // Conditional move
// Conditional FP move of a double (FMOVD) on ptr_cc.
6885 6888 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
6886 6889 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
6887 6890 ins_cost(150);
6888 6891 size(4);
6889 6892 opcode(0x102);
6890 6893 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
6891 6894 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6892 6895 ins_pipe(int_conditional_double_move);
6893 6896 %}
6894 6897
// Conditional FP move of a double (FMOVD) on signed int condition codes.
6895 6898 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
6896 6899 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
6897 6900 ins_cost(150);
6898 6901
6899 6902 size(4);
6900 6903 format %{ "FMOVD$cmp $icc,$src,$dst" %}
6901 6904 opcode(0x102);
6902 6905 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6903 6906 ins_pipe(int_conditional_double_move);
6904 6907 %}
6905 6908
// Conditional FP move of a double (FMOVD) on unsigned int condition codes.
6906 6909 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
6907 6910 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
6908 6911 ins_cost(150);
6909 6912
6910 6913 size(4);
6911 6914 format %{ "FMOVD$cmp $icc,$src,$dst" %}
6912 6915 opcode(0x102);
6913 6916 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6914 6917 ins_pipe(int_conditional_double_move);
6915 6918 %}
6916 6919
6917 6920 // Conditional move on the float condition codes (fcc).
// Conditional FP move of a double on float condition codes.
6918 6921 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
6919 6922 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
6920 6923 ins_cost(150);
6921 6924 size(4);
6922 6925 format %{ "FMOVD$cmp $fcc,$src,$dst" %}
6923 6926 opcode(0x2);
6924 6927 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
6925 6928 ins_pipe(int_conditional_double_move);
6926 6929 %}
6927 6930
6928 6931 // Conditional move
// Conditional move of a long register on ptr_cc.
6929 6932 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
6930 6933 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
6931 6934 ins_cost(150);
6932 6935 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
6933 6936 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6934 6937 ins_pipe(ialu_reg);
6935 6938 %}
6936 6939
// Conditional move of an 11-bit immediate into a long register on ptr_cc.
6937 6940 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
6938 6941 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
6939 6942 ins_cost(140);
6940 6943 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
6941 6944 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6942 6945 ins_pipe(ialu_imm);
6943 6946 %}
6944 6947
// Conditional move of a long register on signed int condition codes.
6945 6948 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
6946 6949 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
6947 6950 ins_cost(150);
6948 6951
6949 6952 size(4);
6950 6953 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
6951 6954 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6952 6955 ins_pipe(ialu_reg);
6953 6956 %}
6954 6957
6955 6958
// Conditional move of a long register on unsigned int condition codes.
6956 6959 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
6957 6960 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
6958 6961 ins_cost(150);
6959 6962
6960 6963 size(4);
6961 6964 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
6962 6965 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6963 6966 ins_pipe(ialu_reg);
6964 6967 %}
6965 6968
6966 6969
// Conditional move of a long register on float condition codes.
6967 6970 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
6968 6971 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
6969 6972 ins_cost(150);
6970 6973
6971 6974 size(4);
6972 6975 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
6973 6976 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6974 6977 ins_pipe(ialu_reg);
6975 6978 %}
6976 6979
6977 6980
6978 6981
6979 6982 //----------OS and Locking Instructions----------------------------------------
6980 6983
6981 6984 // This name is KNOWN by the ADLC and cannot be changed.
6982 6985 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
6983 6986 // for this guy.
// ThreadLocal: the current-thread pointer is permanently kept in G2
// (g2RegP result), so no code is emitted (size 0, empty encoding).
6984 6987 instruct tlsLoadP(g2RegP dst) %{
6985 6988 match(Set dst (ThreadLocal));
6986 6989
6987 6990 size(0);
6988 6991 ins_cost(0);
6989 6992 format %{ "# TLS is in G2" %}
6990 6993 ins_encode( /*empty encoding*/ );
6991 6994 ins_pipe(ialu_none);
6992 6995 %}
6993 6996
// CheckCastPP is a compile-time type assertion only: dst is reused in
// place and no code is emitted.
6994 6997 instruct checkCastPP( iRegP dst ) %{
6995 6998 match(Set dst (CheckCastPP dst));
6996 6999
6997 7000 size(0);
6998 7001 format %{ "# checkcastPP of $dst" %}
6999 7002 ins_encode( /*empty encoding*/ );
7000 7003 ins_pipe(empty);
7001 7004 %}
7002 7005
7003 7006
// CastPP: pointer type refinement with no runtime effect (empty encoding).
7004 7007 instruct castPP( iRegP dst ) %{
7005 7008 match(Set dst (CastPP dst));
7006 7009 format %{ "# castPP of $dst" %}
7007 7010 ins_encode( /*empty encoding*/ );
7008 7011 ins_pipe(empty);
7009 7012 %}
7010 7013
// CastII: int range/type refinement with no runtime effect (empty encoding).
7011 7014 instruct castII( iRegI dst ) %{
7012 7015 match(Set dst (CastII dst));
7013 7016 format %{ "# castII of $dst" %}
7014 7017 ins_encode( /*empty encoding*/ );
7015 7018 ins_cost(0);
7016 7019 ins_pipe(empty);
7017 7020 %}
7018 7021
7019 7022 //----------Arithmetic Instructions--------------------------------------------
7020 7023 // Addition Instructions
7021 7024 // Register Addition
// Integer add, register + register (one 4-byte ADD via the assembler).
7022 7025 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7023 7026 match(Set dst (AddI src1 src2));
7024 7027
7025 7028 size(4);
7026 7029 format %{ "ADD $src1,$src2,$dst" %}
7027 7030 ins_encode %{
7028 7031 __ add($src1$$Register, $src2$$Register, $dst$$Register);
7029 7032 %}
7030 7033 ins_pipe(ialu_reg_reg);
7031 7034 %}
7032 7035
7033 7036 // Immediate Addition
// Integer add, register + 13-bit signed immediate (SPARC simm13 form).
7034 7037 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7035 7038 match(Set dst (AddI src1 src2));
7036 7039
7037 7040 size(4);
7038 7041 format %{ "ADD $src1,$src2,$dst" %}
7039 7042 opcode(Assembler::add_op3, Assembler::arith_op);
7040 7043 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7041 7044 ins_pipe(ialu_reg_imm);
7042 7045 %}
7043 7046
7044 7047 // Pointer Register Addition
// Pointer add: base pointer plus machine-word offset register.
7045 7048 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
7046 7049 match(Set dst (AddP src1 src2));
7047 7050
7048 7051 size(4);
7049 7052 format %{ "ADD $src1,$src2,$dst" %}
7050 7053 opcode(Assembler::add_op3, Assembler::arith_op);
7051 7054 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7052 7055 ins_pipe(ialu_reg_reg);
7053 7056 %}
7054 7057
7055 7058 // Pointer Immediate Addition
// Pointer add: base pointer plus 13-bit signed immediate offset.
7056 7059 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
7057 7060 match(Set dst (AddP src1 src2));
7058 7061
7059 7062 size(4);
7060 7063 format %{ "ADD $src1,$src2,$dst" %}
7061 7064 opcode(Assembler::add_op3, Assembler::arith_op);
7062 7065 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7063 7066 ins_pipe(ialu_reg_imm);
7064 7067 %}
7065 7068
7066 7069 // Long Addition
// Long add, register + register.
7067 7070 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7068 7071 match(Set dst (AddL src1 src2));
7069 7072
7070 7073 size(4);
7071 7074 format %{ "ADD $src1,$src2,$dst\t! long" %}
7072 7075 opcode(Assembler::add_op3, Assembler::arith_op);
7073 7076 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7074 7077 ins_pipe(ialu_reg_reg);
7075 7078 %}
7076 7079
// Long add, register + 13-bit signed immediate.
7077 7080 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7078 7081 match(Set dst (AddL src1 con));
7079 7082
7080 7083 size(4);
7081 7084 format %{ "ADD $src1,$con,$dst" %}
7082 7085 opcode(Assembler::add_op3, Assembler::arith_op);
7083 7086 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7084 7087 ins_pipe(ialu_reg_imm);
7085 7088 %}
7086 7089
7087 7090 //----------Conditional_store--------------------------------------------------
7088 7091 // Conditional-store of the updated heap-top.
7089 7092 // Used during allocation of the shared heap.
7090 7093 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7091 7094
7092 7095 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
// LoadPLocked: plain pointer load used as the load half of a
// load-locked/store-conditional pair; word width (LDUW vs LDX) is chosen
// by the _LP64 preprocessor condition.
7093 7096 instruct loadPLocked(iRegP dst, memory mem) %{
7094 7097 match(Set dst (LoadPLocked mem));
7095 7098 ins_cost(MEMORY_REF_COST);
7096 7099
7097 7100 #ifndef _LP64
7098 7101 format %{ "LDUW $mem,$dst\t! ptr" %}
7099 7102 opcode(Assembler::lduw_op3, 0, REGP_OP);
7100 7103 #else
7101 7104 format %{ "LDX $mem,$dst\t! ptr" %}
7102 7105 opcode(Assembler::ldx_op3, 0, REGP_OP);
7103 7106 #endif
7104 7107 ins_encode( form3_mem_reg( mem, dst ) );
7105 7108 ins_pipe(iload_mem);
7106 7109 %}
7107 7110
// Conditional store of the updated heap top via CASA; newval is pinned to
// G3 and clobbered (KILL), and the result is reported in the pointer
// condition codes (pcc) by the trailing CMP.
7108 7111 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7109 7112 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7110 7113 effect( KILL newval );
7111 7114 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7112 7115             "CMP R_G3,$oldval\t\t! See if we made progress" %}
7113 7116 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7114 7117 ins_pipe( long_memory_op );
7115 7118 %}
7116 7119
7117 7120 // Conditional-store of an int value.
// Conditional store of an int via CASA; newval is pinned to G3 and
// clobbered; success is reported in icc by the trailing CMP.
7118 7121 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7119 7122 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7120 7123 effect( KILL newval );
7121 7124 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7122 7125             "CMP $oldval,$newval\t\t! See if we made progress" %}
7123 7126 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7124 7127 ins_pipe( long_memory_op );
7125 7128 %}
7126 7129
7127 7130 // Conditional-store of a long value.
// Conditional store of a long via CASXA; newval pinned to G3 and
// clobbered; success reported in the 64-bit condition codes (xcc).
7128 7131 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
7129 7132 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
7130 7133 effect( KILL newval );
7131 7134 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7132 7135             "CMP $oldval,$newval\t\t! See if we made progress" %}
7133 7136 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7134 7137 ins_pipe( long_memory_op );
7135 7138 %}
7136 7139
7137 7140 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7138 7141
// CAS of a long returning a boolean in res; gated on 8-byte CAS support
// (supports_cx8 predicate). Uses O7 as scratch (tmp1 killed) and clobbers
// the flags (ccr); also matches the Weak variant.
7139 7142 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7140 7143 predicate(VM_Version::supports_cx8());
7141 7144 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7142 7145 match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
7143 7146 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7144 7147 format %{
7145 7148 "MOV $newval,O7\n\t"
7146 7149 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7147 7150 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7148 7151 "MOV 1,$res\n\t"
7149 7152 "MOVne xcc,R_G0,$res"
7150 7153 %}
7151 7154 ins_encode( enc_casx(mem_ptr, oldval, newval),
7152 7155 enc_lflags_ne_to_boolean(res) );
7153 7156 ins_pipe( long_memory_op );
7154 7157 %}
7155 7158
7156 7159
// CAS of an int returning a boolean in res; O7 scratch, clobbers flags;
// also matches the Weak variant.
7157 7160 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7158 7161 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7159 7162 match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
7160 7163 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7161 7164 format %{
7162 7165 "MOV $newval,O7\n\t"
7163 7166 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7164 7167 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7165 7168 "MOV 1,$res\n\t"
7166 7169 "MOVne icc,R_G0,$res"
7167 7170 %}
7168 7171 ins_encode( enc_casi(mem_ptr, oldval, newval),
7169 7172 enc_iflags_ne_to_boolean(res) );
7170 7173 ins_pipe( long_memory_op );
7171 7174 %}
7172 7175
// CAS of a pointer returning a boolean in res; selects 64-bit (CASXA/xcc)
// or 32-bit (CASA/icc) encoding by the _LP64 preprocessor condition, with
// the 64-bit path additionally gated on supports_cx8. Also matches Weak.
7173 7176 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7174 7177 #ifdef _LP64
7175 7178 predicate(VM_Version::supports_cx8());
7176 7179 #endif
7177 7180 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7178 7181 match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
7179 7182 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7180 7183 format %{
7181 7184 "MOV $newval,O7\n\t"
7182 7185 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7183 7186 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7184 7187 "MOV 1,$res\n\t"
7185 7188 "MOVne xcc,R_G0,$res"
7186 7189 %}
7187 7190 #ifdef _LP64
7188 7191 ins_encode( enc_casx(mem_ptr, oldval, newval),
7189 7192 enc_lflags_ne_to_boolean(res) );
7190 7193 #else
7191 7194 ins_encode( enc_casi(mem_ptr, oldval, newval),
7192 7195 enc_iflags_ne_to_boolean(res) );
7193 7196 #endif
7194 7197 ins_pipe( long_memory_op );
7195 7198 %}
7196 7199
// CAS of a narrow oop returning a boolean in res; 32-bit CASA/icc path,
// O7 scratch, clobbers flags; also matches the Weak variant.
7197 7200 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7198 7201 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7199 7202 match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
7200 7203 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7201 7204 format %{
7202 7205 "MOV $newval,O7\n\t"
7203 7206 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7204 7207 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7205 7208 "MOV 1,$res\n\t"
7206 7209 "MOVne icc,R_G0,$res"
7207 7210 %}
7208 7211 ins_encode( enc_casi(mem_ptr, oldval, newval),
7209 7212 enc_iflags_ne_to_boolean(res) );
7210 7213 ins_pipe( long_memory_op );
7211 7214 %}
7212 7215
// Compare-and-exchange of an int: CASA leaves the value previously in
// memory in the newval register (which is also the result).
7213 7216 instruct compareAndExchangeI(iRegP mem_ptr, iRegI oldval, iRegI newval)
7214 7217 %{
7215 7218 match(Set newval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
7216 7219 effect( USE mem_ptr );
7217 7220
7218 7221 format %{
7219 7222 "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
7220 7223 %}
7221 7224 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
7222 7225 ins_pipe( long_memory_op );
7223 7226 %}
7224 7227
// Compare-and-exchange of a long via CASXA; old memory value ends up in
// the newval register (the result).
7225 7228 instruct compareAndExchangeL(iRegP mem_ptr, iRegL oldval, iRegL newval)
7226 7229 %{
7227 7230 match(Set newval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
7228 7231 effect( USE mem_ptr );
7229 7232
7230 7233 format %{
7231 7234 "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
7232 7235 %}
7233 7236 ins_encode( enc_casx_exch(mem_ptr, oldval, newval) );
7234 7237 ins_pipe( long_memory_op );
7235 7238 %}
7236 7239
// Compare-and-exchange of a pointer via CASXA; old memory value ends up
// in the newval register (the result).
7237 7240 instruct compareAndExchangeP(iRegP mem_ptr, iRegP oldval, iRegP newval)
7238 7241 %{
7239 7242 match(Set newval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
7240 7243 effect( USE mem_ptr );
7241 7244
7242 7245 format %{
7243 7246 "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
7244 7247 %}
7245 7248 ins_encode( enc_casx_exch(mem_ptr, oldval, newval) );
7246 7249 ins_pipe( long_memory_op );
7247 7250 %}
7248 7251
// Compare-and-exchange of a narrow oop via 32-bit CASA; old memory value
// ends up in the newval register (the result).
7249 7252 instruct compareAndExchangeN(iRegP mem_ptr, iRegN oldval, iRegN newval)
7250 7253 %{
7251 7254 match(Set newval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
7252 7255 effect( USE mem_ptr );
7253 7256
7254 7257 format %{
7255 7258 "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
7256 7259 %}
7257 7260 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
7258 7261 ins_pipe( long_memory_op );
7259 7262 %}
7260 7263
// Atomic exchange of an int (GetAndSetI) via the SWAP instruction.
7261 7264 instruct xchgI( memory mem, iRegI newval) %{
7262 7265 match(Set newval (GetAndSetI mem newval));
7263 7266 format %{ "SWAP [$mem],$newval" %}
7264 7267 size(4);
7265 7268 ins_encode %{
7266 7269 __ swap($mem$$Address, $newval$$Register);
7267 7270 %}
7268 7271 ins_pipe( long_memory_op );
7269 7272 %}
7270 7273
// Atomic exchange of a pointer; 32-bit builds only (#ifndef _LP64) since
// SWAP operates on 32-bit words.
7271 7274 #ifndef _LP64
7272 7275 instruct xchgP( memory mem, iRegP newval) %{
7273 7276 match(Set newval (GetAndSetP mem newval));
7274 7277 format %{ "SWAP [$mem],$newval" %}
7275 7278 size(4);
7276 7279 ins_encode %{
7277 7280 __ swap($mem$$Address, $newval$$Register);
7278 7281 %}
7279 7282 ins_pipe( long_memory_op );
7280 7283 %}
7281 7284 #endif
7282 7285
// Atomic exchange of a narrow oop (GetAndSetN) via SWAP (32-bit value).
7283 7286 instruct xchgN( memory mem, iRegN newval) %{
7284 7287 match(Set newval (GetAndSetN mem newval));
7285 7288 format %{ "SWAP [$mem],$newval" %}
7286 7289 size(4);
7287 7290 ins_encode %{
7288 7291 __ swap($mem$$Address, $newval$$Register);
7289 7292 %}
7290 7293 ins_pipe( long_memory_op );
7291 7294 %}
7292 7295
7293 7296 //---------------------
7294 7297 // Subtraction Instructions
7295 7298 // Register Subtraction
// Integer subtract, register - register.
7296 7299 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7297 7300 match(Set dst (SubI src1 src2));
7298 7301
7299 7302 size(4);
7300 7303 format %{ "SUB $src1,$src2,$dst" %}
7301 7304 opcode(Assembler::sub_op3, Assembler::arith_op);
7302 7305 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7303 7306 ins_pipe(ialu_reg_reg);
7304 7307 %}
7305 7308
7306 7309 // Immediate Subtraction
// Integer subtract, register - 13-bit signed immediate.
7307 7310 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7308 7311 match(Set dst (SubI src1 src2));
7309 7312
7310 7313 size(4);
7311 7314 format %{ "SUB $src1,$src2,$dst" %}
7312 7315 opcode(Assembler::sub_op3, Assembler::arith_op);
7313 7316 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7314 7317 ins_pipe(ialu_reg_imm);
7315 7318 %}
7316 7319
7317 7320 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
7318 7321 match(Set dst (SubI zero src2));
7319 7322
7320 7323 size(4);
7321 7324 format %{ "NEG $src2,$dst" %}
7322 7325 opcode(Assembler::sub_op3, Assembler::arith_op);
7323 7326 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7324 7327 ins_pipe(ialu_zero_reg);
7325 7328 %}
7326 7329
7327 7330 // Long subtraction
7328 7331 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7329 7332 match(Set dst (SubL src1 src2));
7330 7333
7331 7334 size(4);
7332 7335 format %{ "SUB $src1,$src2,$dst\t! long" %}
7333 7336 opcode(Assembler::sub_op3, Assembler::arith_op);
7334 7337 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7335 7338 ins_pipe(ialu_reg_reg);
7336 7339 %}
7337 7340
7338 7341 // Immediate Subtraction
7339 7342 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7340 7343 match(Set dst (SubL src1 con));
7341 7344
7342 7345 size(4);
7343 7346 format %{ "SUB $src1,$con,$dst\t! long" %}
7344 7347 opcode(Assembler::sub_op3, Assembler::arith_op);
7345 7348 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7346 7349 ins_pipe(ialu_reg_imm);
7347 7350 %}
7348 7351
7349 7352 // Long negation
7350 7353 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
7351 7354 match(Set dst (SubL zero src2));
7352 7355
7353 7356 size(4);
7354 7357 format %{ "NEG $src2,$dst\t! long" %}
7355 7358 opcode(Assembler::sub_op3, Assembler::arith_op);
7356 7359 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7357 7360 ins_pipe(ialu_zero_reg);
7358 7361 %}
7359 7362
7360 7363 // Multiplication Instructions
7361 7364 // Integer Multiplication
7362 7365 // Register Multiplication
7363 7366 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ // int multiply, register x register
7364 7367 match(Set dst (MulI src1 src2));
7365 7368 
7366 7369 size(4); // single 4-byte instruction
7367 7370 format %{ "MULX $src1,$src2,$dst" %}
7368 7371 opcode(Assembler::mulx_op3, Assembler::arith_op); // same MULX used for MulL below; int result is the low 32 bits -- TODO confirm vs. SPARC manual
7369 7372 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7370 7373 ins_pipe(imul_reg_reg);
7371 7374 %}
7372 7375
7373 7376 // Immediate Multiplication
7374 7377 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7375 7378 match(Set dst (MulI src1 src2));
7376 7379
7377 7380 size(4);
7378 7381 format %{ "MULX $src1,$src2,$dst" %}
7379 7382 opcode(Assembler::mulx_op3, Assembler::arith_op);
7380 7383 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7381 7384 ins_pipe(imul_reg_imm);
7382 7385 %}
7383 7386
7384 7387 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7385 7388 match(Set dst (MulL src1 src2));
7386 7389 ins_cost(DEFAULT_COST * 5);
7387 7390 size(4);
7388 7391 format %{ "MULX $src1,$src2,$dst\t! long" %}
7389 7392 opcode(Assembler::mulx_op3, Assembler::arith_op);
7390 7393 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7391 7394 ins_pipe(mulL_reg_reg);
7392 7395 %}
7393 7396
7394 7397 // Immediate Multiplication
7395 7398 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ // long multiply by 13-bit signed immediate
7396 7399 match(Set dst (MulL src1 src2));
7397 7400 ins_cost(DEFAULT_COST * 5);
7398 7401 size(4);
7399 7402 format %{ "MULX $src1,$src2,$dst\t! long" %} // "! long" tag added for consistency with mulL_reg_reg and the other long-form peers
7400 7403 opcode(Assembler::mulx_op3, Assembler::arith_op);
7401 7404 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7402 7405 ins_pipe(mulL_reg_imm);
7403 7406 %}
7404 7407
7405 7408 // Integer Division
7406 7409 // Register Division
7407 7410 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ // int divide via 64-bit SDIVX; both inputs are sign-extended in place, hence iRegIsafe (srcs clobbered)
7408 7411 match(Set dst (DivI src1 src2));
7409 7412 ins_cost((2+71)*DEFAULT_COST); // two sign-extends plus the divide itself
7410 7413 
7411 7414 format %{ "SRA $src2,0,$src2\n\t"
7412 7415 "SRA $src1,0,$src1\n\t"
7413 7416 "SDIVX $src1,$src2,$dst" %}
7414 7417 ins_encode( idiv_reg( src1, src2, dst ) );
7415 7418 ins_pipe(sdiv_reg_reg);
7416 7419 %}
7417 7420
7418 7421 // Immediate Division
7419 7422 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
7420 7423 match(Set dst (DivI src1 src2));
7421 7424 ins_cost((2+71)*DEFAULT_COST);
7422 7425
7423 7426 format %{ "SRA $src1,0,$src1\n\t"
7424 7427 "SDIVX $src1,$src2,$dst" %}
7425 7428 ins_encode( idiv_imm( src1, src2, dst ) );
7426 7429 ins_pipe(sdiv_reg_imm);
7427 7430 %}
7428 7431
7429 7432 //----------Div-By-10-Expansion------------------------------------------------
7430 7433 // Extract hi bits of a 32x32->64 bit multiply.
7431 7434 // Expand rule only, not matched
7432 7435 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
7433 7436 effect( DEF dst, USE src1, USE src2 );
7434 7437 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
7435 7438 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
7436 7439 ins_encode( enc_mul_hi(dst,src1,src2));
7437 7440 ins_pipe(sdiv_reg_reg);
7438 7441 %}
7439 7442
7440 7443 // Magic constant, reciprocal of 10
7441 7444 instruct loadConI_x66666667(iRegIsafe dst) %{
7442 7445 effect( DEF dst );
7443 7446
7444 7447 size(8);
7445 7448 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
7446 7449 ins_encode( Set32(0x66666667, dst) );
7447 7450 ins_pipe(ialu_hi_lo_reg);
7448 7451 %}
7449 7452
7450 7453 // Register Shift Right Arithmetic Long by 32-63
7451 7454 instruct sra_31( iRegI dst, iRegI src ) %{
7452 7455 effect( DEF dst, USE src );
7453 7456 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
7454 7457 ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
7455 7458 ins_pipe(ialu_reg_reg);
7456 7459 %}
7457 7460
7458 7461 // Arithmetic Shift Right by constant 2 (expand-only helper for the div-by-10 sequence)
7459 7462 instruct sra_reg_2( iRegI dst, iRegI src ) %{
7460 7463 effect( DEF dst, USE src );
7461 7464 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
7462 7465 opcode(Assembler::sra_op3, Assembler::arith_op);
7463 7466 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
7464 7467 ins_pipe(ialu_reg_imm);
7465 7468 %}
7466 7469
7467 7470 // Integer DIV with 10
7468 7471 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
7469 7472 match(Set dst (DivI src div));
7470 7473 ins_cost((6+6)*DEFAULT_COST);
7471 7474 expand %{
7472 7475 iRegIsafe tmp1; // Killed temps;
7473 7476 iRegIsafe tmp2; // Killed temps;
7474 7477 iRegI tmp3; // Killed temps;
7475 7478 iRegI tmp4; // Killed temps;
7476 7479 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1
7477 7480 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2
7478 7481 sra_31( tmp3, src ); // SRA src,31 -> tmp3
7479 7482 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4
7480 7483 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
7481 7484 %}
7482 7485 %}
7483 7486
7484 7487 // Register Long Division
7485 7488 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7486 7489 match(Set dst (DivL src1 src2));
7487 7490 ins_cost(DEFAULT_COST*71);
7488 7491 size(4);
7489 7492 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7490 7493 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7491 7494 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7492 7495 ins_pipe(divL_reg_reg);
7493 7496 %}
7494 7497
7495 7498 // Register Long Division
7496 7499 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7497 7500 match(Set dst (DivL src1 src2));
7498 7501 ins_cost(DEFAULT_COST*71);
7499 7502 size(4);
7500 7503 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7501 7504 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7502 7505 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7503 7506 ins_pipe(divL_reg_imm);
7504 7507 %}
7505 7508
7506 7509 // Integer Remainder
7507 7510 // Register Remainder
7508 7511 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ // int remainder; "SREM" is a pseudo-op emitted by the irem_reg encoding -- TODO confirm expansion details in enc block
7509 7512 match(Set dst (ModI src1 src2));
7510 7513 effect( KILL ccr, KILL temp); // encoding scratches O7 (temp) and the condition codes
7511 7514 
7512 7515 format %{ "SREM $src1,$src2,$dst" %}
7513 7516 ins_encode( irem_reg(src1, src2, dst, temp) );
7514 7517 ins_pipe(sdiv_reg_reg);
7515 7518 %}
7516 7519
7517 7520 // Immediate Remainder
7518 7521 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
7519 7522 match(Set dst (ModI src1 src2));
7520 7523 effect( KILL ccr, KILL temp);
7521 7524
7522 7525 format %{ "SREM $src1,$src2,$dst" %}
7523 7526 ins_encode( irem_imm(src1, src2, dst, temp) );
7524 7527 ins_pipe(sdiv_reg_imm);
7525 7528 %}
7526 7529
7527 7530 // Divide helper used by the long-remainder expansion (expand-only)
7528 7531 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7529 7532 effect(DEF dst, USE src1, USE src2);
7530 7533 size(4);
7531 7534 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7532 7535 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7533 7536 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7534 7537 ins_pipe(divL_reg_reg);
7535 7538 %}
7536 7539
7537 7540 // Register Long Division
7538 7541 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7539 7542 effect(DEF dst, USE src1, USE src2);
7540 7543 size(4);
7541 7544 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7542 7545 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7543 7546 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7544 7547 ins_pipe(divL_reg_imm);
7545 7548 %}
7546 7549
7547 7550 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7548 7551 effect(DEF dst, USE src1, USE src2);
7549 7552 size(4);
7550 7553 format %{ "MULX $src1,$src2,$dst\t! long" %}
7551 7554 opcode(Assembler::mulx_op3, Assembler::arith_op);
7552 7555 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7553 7556 ins_pipe(mulL_reg_reg);
7554 7557 %}
7555 7558
7556 7559 // Immediate Multiplication
7557 7560 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7558 7561 effect(DEF dst, USE src1, USE src2);
7559 7562 size(4);
7560 7563 format %{ "MULX $src1,$src2,$dst" %}
7561 7564 opcode(Assembler::mulx_op3, Assembler::arith_op);
7562 7565 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7563 7566 ins_pipe(mulL_reg_imm);
7564 7567 %}
7565 7568
7566 7569 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7567 7570 effect(DEF dst, USE src1, USE src2);
7568 7571 size(4);
7569 7572 format %{ "SUB $src1,$src2,$dst\t! long" %}
7570 7573 opcode(Assembler::sub_op3, Assembler::arith_op);
7571 7574 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7572 7575 ins_pipe(ialu_reg_reg);
7573 7576 %}
7574 7577
7575 7578 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
7576 7579 effect(DEF dst, USE src1, USE src2);
7577 7580 size(4);
7578 7581 format %{ "SUB $src1,$src2,$dst\t! long" %}
7579 7582 opcode(Assembler::sub_op3, Assembler::arith_op);
7580 7583 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7581 7584 ins_pipe(ialu_reg_reg);
7582 7585 %}
7583 7586
7584 7587 // Register Long Remainder
7585 7588 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ // long remainder: dst = src1 - (src1 / src2) * src2
7586 7589 match(Set dst (ModL src1 src2));
7587 7590 ins_cost(DEFAULT_COST*(71 + 6 + 1)); // divide + multiply + subtract
7588 7591 expand %{
7589 7592 iRegL tmp1; // tmp1 = src1 / src2
7590 7593 iRegL tmp2; // tmp2 = tmp1 * src2
7591 7594 divL_reg_reg_1(tmp1, src1, src2);
7592 7595 mulL_reg_reg_1(tmp2, tmp1, src2);
7593 7596 subL_reg_reg_1(dst, src1, tmp2);
7594 7597 %}
7595 7598 %}
7596 7599
7597 7600 // Register Long Remainder
7598 7601 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7599 7602 match(Set dst (ModL src1 src2));
7600 7603 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7601 7604 expand %{
7602 7605 iRegL tmp1;
7603 7606 iRegL tmp2;
7604 7607 divL_reg_imm13_1(tmp1, src1, src2);
7605 7608 mulL_reg_imm13_1(tmp2, tmp1, src2);
7606 7609 subL_reg_reg_2 (dst, src1, tmp2);
7607 7610 %}
7608 7611 %}
7609 7612
7610 7613 // Integer Shift Instructions
7611 7614 // Register Shift Left
7612 7615 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7613 7616 match(Set dst (LShiftI src1 src2));
7614 7617
7615 7618 size(4);
7616 7619 format %{ "SLL $src1,$src2,$dst" %}
7617 7620 opcode(Assembler::sll_op3, Assembler::arith_op);
7618 7621 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7619 7622 ins_pipe(ialu_reg_reg);
7620 7623 %}
7621 7624
7622 7625 // Register Shift Left Immediate
7623 7626 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7624 7627 match(Set dst (LShiftI src1 src2));
7625 7628
7626 7629 size(4);
7627 7630 format %{ "SLL $src1,$src2,$dst" %}
7628 7631 opcode(Assembler::sll_op3, Assembler::arith_op);
7629 7632 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7630 7633 ins_pipe(ialu_reg_imm);
7631 7634 %}
7632 7635
7633 7636 // Register Shift Left
7634 7637 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7635 7638 match(Set dst (LShiftL src1 src2));
7636 7639
7637 7640 size(4);
7638 7641 format %{ "SLLX $src1,$src2,$dst" %}
7639 7642 opcode(Assembler::sllx_op3, Assembler::arith_op);
7640 7643 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7641 7644 ins_pipe(ialu_reg_reg);
7642 7645 %}
7643 7646
7644 7647 // Register Shift Left Immediate
7645 7648 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7646 7649 match(Set dst (LShiftL src1 src2));
7647 7650
7648 7651 size(4);
7649 7652 format %{ "SLLX $src1,$src2,$dst" %}
7650 7653 opcode(Assembler::sllx_op3, Assembler::arith_op);
7651 7654 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7652 7655 ins_pipe(ialu_reg_imm);
7653 7656 %}
7654 7657
7655 7658 // Register Arithmetic Shift Right
7656 7659 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7657 7660 match(Set dst (RShiftI src1 src2));
7658 7661 size(4);
7659 7662 format %{ "SRA $src1,$src2,$dst" %}
7660 7663 opcode(Assembler::sra_op3, Assembler::arith_op);
7661 7664 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7662 7665 ins_pipe(ialu_reg_reg);
7663 7666 %}
7664 7667
7665 7668 // Register Arithmetic Shift Right Immediate
7666 7669 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7667 7670 match(Set dst (RShiftI src1 src2));
7668 7671
7669 7672 size(4);
7670 7673 format %{ "SRA $src1,$src2,$dst" %}
7671 7674 opcode(Assembler::sra_op3, Assembler::arith_op);
7672 7675 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7673 7676 ins_pipe(ialu_reg_imm);
7674 7677 %}
7675 7678
7676 7679 // Register Shift Right Arithmetic Long
7677 7680 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7678 7681 match(Set dst (RShiftL src1 src2));
7679 7682
7680 7683 size(4);
7681 7684 format %{ "SRAX $src1,$src2,$dst" %}
7682 7685 opcode(Assembler::srax_op3, Assembler::arith_op);
7683 7686 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7684 7687 ins_pipe(ialu_reg_reg);
7685 7688 %}
7686 7689
7687 7690 // Register Shift Right Arithmetic Long Immediate
7688 7691 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7689 7692 match(Set dst (RShiftL src1 src2));
7690 7693
7691 7694 size(4);
7692 7695 format %{ "SRAX $src1,$src2,$dst" %}
7693 7696 opcode(Assembler::srax_op3, Assembler::arith_op);
7694 7697 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7695 7698 ins_pipe(ialu_reg_imm);
7696 7699 %}
7697 7700
7698 7701 // Register Shift Right
7699 7702 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7700 7703 match(Set dst (URShiftI src1 src2));
7701 7704
7702 7705 size(4);
7703 7706 format %{ "SRL $src1,$src2,$dst" %}
7704 7707 opcode(Assembler::srl_op3, Assembler::arith_op);
7705 7708 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7706 7709 ins_pipe(ialu_reg_reg);
7707 7710 %}
7708 7711
7709 7712 // Register Shift Right Immediate
7710 7713 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7711 7714 match(Set dst (URShiftI src1 src2));
7712 7715
7713 7716 size(4);
7714 7717 format %{ "SRL $src1,$src2,$dst" %}
7715 7718 opcode(Assembler::srl_op3, Assembler::arith_op);
7716 7719 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7717 7720 ins_pipe(ialu_reg_imm);
7718 7721 %}
7719 7722
7720 7723 // Register Shift Right
7721 7724 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7722 7725 match(Set dst (URShiftL src1 src2));
7723 7726
7724 7727 size(4);
7725 7728 format %{ "SRLX $src1,$src2,$dst" %}
7726 7729 opcode(Assembler::srlx_op3, Assembler::arith_op);
7727 7730 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7728 7731 ins_pipe(ialu_reg_reg);
7729 7732 %}
7730 7733
7731 7734 // Register Shift Right Immediate
7732 7735 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7733 7736 match(Set dst (URShiftL src1 src2));
7734 7737
7735 7738 size(4);
7736 7739 format %{ "SRLX $src1,$src2,$dst" %}
7737 7740 opcode(Assembler::srlx_op3, Assembler::arith_op);
7738 7741 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7739 7742 ins_pipe(ialu_reg_imm);
7740 7743 %}
7741 7744
7742 7745 // Register Shift Right Immediate with a CastP2X
7743 7746 #ifdef _LP64
7744 7747 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7745 7748 match(Set dst (URShiftL (CastP2X src1) src2));
7746 7749 size(4);
7747 7750 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7748 7751 opcode(Assembler::srlx_op3, Assembler::arith_op);
7749 7752 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7750 7753 ins_pipe(ialu_reg_imm);
7751 7754 %}
7752 7755 #else
7753 7756 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7754 7757 match(Set dst (URShiftI (CastP2X src1) src2));
7755 7758 size(4);
7756 7759 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7757 7760 opcode(Assembler::srl_op3, Assembler::arith_op);
7758 7761 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7759 7762 ins_pipe(ialu_reg_imm);
7760 7763 %}
7761 7764 #endif
7762 7765
7763 7766
7764 7767 //----------Floating Point Arithmetic Instructions-----------------------------
7765 7768
7766 7769 // Add float single precision
7767 7770 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7768 7771 match(Set dst (AddF src1 src2));
7769 7772
7770 7773 size(4);
7771 7774 format %{ "FADDS $src1,$src2,$dst" %}
7772 7775 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7773 7776 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7774 7777 ins_pipe(faddF_reg_reg);
7775 7778 %}
7776 7779
7777 7780 // Add float double precision
7778 7781 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7779 7782 match(Set dst (AddD src1 src2));
7780 7783
7781 7784 size(4);
7782 7785 format %{ "FADDD $src1,$src2,$dst" %}
7783 7786 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
7784 7787 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7785 7788 ins_pipe(faddD_reg_reg);
7786 7789 %}
7787 7790
7788 7791 // Sub float single precision
7789 7792 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
7790 7793 match(Set dst (SubF src1 src2));
7791 7794
7792 7795 size(4);
7793 7796 format %{ "FSUBS $src1,$src2,$dst" %}
7794 7797 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf);
7795 7798 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7796 7799 ins_pipe(faddF_reg_reg);
7797 7800 %}
7798 7801
7799 7802 // Sub float double precision
7800 7803 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
7801 7804 match(Set dst (SubD src1 src2));
7802 7805
7803 7806 size(4);
7804 7807 format %{ "FSUBD $src1,$src2,$dst" %}
7805 7808 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
7806 7809 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7807 7810 ins_pipe(faddD_reg_reg);
7808 7811 %}
7809 7812
7810 7813 // Mul float single precision
7811 7814 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
7812 7815 match(Set dst (MulF src1 src2));
7813 7816
7814 7817 size(4);
7815 7818 format %{ "FMULS $src1,$src2,$dst" %}
7816 7819 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf);
7817 7820 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7818 7821 ins_pipe(fmulF_reg_reg);
7819 7822 %}
7820 7823
7821 7824 // Mul float double precision
7822 7825 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
7823 7826 match(Set dst (MulD src1 src2));
7824 7827
7825 7828 size(4);
7826 7829 format %{ "FMULD $src1,$src2,$dst" %}
7827 7830 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
7828 7831 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7829 7832 ins_pipe(fmulD_reg_reg);
7830 7833 %}
7831 7834
7832 7835 // Div float single precision
7833 7836 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
7834 7837 match(Set dst (DivF src1 src2));
7835 7838
7836 7839 size(4);
7837 7840 format %{ "FDIVS $src1,$src2,$dst" %}
7838 7841 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf);
7839 7842 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7840 7843 ins_pipe(fdivF_reg_reg);
7841 7844 %}
7842 7845
7843 7846 // Div float double precision
7844 7847 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
7845 7848 match(Set dst (DivD src1 src2));
7846 7849
7847 7850 size(4);
7848 7851 format %{ "FDIVD $src1,$src2,$dst" %}
7849 7852 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf);
7850 7853 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7851 7854 ins_pipe(fdivD_reg_reg);
7852 7855 %}
7853 7856
7854 7857 // Absolute float double precision
7855 7858 instruct absD_reg(regD dst, regD src) %{
7856 7859 match(Set dst (AbsD src));
7857 7860
7858 7861 format %{ "FABSd $src,$dst" %}
7859 7862 ins_encode(fabsd(dst, src));
7860 7863 ins_pipe(faddD_reg);
7861 7864 %}
7862 7865
7863 7866 // Absolute float single precision
7864 7867 instruct absF_reg(regF dst, regF src) %{
7865 7868 match(Set dst (AbsF src));
7866 7869
7867 7870 format %{ "FABSs $src,$dst" %}
7868 7871 ins_encode(fabss(dst, src));
7869 7872 ins_pipe(faddF_reg);
7870 7873 %}
7871 7874
7872 7875 instruct negF_reg(regF dst, regF src) %{
7873 7876 match(Set dst (NegF src));
7874 7877
7875 7878 size(4);
7876 7879 format %{ "FNEGs $src,$dst" %}
7877 7880 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf);
7878 7881 ins_encode(form3_opf_rs2F_rdF(src, dst));
7879 7882 ins_pipe(faddF_reg);
7880 7883 %}
7881 7884
7882 7885 instruct negD_reg(regD dst, regD src) %{
7883 7886 match(Set dst (NegD src));
7884 7887
7885 7888 format %{ "FNEGd $src,$dst" %}
7886 7889 ins_encode(fnegd(dst, src));
7887 7890 ins_pipe(faddD_reg);
7888 7891 %}
7889 7892
7890 7893 // Sqrt float single precision
7891 7894 instruct sqrtF_reg_reg(regF dst, regF src) %{
7892 7895 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); // fold float->double, double sqrt, double->float back into a single FSQRTS
7893 7896 
7894 7897 size(4);
7895 7898 format %{ "FSQRTS $src,$dst" %}
7896 7899 ins_encode(fsqrts(dst, src));
7897 7900 ins_pipe(fdivF_reg_reg); // scheduled on the FP divide pipeline
7898 7901 %}
7899 7902
7900 7903 // Sqrt float double precision
7901 7904 instruct sqrtD_reg_reg(regD dst, regD src) %{
7902 7905 match(Set dst (SqrtD src));
7903 7906
7904 7907 size(4);
7905 7908 format %{ "FSQRTD $src,$dst" %}
7906 7909 ins_encode(fsqrtd(dst, src));
7907 7910 ins_pipe(fdivD_reg_reg);
7908 7911 %}
7909 7912
7910 7913 //----------Logical Instructions-----------------------------------------------
7911 7914 // And Instructions
7912 7915 // Register And
7913 7916 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7914 7917 match(Set dst (AndI src1 src2));
7915 7918
7916 7919 size(4);
7917 7920 format %{ "AND $src1,$src2,$dst" %}
7918 7921 opcode(Assembler::and_op3, Assembler::arith_op);
7919 7922 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7920 7923 ins_pipe(ialu_reg_reg);
7921 7924 %}
7922 7925
7923 7926 // Immediate And
7924 7927 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7925 7928 match(Set dst (AndI src1 src2));
7926 7929
7927 7930 size(4);
7928 7931 format %{ "AND $src1,$src2,$dst" %}
7929 7932 opcode(Assembler::and_op3, Assembler::arith_op);
7930 7933 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7931 7934 ins_pipe(ialu_reg_imm);
7932 7935 %}
7933 7936
7934 7937 // Register And Long
7935 7938 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7936 7939 match(Set dst (AndL src1 src2));
7937 7940
7938 7941 ins_cost(DEFAULT_COST);
7939 7942 size(4);
7940 7943 format %{ "AND $src1,$src2,$dst\t! long" %}
7941 7944 opcode(Assembler::and_op3, Assembler::arith_op);
7942 7945 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7943 7946 ins_pipe(ialu_reg_reg);
7944 7947 %}
7945 7948
7946 7949 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7947 7950 match(Set dst (AndL src1 con));
7948 7951
7949 7952 ins_cost(DEFAULT_COST);
7950 7953 size(4);
7951 7954 format %{ "AND $src1,$con,$dst\t! long" %}
7952 7955 opcode(Assembler::and_op3, Assembler::arith_op);
7953 7956 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7954 7957 ins_pipe(ialu_reg_imm);
7955 7958 %}
7956 7959
7957 7960 // Or Instructions
7958 7961 // Register Or
7959 7962 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7960 7963 match(Set dst (OrI src1 src2));
7961 7964
7962 7965 size(4);
7963 7966 format %{ "OR $src1,$src2,$dst" %}
7964 7967 opcode(Assembler::or_op3, Assembler::arith_op);
7965 7968 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7966 7969 ins_pipe(ialu_reg_reg);
7967 7970 %}
7968 7971
7969 7972 // Immediate Or
7970 7973 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7971 7974 match(Set dst (OrI src1 src2));
7972 7975
7973 7976 size(4);
7974 7977 format %{ "OR $src1,$src2,$dst" %}
7975 7978 opcode(Assembler::or_op3, Assembler::arith_op);
7976 7979 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7977 7980 ins_pipe(ialu_reg_imm);
7978 7981 %}
7979 7982
7980 7983 // Register Or Long
7981 7984 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7982 7985 match(Set dst (OrL src1 src2));
7983 7986
7984 7987 ins_cost(DEFAULT_COST);
7985 7988 size(4);
7986 7989 format %{ "OR $src1,$src2,$dst\t! long" %}
7987 7990 opcode(Assembler::or_op3, Assembler::arith_op);
7988 7991 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7989 7992 ins_pipe(ialu_reg_reg);
7990 7993 %}
7991 7994
7992 7995 // Immediate Or Long
7993 7996 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7994 7997 match(Set dst (OrL src1 con));
7995 7998 
7996 7999 ins_cost(DEFAULT_COST); // fixed: removed a stray duplicate ins_cost(DEFAULT_COST*2) that conflicted with this one; andL/xorL imm13 peers declare a single DEFAULT_COST
7997 8000 size(4);
7998 8001 format %{ "OR $src1,$con,$dst\t! long" %}
7999 8002 opcode(Assembler::or_op3, Assembler::arith_op);
8000 8003 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8001 8004 ins_pipe(ialu_reg_imm);
8002 8005 %}
8003 8006
8004 8007 #ifndef _LP64
8005 8008
8006 8009 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
8007 8010 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
8008 8011 match(Set dst (OrI src1 (CastP2X src2)));
8009 8012
8010 8013 size(4);
8011 8014 format %{ "OR $src1,$src2,$dst" %}
8012 8015 opcode(Assembler::or_op3, Assembler::arith_op);
8013 8016 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8014 8017 ins_pipe(ialu_reg_reg);
8015 8018 %}
8016 8019
8017 8020 #else
8018 8021
8019 8022 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
8020 8023 match(Set dst (OrL src1 (CastP2X src2)));
8021 8024
8022 8025 ins_cost(DEFAULT_COST);
8023 8026 size(4);
8024 8027 format %{ "OR $src1,$src2,$dst\t! long" %}
8025 8028 opcode(Assembler::or_op3, Assembler::arith_op);
8026 8029 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8027 8030 ins_pipe(ialu_reg_reg);
8028 8031 %}
8029 8032
8030 8033 #endif
8031 8034
8032 8035 // Xor Instructions
8033 8036 // Register Xor
8034 8037 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8035 8038 match(Set dst (XorI src1 src2));
8036 8039
8037 8040 size(4);
8038 8041 format %{ "XOR $src1,$src2,$dst" %}
8039 8042 opcode(Assembler::xor_op3, Assembler::arith_op);
8040 8043 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8041 8044 ins_pipe(ialu_reg_reg);
8042 8045 %}
8043 8046
8044 8047 // Immediate Xor
8045 8048 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8046 8049 match(Set dst (XorI src1 src2));
8047 8050
8048 8051 size(4);
8049 8052 format %{ "XOR $src1,$src2,$dst" %}
8050 8053 opcode(Assembler::xor_op3, Assembler::arith_op);
8051 8054 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8052 8055 ins_pipe(ialu_reg_imm);
8053 8056 %}
8054 8057
8055 8058 // Register Xor Long
8056 8059 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8057 8060 match(Set dst (XorL src1 src2));
8058 8061
8059 8062 ins_cost(DEFAULT_COST);
8060 8063 size(4);
8061 8064 format %{ "XOR $src1,$src2,$dst\t! long" %}
8062 8065 opcode(Assembler::xor_op3, Assembler::arith_op);
8063 8066 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8064 8067 ins_pipe(ialu_reg_reg);
8065 8068 %}
8066 8069
8067 8070 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8068 8071 match(Set dst (XorL src1 con));
8069 8072
8070 8073 ins_cost(DEFAULT_COST);
8071 8074 size(4);
8072 8075 format %{ "XOR $src1,$con,$dst\t! long" %}
8073 8076 opcode(Assembler::xor_op3, Assembler::arith_op);
8074 8077 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8075 8078 ins_pipe(ialu_reg_imm);
8076 8079 %}
8077 8080
8078 8081 //----------Convert to Boolean-------------------------------------------------
8079 8082 // Nice hack for 32-bit tests but doesn't work for
8080 8083 // 64-bit pointers.
8081 8084 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8082 8085 match(Set dst (Conv2B src));
8083 8086 effect( KILL ccr );
8084 8087 ins_cost(DEFAULT_COST*2);
8085 8088 format %{ "CMP R_G0,$src\n\t"
8086 8089 "ADDX R_G0,0,$dst" %}
8087 8090 ins_encode( enc_to_bool( src, dst ) );
8088 8091 ins_pipe(ialu_reg_ialu);
8089 8092 %}
8090 8093
8091 8094 #ifndef _LP64
8092 8095 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8093 8096 match(Set dst (Conv2B src));
8094 8097 effect( KILL ccr );
8095 8098 ins_cost(DEFAULT_COST*2);
8096 8099 format %{ "CMP R_G0,$src\n\t"
8097 8100 "ADDX R_G0,0,$dst" %}
8098 8101 ins_encode( enc_to_bool( src, dst ) );
8099 8102 ins_pipe(ialu_reg_ialu);
8100 8103 %}
8101 8104 #else
8102 8105 instruct convP2B( iRegI dst, iRegP src ) %{
8103 8106 match(Set dst (Conv2B src));
8104 8107 ins_cost(DEFAULT_COST*2);
8105 8108 format %{ "MOV $src,$dst\n\t"
8106 8109 "MOVRNZ $src,1,$dst" %}
8107 8110 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8108 8111 ins_pipe(ialu_clr_and_mover);
8109 8112 %}
8110 8113 #endif
8111 8114
8112 8115 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8113 8116 match(Set dst (CmpLTMask src zero));
8114 8117 effect(KILL ccr);
8115 8118 size(4);
8116 8119 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8117 8120 ins_encode %{
8118 8121 __ sra($src$$Register, 31, $dst$$Register);
8119 8122 %}
8120 8123 ins_pipe(ialu_reg_imm);
8121 8124 %}
8122 8125
8123 8126 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8124 8127 match(Set dst (CmpLTMask p q));
8125 8128 effect( KILL ccr );
8126 8129 ins_cost(DEFAULT_COST*4);
8127 8130 format %{ "CMP $p,$q\n\t"
8128 8131 "MOV #0,$dst\n\t"
8129 8132 "BLT,a .+8\n\t"
8130 8133 "MOV #-1,$dst" %}
8131 8134 ins_encode( enc_ltmask(p,q,dst) );
8132 8135 ins_pipe(ialu_reg_reg_ialu);
8133 8136 %}
8134 8137
8135 8138 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
8136 8139 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8137 8140 effect(KILL ccr, TEMP tmp);
8138 8141 ins_cost(DEFAULT_COST*3);
8139 8142
8140 8143 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t"
8141 8144 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t"
8142 8145 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %}
8143 8146 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp));
8144 8147 ins_pipe(cadd_cmpltmask);
8145 8148 %}
8146 8149
8147 8150 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{
8148 8151 match(Set p (AndI (CmpLTMask p q) y));
8149 8152 effect(KILL ccr);
8150 8153 ins_cost(DEFAULT_COST*3);
8151 8154
8152 8155 format %{ "CMP $p,$q\n\t"
8153 8156 "MOV $y,$p\n\t"
8154 8157 "MOVge G0,$p" %}
8155 8158 ins_encode %{
8156 8159 __ cmp($p$$Register, $q$$Register);
8157 8160 __ mov($y$$Register, $p$$Register);
8158 8161 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register);
8159 8162 %}
8160 8163 ins_pipe(ialu_reg_reg_ialu);
8161 8164 %}
8162 8165
//-----------------------------------------------------------------
// Direct raw (bit-for-bit) moves between float and general registers,
// available only with VIS3 (MOVSTOUW / MOVWTOS / MOVDTOX / MOVXTOD).

// Copy raw bits of a single float register into an int register.
instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (MoveF2I src));

  format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
  ins_encode %{
    __ movstouw($src$$FloatRegister, $dst$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Copy raw bits of an int register into a single float register.
instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (MoveI2F src));

  format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
  ins_encode %{
    __ movwtos($src$$Register, $dst$$FloatRegister);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Copy raw bits of a double register into a long register.
instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
  predicate(UseVIS >= 3);
  match(Set dst (MoveD2L src));

  format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
  ins_encode %{
    __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Copy raw bits of a long register into a double register.
instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (MoveL2D src));

  format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
  ins_encode %{
    __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}
8210 8213
8211 8214
// Raw moves between float and general registers using stack.
// Pre-VIS3 fallback: the value is stored to a stack slot in one register
// file and reloaded into the other; each instruct is a single load/store.

// Reload float bits from a stack slot into an int register.
instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LDUW $src,$dst\t! MoveF2I" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Reload int bits from a stack slot into a float register.
instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LDF $src,$dst\t! MoveI2F" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

// Reload double bits from a stack slot into a long register.
instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LDX $src,$dst\t! MoveD2L" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Reload long bits from a stack slot into a double register.
instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LDDF $src,$dst\t! MoveL2D" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

// Spill float bits to an int-typed stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STF $src,$dst\t! MoveF2I" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

// Spill int bits to a float-typed stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src,$dst\t! MoveI2F" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Spill double bits to a long-typed stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STDF $src,$dst\t! MoveD2L" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Spill long bits to a double-typed stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STX $src,$dst\t! MoveL2D" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
8301 8304
8302 8305
//----------Arithmetic Conversion Instructions---------------------------------
// The conversions operations are all Alpha sorted. Please keep it that way!

// Double -> float, single FDTOS.
instruct convD2F_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  size(4);
  format %{ "FDTOS $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
  ins_encode(form3_opf_rs2D_rdF(src, dst));
  ins_pipe(fcvtD2F);
%}


// Convert a double to an int in a float register.
// If the double is a NAN, stuff a zero in instead.
// (FCMPd of src against itself is unordered exactly when src is NaN; the
// FSUB in the fall-through path then zeroes the result.)
instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FDTOI $src,$dst\t! convert in delay slot\n\t"
            "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
            "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_d2i_helper(src,dst));
  ins_pipe(fcvtD2I);
%}

// ConvD2I with the int result delivered through a stack slot (no VIS3).
instruct convD2I_stk(stackSlotI dst, regD src) %{
  match(Set dst (ConvD2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convD2I_helper(tmp, src);
    regF_to_stkI(dst, tmp);
  %}
%}

// ConvD2I with a direct register-to-register move (VIS3 only).
instruct convD2I_reg(iRegI dst, regD src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvD2I src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convD2I_helper(tmp, src);
    MoveF2I_reg_reg(dst, tmp);
  %}
%}


// Convert a double to a long in a double register.
// If the double is a NAN, stuff a zero in instead.
instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FDTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_d2l_helper(src,dst));
  ins_pipe(fcvtD2L);
%}

// ConvD2L with the long result delivered through a stack slot (no VIS3).
instruct convD2L_stk(stackSlotL dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

// ConvD2L with a direct register-to-register move (VIS3 only).
instruct convD2L_reg(iRegL dst, regD src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}


// Float -> double, single FSTOD.
instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "FSTOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
  ins_encode(form3_opf_rs2F_rdD(src, dst));
  ins_pipe(fcvtF2D);
%}
8395 8398
8396 8399
// Convert a float to an int in a float register.
// If the float is a NAN, stuff a zero in instead.
// (Same compare-against-self NaN trick as the D2I helper above.)
instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOI $src,$dst\t! convert in delay slot\n\t"
            "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
            "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2i_helper(src,dst));
  ins_pipe(fcvtF2I);
%}

// ConvF2I with the int result delivered through a stack slot (no VIS3).
instruct convF2I_stk(stackSlotI dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    regF_to_stkI(dst, tmp);
  %}
%}

// ConvF2I with a direct register-to-register move (VIS3 only).
instruct convF2I_reg(iRegI dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    MoveF2I_reg_reg(dst, tmp);
  %}
%}


// Convert a float to a long in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2l_helper(src,dst));
  ins_pipe(fcvtF2L);
%}

// ConvF2L with the long result delivered through a stack slot (no VIS3).
instruct convF2L_stk(stackSlotL dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

// ConvF2L with a direct register-to-register move (VIS3 only).
instruct convF2L_reg(iRegL dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}
8467 8470

// Int (already sitting in a float register) -> double, single FITOD.
instruct convI2D_helper(regD dst, regF tmp) %{
  effect(USE tmp, DEF dst);
  format %{ "FITOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
  ins_pipe(fcvtI2D);
%}

// ConvI2D with the int argument coming from a stack slot (no VIS3).
instruct convI2D_stk(stackSlotI src, regD dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

// ConvI2D with a direct register-to-register move (VIS3 only).
instruct convI2D_reg(regD_low dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2D src));
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

// ConvI2D fused with the preceding LoadI: load straight into the FPU
// and convert there.
instruct convI2D_mem(regD_low dst, memory mem) %{
  match(Set dst (ConvI2D (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDF $mem,$dst\n\t"
            "FITOD $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}


// Int (already sitting in a float register) -> float, single FITOS.
instruct convI2F_helper(regF dst, regF tmp) %{
  effect(DEF dst, USE tmp);
  format %{ "FITOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
  ins_pipe(fcvtI2F);
%}

// ConvI2F with the int argument coming from a stack slot (no VIS3).
instruct convI2F_stk(regF dst, stackSlotI src) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp,src);
    convI2F_helper(dst, tmp);
  %}
%}

// ConvI2F with a direct register-to-register move (VIS3 only).
instruct convI2F_reg(regF dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2F_helper(dst, tmp);
  %}
%}

// ConvI2F fused with the preceding LoadI.
instruct convI2F_mem( regF dst, memory mem ) %{
  match(Set dst (ConvI2F (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDF $mem,$dst\n\t"
            "FITOS $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}
8546 8549

// Sign-extend int to long: SRA by zero (G0) extends bit 31 through
// the upper word.
instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
  size(4);
  format %{ "SRA $src,0,$dst\t! int->long" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
// (matched when the result is immediately masked with 0xFFFFFFFF).
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend long
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! zero-extend long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}
8576 8579
8577 8580
//-----------
// Long to Double conversion using V8 opcodes.
// Still useful because cheetah traps and becomes
// amazingly slow for some common numbers.
// The result is assembled as hi*2^32 + lo using the classic
// exponent-bias constants 0x43300000 (2^52) and 0x41f00000 (2^32).

// Magic constant, 0x43300000
instruct loadConI_x43300000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
  ins_encode(SetHi22(0x43300000, dst));
  ins_pipe(ialu_none);
%}

// Magic constant, 0x41f00000
instruct loadConI_x41f00000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
  ins_encode(SetHi22(0x41f00000, dst));
  ins_pipe(ialu_none);
%}

// Construct a double from two float halves
instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(8);
  format %{ "FMOVS $src1.hi,$dst.hi\n\t"
            "FMOVS $src2.lo,$dst.lo" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
  ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Convert integer in high half of a double register (in the lower half of
// the double register file) to double
instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
  effect(DEF dst, USE src);
  size(4);
  format %{ "FITOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2D_rdD(src, dst));
  ins_pipe(fcvtLHi2D);
%}

// Add float double precision
instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FADDD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Sub float double precision
instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FSUBD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Mul float double precision
instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FMULD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(fmulD_reg_reg);
%}

// Slow ConvL2D: dst = (hi(src) converted as int)*2^32
//                     + ((2^52-biased lo(src)) - 2^52-bias).
instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);

  expand %{
    regD_low tmpsrc;
    iRegI ix43300000;
    iRegI ix41f00000;
    stackSlotL lx43300000;
    stackSlotL lx41f00000;
    regD_low dx43300000;
    regD dx41f00000;
    regD tmp1;
    regD_low tmp2;
    regD tmp3;
    regD tmp4;

    stkL_to_regD(tmpsrc, src);

    loadConI_x43300000(ix43300000);
    loadConI_x41f00000(ix41f00000);
    regI_to_stkLHi(lx43300000, ix43300000);
    regI_to_stkLHi(lx41f00000, ix41f00000);
    stkL_to_regD(dx43300000, lx43300000);
    stkL_to_regD(dx41f00000, lx41f00000);

    convI2D_regDHi_regD(tmp1, tmpsrc);
    regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
    subD_regD_regD(tmp3, tmp2, dx43300000);
    mulD_regD_regD(tmp4, tmp1, dx41f00000);
    addD_regD_regD(dst, tmp3, tmp4);
  %}
%}
8686 8689
// Long to Double conversion using fast fxtof
// (long already sitting in a double register -> double, single FXTOD).
instruct convL2D_helper(regD dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
  ins_encode(form3_opf_rs2D_rdD(tmp, dst));
  ins_pipe(fcvtL2D);
%}

// ConvL2D via stack slot; only used when fxtof is fast on this CPU
// (otherwise convL2D_reg_slow_fxtof above applies).
instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
  predicate(VM_Version::has_fast_fxtof());
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}

// ConvL2D with a direct register-to-register move (VIS3 only).
instruct convL2D_reg(regD dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2D src));
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}

// Long to Float conversion using fast fxtof
instruct convL2F_helper(regF dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
  ins_encode(form3_opf_rs2D_rdF(tmp, dst));
  ins_pipe(fcvtL2F);
%}

// ConvL2F via stack slot (no slow-path alternative exists for L2F).
instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}

// ConvL2F with a direct register-to-register move (VIS3 only).
instruct convL2F_reg(regF dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}
8748 8751
//-----------

// Long -> int: 32-bit move of the low word (32-bit VM) or a sign-extending
// shift (64-bit VM).
instruct convL2I_reg(iRegI dst, iRegL src) %{
  match(Set dst (ConvL2I src));
#ifndef _LP64
  format %{ "MOV $src.lo,$dst\t! long->int" %}
  ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
  ins_pipe(ialu_move_reg_I_to_L);
#else
  size(4);
  format %{ "SRA $src,R_G0,$dst\t! long->int" %}
  ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
  ins_pipe(ialu_reg);
#endif
%}

// Register Shift Right Immediate
// (ConvL2I of a long shifted by 32..63 collapses to a single SRAX).
instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
  match(Set dst (ConvL2I (RShiftL src cnt)));

  size(4);
  format %{ "SRAX $src,$cnt,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
  ins_pipe(ialu_reg_imm);
%}
8775 8778
//----------Control Flow Instructions------------------------------------------
// Compare Instructions
// All compares are SUBcc/ANDcc into %g0: only the condition codes are kept.

// Compare Integers
instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "CMP $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare unsigned ints; same SUBcc, consumers read the unsigned flags.
instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare int against a simm13 immediate.
instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "CMP $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Bit-test: (op1 & op2) compared against zero, via ANDcc into %g0.
instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));

  size(4);
  format %{ "BTST $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg_zero);
%}

// Bit-test against an immediate mask.
instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));

  size(4);
  format %{ "BTST $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm_zero);
%}

// Compare longs; consumers read the 64-bit (xcc) flags.
instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );

  size(4);
  format %{ "CMP $op1,$op2\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare long against a simm13 immediate.
instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );

  size(4);
  format %{ "CMP $op1,$con\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Long bit-test.
instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 op2) zero));
  effect( DEF xcc, USE op1, USE op2 );

  size(4);
  format %{ "BTST $op1,$op2\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// useful for checking the alignment of a pointer:
instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 con) zero));
  effect( DEF xcc, USE op1, USE con );

  size(4);
  format %{ "BTST $op1,$con\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare unsigned int against a 12-bit unsigned immediate (immU12 keeps
// the constant non-negative so SUBcc's simm13 field is valid).
instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}
8885 8888
// Compare Pointers
instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare pointer against a simm13 immediate (typically NULL).
instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Narrow oops
instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
  match(Set icc (CmpN op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare narrow oop against the compressed null constant.
instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
  match(Set icc (CmpN op1 op2));

  size(4);
  format %{ "CMP $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}
8927 8930
//----------Max and Min--------------------------------------------------------
// Min Instructions
// Conditional move for min: $op2 = min($op1,$op2) given flags from a
// prior compare (single MOVlt).
instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );

  size(4);
  format %{ "MOVlt icc,$op1,$op2\t! min" %}
  opcode(Assembler::less);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}

// Min Register with Register.
// MinI expands to compare + conditional move.
instruct minI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MinI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_lt(op2,op1,icc);
  %}
%}
8951 8954
// Max Instructions
// Conditional move for max: $op2 = max($op1,$op2) given flags from a
// prior compare.  The encoding emits exactly one 4-byte MOVgt, so declare
// size(4) — matching the parallel cmovI_reg_lt min instruction, which the
// original omitted here.
instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );
  size(4);
  format %{ "MOVgt icc,$op1,$op2\t! max" %}
  opcode(Assembler::greater);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}
8961 8964
// Max Register with Register
// MaxI expands to compare + conditional move.
instruct maxI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MaxI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_gt(op2,op1,icc);
  %}
%}
8972 8975

//----------Float Compares----------------------------------------------------
// Compare floating, generate condition code
instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
  match(Set fcc (CmpF src1 src2));

  size(4);
  format %{ "FCMPs $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
  ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
  ins_pipe(faddF_fcc_reg_reg_zero);
%}

// Compare doubles, generate condition code (FCMPd).
instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
  match(Set fcc (CmpD src1 src2));

  size(4);
  format %{ "FCMPd $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
  ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
  ins_pipe(faddD_fcc_reg_reg_zero);
%}


// Compare floating, generate -1,0,1
// (three-way CmpF3 result in an int register; clobbers fcc0).
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "fcmpl $dst,$src1,$src2" %}
  // Primary = float
  opcode( true );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}

// Three-way CmpD3; same encoding with the double flavor selected.
instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "dcmpl $dst,$src1,$src2" %}
  // Primary = double (not float)
  opcode( false );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}
9019 9022
//----------Branches---------------------------------------------------------
// Jump
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
// Table jump: load target address from the constant table indexed by
// $switch_val, then jump through O7 (with delay-slot nop).
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
  match(Jump switch_val);
  effect(TEMP table);

  ins_cost(350);

  format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
            "LD [O7 + $switch_val], O7\n\t"
            "JUMP O7" %}
  ins_encode %{
    // Calculate table address into a register.
    Register table_reg;
    Register label_reg = O7;
    // If we are calculating the size of this instruction don't trust
    // zero offsets because they might change when
    // MachConstantBaseNode decides to optimize the constant table
    // base.
    if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
      table_reg = $constanttablebase;
    } else {
      table_reg = O7;
      RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
      __ add($constanttablebase, con_offset, table_reg);
    }

    // Jump to base address + switch value
    __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
    __ jmp(label_reg, G0);
    __ delayed()->nop();
  %}
  ins_pipe(ialu_reg_reg);
%}
9055 9058
9056 9059 // Direct Branch. Use V8 version with longer range.
// Unconditional branch: BA plus a delayed-slot NOP (8 bytes).
9057 9060 instruct branch(label labl) %{
9058 9061 match(Goto);
9059 9062 effect(USE labl);
9060 9063 
9061 9064 size(8);
9062 9065 ins_cost(BRANCH_COST);
9063 9066 format %{ "BA $labl" %}
9064 9067 ins_encode %{
9065 9068 Label* L = $labl$$label;
9066 9069 __ ba(*L);
9067 9070 __ delayed()->nop();
9068 9071 %}
9069 9072 ins_avoid_back_to_back(AVOID_BEFORE);
9070 9073 ins_pipe(br);
9071 9074 %}
9072 9075 
9073 9076 // Direct Branch, short with no delay slot
// Short form (4 bytes) selected by the matcher when UseCBCond is set and the
// target is in range; ins_short_branch(1) marks it for branch shortening.
9074 9077 instruct branch_short(label labl) %{
9075 9078 match(Goto);
9076 9079 predicate(UseCBCond);
9077 9080 effect(USE labl);
9078 9081 
9079 9082 size(4);
9080 9083 ins_cost(BRANCH_COST);
9081 9084 format %{ "BA $labl\t! short branch" %}
9082 9085 ins_encode %{
9083 9086 Label* L = $labl$$label;
9084 9087 assert(__ use_cbcond(*L), "back to back cbcond");
9085 9088 __ ba_short(*L);
9086 9089 %}
9087 9090 ins_short_branch(1);
9088 9091 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9089 9092 ins_pipe(cbcond_reg_imm);
9090 9093 %}
9091 9094
9092 9095 // Conditional Direct Branch
// Branch on signed integer condition codes; enc_bp emits BP + delayed NOP.
9093 9096 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
9094 9097 match(If cmp icc);
9095 9098 effect(USE labl);
9096 9099 
9097 9100 size(8);
9098 9101 ins_cost(BRANCH_COST);
9099 9102 format %{ "BP$cmp $icc,$labl" %}
9100 9103 // Prim = bits 24-22, Secnd = bits 31-30
9101 9104 ins_encode( enc_bp( labl, cmp, icc ) );
9102 9105 ins_avoid_back_to_back(AVOID_BEFORE);
9103 9106 ins_pipe(br_cc);
9104 9107 %}
9105 9108
// Branch on unsigned integer condition codes.
// NOTE(review): size(8) added for consistency with the sibling rules
// (branchCon, branchConP, branchLoopEnd, ...) which all declare it: the
// shared enc_bp encoding emits a BP instruction plus a delayed-slot NOP,
// i.e. a fixed 2 x 4 bytes.
9106 9109 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
9107 9110 match(If cmp icc);
9108 9111 effect(USE labl);
9109 9112 
  size(8);
9110 9113 ins_cost(BRANCH_COST);
9111 9114 format %{ "BP$cmp $icc,$labl" %}
9112 9115 // Prim = bits 24-22, Secnd = bits 31-30
9113 9116 ins_encode( enc_bp( labl, cmp, icc ) );
9114 9117 ins_avoid_back_to_back(AVOID_BEFORE);
9115 9118 ins_pipe(br_cc);
9116 9119 %}
9117 9120
// Branch on pointer condition codes (ptr_cc); backward branches are
// predicted taken (pt), forward branches not taken (pn).
9118 9121 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
9119 9122 match(If cmp pcc);
9120 9123 effect(USE labl);
9121 9124 
9122 9125 size(8);
9123 9126 ins_cost(BRANCH_COST);
9124 9127 format %{ "BP$cmp $pcc,$labl" %}
9125 9128 ins_encode %{
9126 9129 Label* L = $labl$$label;
9127 9130 Assembler::Predict predict_taken =
9128 9131 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9129 9132 
9130 9133 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9131 9134 __ delayed()->nop();
9132 9135 %}
9133 9136 ins_avoid_back_to_back(AVOID_BEFORE);
9134 9137 ins_pipe(br_cc);
9135 9138 %}
9136 9139 
// Branch on a floating-point condition-code field via FBPfcc.
9137 9140 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
9138 9141 match(If cmp fcc);
9139 9142 effect(USE labl);
9140 9143 
9141 9144 size(8);
9142 9145 ins_cost(BRANCH_COST);
9143 9146 format %{ "FBP$cmp $fcc,$labl" %}
9144 9147 ins_encode %{
9145 9148 Label* L = $labl$$label;
9146 9149 Assembler::Predict predict_taken =
9147 9150 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9148 9151 
9149 9152 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
9150 9153 __ delayed()->nop();
9151 9154 %}
9152 9155 ins_avoid_back_to_back(AVOID_BEFORE);
9153 9156 ins_pipe(br_fcc);
9154 9157 %}
9155 9158
// Counted-loop back branches: identical encodings to branchCon/branchConU,
// but matched against CountedLoopEnd so the compiler can treat them as
// loop-closing branches.
9156 9159 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
9157 9160 match(CountedLoopEnd cmp icc);
9158 9161 effect(USE labl);
9159 9162 
9160 9163 size(8);
9161 9164 ins_cost(BRANCH_COST);
9162 9165 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9163 9166 // Prim = bits 24-22, Secnd = bits 31-30
9164 9167 ins_encode( enc_bp( labl, cmp, icc ) );
9165 9168 ins_avoid_back_to_back(AVOID_BEFORE);
9166 9169 ins_pipe(br_cc);
9167 9170 %}
9168 9171 
9169 9172 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
9170 9173 match(CountedLoopEnd cmp icc);
9171 9174 effect(USE labl);
9172 9175 
9173 9176 size(8);
9174 9177 ins_cost(BRANCH_COST);
9175 9178 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9176 9179 // Prim = bits 24-22, Secnd = bits 31-30
9177 9180 ins_encode( enc_bp( labl, cmp, icc ) );
9178 9181 ins_avoid_back_to_back(AVOID_BEFORE);
9179 9182 ins_pipe(br_cc);
9180 9183 %}
9181 9184
9182 9185 // Compare and branch instructions
// Fused compare-and-branch: CMP followed by BP + delayed NOP (12 bytes).
// Each rule kills the matching condition-code register (icc or xcc) since
// the embedded CMP clobbers it. Immediate forms require a simm5 (immI5/immL5)
// second operand.
9183 9186 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9184 9187 match(If cmp (CmpI op1 op2));
9185 9188 effect(USE labl, KILL icc);
9186 9189 
9187 9190 size(12);
9188 9191 ins_cost(BRANCH_COST);
9189 9192 format %{ "CMP $op1,$op2\t! int\n\t"
9190 9193 "BP$cmp $labl" %}
9191 9194 ins_encode %{
9192 9195 Label* L = $labl$$label;
9193 9196 Assembler::Predict predict_taken =
9194 9197 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9195 9198 __ cmp($op1$$Register, $op2$$Register);
9196 9199 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9197 9200 __ delayed()->nop();
9198 9201 %}
9199 9202 ins_pipe(cmp_br_reg_reg);
9200 9203 %}
9201 9204 
9202 9205 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9203 9206 match(If cmp (CmpI op1 op2));
9204 9207 effect(USE labl, KILL icc);
9205 9208 
9206 9209 size(12);
9207 9210 ins_cost(BRANCH_COST);
9208 9211 format %{ "CMP $op1,$op2\t! int\n\t"
9209 9212 "BP$cmp $labl" %}
9210 9213 ins_encode %{
9211 9214 Label* L = $labl$$label;
9212 9215 Assembler::Predict predict_taken =
9213 9216 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9214 9217 __ cmp($op1$$Register, $op2$$constant);
9215 9218 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9216 9219 __ delayed()->nop();
9217 9220 %}
9218 9221 ins_pipe(cmp_br_reg_imm);
9219 9222 %}
9220 9223 
9221 9224 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9222 9225 match(If cmp (CmpU op1 op2));
9223 9226 effect(USE labl, KILL icc);
9224 9227 
9225 9228 size(12);
9226 9229 ins_cost(BRANCH_COST);
9227 9230 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9228 9231 "BP$cmp $labl" %}
9229 9232 ins_encode %{
9230 9233 Label* L = $labl$$label;
9231 9234 Assembler::Predict predict_taken =
9232 9235 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9233 9236 __ cmp($op1$$Register, $op2$$Register);
9234 9237 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9235 9238 __ delayed()->nop();
9236 9239 %}
9237 9240 ins_pipe(cmp_br_reg_reg);
9238 9241 %}
9239 9242 
9240 9243 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9241 9244 match(If cmp (CmpU op1 op2));
9242 9245 effect(USE labl, KILL icc);
9243 9246 
9244 9247 size(12);
9245 9248 ins_cost(BRANCH_COST);
9246 9249 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9247 9250 "BP$cmp $labl" %}
9248 9251 ins_encode %{
9249 9252 Label* L = $labl$$label;
9250 9253 Assembler::Predict predict_taken =
9251 9254 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9252 9255 __ cmp($op1$$Register, $op2$$constant);
9253 9256 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9254 9257 __ delayed()->nop();
9255 9258 %}
9256 9259 ins_pipe(cmp_br_reg_imm);
9257 9260 %}
9258 9261 
// Long compare-and-branch: same pattern but branching on xcc.
9259 9262 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9260 9263 match(If cmp (CmpL op1 op2));
9261 9264 effect(USE labl, KILL xcc);
9262 9265 
9263 9266 size(12);
9264 9267 ins_cost(BRANCH_COST);
9265 9268 format %{ "CMP $op1,$op2\t! long\n\t"
9266 9269 "BP$cmp $labl" %}
9267 9270 ins_encode %{
9268 9271 Label* L = $labl$$label;
9269 9272 Assembler::Predict predict_taken =
9270 9273 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9271 9274 __ cmp($op1$$Register, $op2$$Register);
9272 9275 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9273 9276 __ delayed()->nop();
9274 9277 %}
9275 9278 ins_pipe(cmp_br_reg_reg);
9276 9279 %}
9277 9280 
9278 9281 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9279 9282 match(If cmp (CmpL op1 op2));
9280 9283 effect(USE labl, KILL xcc);
9281 9284 
9282 9285 size(12);
9283 9286 ins_cost(BRANCH_COST);
9284 9287 format %{ "CMP $op1,$op2\t! long\n\t"
9285 9288 "BP$cmp $labl" %}
9286 9289 ins_encode %{
9287 9290 Label* L = $labl$$label;
9288 9291 Assembler::Predict predict_taken =
9289 9292 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9290 9293 __ cmp($op1$$Register, $op2$$constant);
9291 9294 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9292 9295 __ delayed()->nop();
9293 9296 %}
9294 9297 ins_pipe(cmp_br_reg_imm);
9295 9298 %}
9296 9299
9297 9300 // Compare Pointers and branch
// Pointer compare-and-branch on ptr_cc; null forms compare against G0.
9298 9301 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9299 9302 match(If cmp (CmpP op1 op2));
9300 9303 effect(USE labl, KILL pcc);
9301 9304 
9302 9305 size(12);
9303 9306 ins_cost(BRANCH_COST);
9304 9307 format %{ "CMP $op1,$op2\t! ptr\n\t"
9305 9308 "B$cmp $labl" %}
9306 9309 ins_encode %{
9307 9310 Label* L = $labl$$label;
9308 9311 Assembler::Predict predict_taken =
9309 9312 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9310 9313 __ cmp($op1$$Register, $op2$$Register);
9311 9314 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9312 9315 __ delayed()->nop();
9313 9316 %}
9314 9317 ins_pipe(cmp_br_reg_reg);
9315 9318 %}
9316 9319 
9317 9320 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9318 9321 match(If cmp (CmpP op1 null));
9319 9322 effect(USE labl, KILL pcc);
9320 9323 
9321 9324 size(12);
9322 9325 ins_cost(BRANCH_COST);
9323 9326 format %{ "CMP $op1,0\t! ptr\n\t"
9324 9327 "B$cmp $labl" %}
9325 9328 ins_encode %{
9326 9329 Label* L = $labl$$label;
9327 9330 Assembler::Predict predict_taken =
9328 9331 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9329 9332 __ cmp($op1$$Register, G0);
9330 9333 // bpr() is not used here since it has shorter distance.
9331 9334 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9332 9335 __ delayed()->nop();
9333 9336 %}
9334 9337 ins_pipe(cmp_br_reg_reg);
9335 9338 %}
9336 9339 
// Compressed-pointer (narrow oop) compare-and-branch; compared as 32-bit
// values on icc.
9337 9340 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9338 9341 match(If cmp (CmpN op1 op2));
9339 9342 effect(USE labl, KILL icc);
9340 9343 
9341 9344 size(12);
9342 9345 ins_cost(BRANCH_COST);
9343 9346 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
9344 9347 "BP$cmp $labl" %}
9345 9348 ins_encode %{
9346 9349 Label* L = $labl$$label;
9347 9350 Assembler::Predict predict_taken =
9348 9351 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9349 9352 __ cmp($op1$$Register, $op2$$Register);
9350 9353 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9351 9354 __ delayed()->nop();
9352 9355 %}
9353 9356 ins_pipe(cmp_br_reg_reg);
9354 9357 %}
9355 9358 
9356 9359 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9357 9360 match(If cmp (CmpN op1 null));
9358 9361 effect(USE labl, KILL icc);
9359 9362 
9360 9363 size(12);
9361 9364 ins_cost(BRANCH_COST);
9362 9365 format %{ "CMP $op1,0\t! compressed ptr\n\t"
9363 9366 "BP$cmp $labl" %}
9364 9367 ins_encode %{
9365 9368 Label* L = $labl$$label;
9366 9369 Assembler::Predict predict_taken =
9367 9370 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9368 9371 __ cmp($op1$$Register, G0);
9369 9372 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9370 9373 __ delayed()->nop();
9371 9374 %}
9372 9375 ins_pipe(cmp_br_reg_reg);
9373 9376 %}
9374 9377
9375 9378 // Loop back branch
// Counted-loop variants of the int compare-and-branch rules above.
9376 9379 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9377 9380 match(CountedLoopEnd cmp (CmpI op1 op2));
9378 9381 effect(USE labl, KILL icc);
9379 9382 
9380 9383 size(12);
9381 9384 ins_cost(BRANCH_COST);
9382 9385 format %{ "CMP $op1,$op2\t! int\n\t"
9383 9386 "BP$cmp $labl\t! Loop end" %}
9384 9387 ins_encode %{
9385 9388 Label* L = $labl$$label;
9386 9389 Assembler::Predict predict_taken =
9387 9390 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9388 9391 __ cmp($op1$$Register, $op2$$Register);
9389 9392 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9390 9393 __ delayed()->nop();
9391 9394 %}
9392 9395 ins_pipe(cmp_br_reg_reg);
9393 9396 %}
9394 9397 
9395 9398 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9396 9399 match(CountedLoopEnd cmp (CmpI op1 op2));
9397 9400 effect(USE labl, KILL icc);
9398 9401 
9399 9402 size(12);
9400 9403 ins_cost(BRANCH_COST);
9401 9404 format %{ "CMP $op1,$op2\t! int\n\t"
9402 9405 "BP$cmp $labl\t! Loop end" %}
9403 9406 ins_encode %{
9404 9407 Label* L = $labl$$label;
9405 9408 Assembler::Predict predict_taken =
9406 9409 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9407 9410 __ cmp($op1$$Register, $op2$$constant);
9408 9411 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9409 9412 __ delayed()->nop();
9410 9413 %}
9411 9414 ins_pipe(cmp_br_reg_imm);
9412 9415 %}
9413 9416
9414 9417 // Short compare and branch instructions
// Single-instruction CBCOND forms (4 bytes, no delay slot), used when
// UseCBCond is set and the target is in cbcond range. CWB = 32-bit compare,
// CXB = 64-bit compare. AVOID_BEFORE_AND_AFTER prevents back-to-back cbcond
// placement.
9415 9418 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9416 9419 match(If cmp (CmpI op1 op2));
9417 9420 predicate(UseCBCond);
9418 9421 effect(USE labl, KILL icc);
9419 9422 
9420 9423 size(4);
9421 9424 ins_cost(BRANCH_COST);
9422 9425 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9423 9426 ins_encode %{
9424 9427 Label* L = $labl$$label;
9425 9428 assert(__ use_cbcond(*L), "back to back cbcond");
9426 9429 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9427 9430 %}
9428 9431 ins_short_branch(1);
9429 9432 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9430 9433 ins_pipe(cbcond_reg_reg);
9431 9434 %}
9432 9435 
9433 9436 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9434 9437 match(If cmp (CmpI op1 op2));
9435 9438 predicate(UseCBCond);
9436 9439 effect(USE labl, KILL icc);
9437 9440 
9438 9441 size(4);
9439 9442 ins_cost(BRANCH_COST);
9440 9443 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9441 9444 ins_encode %{
9442 9445 Label* L = $labl$$label;
9443 9446 assert(__ use_cbcond(*L), "back to back cbcond");
9444 9447 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9445 9448 %}
9446 9449 ins_short_branch(1);
9447 9450 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9448 9451 ins_pipe(cbcond_reg_imm);
9449 9452 %}
9450 9453 
9451 9454 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9452 9455 match(If cmp (CmpU op1 op2));
9453 9456 predicate(UseCBCond);
9454 9457 effect(USE labl, KILL icc);
9455 9458 
9456 9459 size(4);
9457 9460 ins_cost(BRANCH_COST);
9458 9461 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9459 9462 ins_encode %{
9460 9463 Label* L = $labl$$label;
9461 9464 assert(__ use_cbcond(*L), "back to back cbcond");
9462 9465 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9463 9466 %}
9464 9467 ins_short_branch(1);
9465 9468 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9466 9469 ins_pipe(cbcond_reg_reg);
9467 9470 %}
9468 9471 
9469 9472 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9470 9473 match(If cmp (CmpU op1 op2));
9471 9474 predicate(UseCBCond);
9472 9475 effect(USE labl, KILL icc);
9473 9476 
9474 9477 size(4);
9475 9478 ins_cost(BRANCH_COST);
9476 9479 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9477 9480 ins_encode %{
9478 9481 Label* L = $labl$$label;
9479 9482 assert(__ use_cbcond(*L), "back to back cbcond");
9480 9483 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9481 9484 %}
9482 9485 ins_short_branch(1);
9483 9486 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9484 9487 ins_pipe(cbcond_reg_imm);
9485 9488 %}
9486 9489 
9487 9490 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9488 9491 match(If cmp (CmpL op1 op2));
9489 9492 predicate(UseCBCond);
9490 9493 effect(USE labl, KILL xcc);
9491 9494 
9492 9495 size(4);
9493 9496 ins_cost(BRANCH_COST);
9494 9497 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9495 9498 ins_encode %{
9496 9499 Label* L = $labl$$label;
9497 9500 assert(__ use_cbcond(*L), "back to back cbcond");
9498 9501 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
9499 9502 %}
9500 9503 ins_short_branch(1);
9501 9504 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9502 9505 ins_pipe(cbcond_reg_reg);
9503 9506 %}
9504 9507 
9505 9508 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9506 9509 match(If cmp (CmpL op1 op2));
9507 9510 predicate(UseCBCond);
9508 9511 effect(USE labl, KILL xcc);
9509 9512 
9510 9513 size(4);
9511 9514 ins_cost(BRANCH_COST);
9512 9515 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9513 9516 ins_encode %{
9514 9517 Label* L = $labl$$label;
9515 9518 assert(__ use_cbcond(*L), "back to back cbcond");
9516 9519 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9517 9520 %}
9518 9521 ins_short_branch(1);
9519 9522 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9520 9523 ins_pipe(cbcond_reg_imm);
9521 9524 %}
9522 9525
9523 9526 // Compare Pointers and branch
// CBCOND forms for pointers (64-bit CXB under _LP64, else 32-bit CWB) and
// compressed oops (always 32-bit CWB). Null forms compare against G0.
9524 9527 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9525 9528 match(If cmp (CmpP op1 op2));
9526 9529 predicate(UseCBCond);
9527 9530 effect(USE labl, KILL pcc);
9528 9531 
9529 9532 size(4);
9530 9533 ins_cost(BRANCH_COST);
9531 9534 #ifdef _LP64
9532 9535 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9533 9536 #else
9534 9537 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9535 9538 #endif
9536 9539 ins_encode %{
9537 9540 Label* L = $labl$$label;
9538 9541 assert(__ use_cbcond(*L), "back to back cbcond");
9539 9542 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9540 9543 %}
9541 9544 ins_short_branch(1);
9542 9545 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9543 9546 ins_pipe(cbcond_reg_reg);
9544 9547 %}
9545 9548 
9546 9549 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9547 9550 match(If cmp (CmpP op1 null));
9548 9551 predicate(UseCBCond);
9549 9552 effect(USE labl, KILL pcc);
9550 9553 
9551 9554 size(4);
9552 9555 ins_cost(BRANCH_COST);
9553 9556 #ifdef _LP64
9554 9557 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9555 9558 #else
9556 9559 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9557 9560 #endif
9558 9561 ins_encode %{
9559 9562 Label* L = $labl$$label;
9560 9563 assert(__ use_cbcond(*L), "back to back cbcond");
9561 9564 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9562 9565 %}
9563 9566 ins_short_branch(1);
9564 9567 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9565 9568 ins_pipe(cbcond_reg_reg);
9566 9569 %}
9567 9570 
9568 9571 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9569 9572 match(If cmp (CmpN op1 op2));
9570 9573 predicate(UseCBCond);
9571 9574 effect(USE labl, KILL icc);
9572 9575 
9573 9576 size(4);
9574 9577 ins_cost(BRANCH_COST);
9575 9578 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9576 9579 ins_encode %{
9577 9580 Label* L = $labl$$label;
9578 9581 assert(__ use_cbcond(*L), "back to back cbcond");
9579 9582 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9580 9583 %}
9581 9584 ins_short_branch(1);
9582 9585 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9583 9586 ins_pipe(cbcond_reg_reg);
9584 9587 %}
9585 9588 
9586 9589 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9587 9590 match(If cmp (CmpN op1 null));
9588 9591 predicate(UseCBCond);
9589 9592 effect(USE labl, KILL icc);
9590 9593 
9591 9594 size(4);
9592 9595 ins_cost(BRANCH_COST);
9593 9596 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
9594 9597 ins_encode %{
9595 9598 Label* L = $labl$$label;
9596 9599 assert(__ use_cbcond(*L), "back to back cbcond");
9597 9600 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
9598 9601 %}
9599 9602 ins_short_branch(1);
9600 9603 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9601 9604 ins_pipe(cbcond_reg_reg);
9602 9605 %}
9603 9606
9604 9607 // Loop back branch
// CBCOND variants of the counted-loop compare-and-branch rules.
9605 9608 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9606 9609 match(CountedLoopEnd cmp (CmpI op1 op2));
9607 9610 predicate(UseCBCond);
9608 9611 effect(USE labl, KILL icc);
9609 9612 
9610 9613 size(4);
9611 9614 ins_cost(BRANCH_COST);
9612 9615 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9613 9616 ins_encode %{
9614 9617 Label* L = $labl$$label;
9615 9618 assert(__ use_cbcond(*L), "back to back cbcond");
9616 9619 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9617 9620 %}
9618 9621 ins_short_branch(1);
9619 9622 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9620 9623 ins_pipe(cbcond_reg_reg);
9621 9624 %}
9622 9625 
9623 9626 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9624 9627 match(CountedLoopEnd cmp (CmpI op1 op2));
9625 9628 predicate(UseCBCond);
9626 9629 effect(USE labl, KILL icc);
9627 9630 
9628 9631 size(4);
9629 9632 ins_cost(BRANCH_COST);
9630 9633 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9631 9634 ins_encode %{
9632 9635 Label* L = $labl$$label;
9633 9636 assert(__ use_cbcond(*L), "back to back cbcond");
9634 9637 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9635 9638 %}
9636 9639 ins_short_branch(1);
9637 9640 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9638 9641 ins_pipe(cbcond_reg_imm);
9639 9642 %}
9640 9643
9641 9644 // Branch-on-register tests all 64 bits. We assume that values
9642 9645 // in 64-bit registers always remain zero or sign extended
9643 9646 // unless our code munges the high bits. Interrupts can chop
9644 9647 // the high order bits to zero or sign at any time.
// BPr-style branches comparing a register against zero/null directly,
// with no condition-code register involved; gated by can_branch_register.
9645 9648 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
9646 9649 match(If cmp (CmpI op1 zero));
9647 9650 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9648 9651 effect(USE labl);
9649 9652 
9650 9653 size(8);
9651 9654 ins_cost(BRANCH_COST);
9652 9655 format %{ "BR$cmp $op1,$labl" %}
9653 9656 ins_encode( enc_bpr( labl, cmp, op1 ) );
9654 9657 ins_avoid_back_to_back(AVOID_BEFORE);
9655 9658 ins_pipe(br_reg);
9656 9659 %}
9657 9660 
9658 9661 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
9659 9662 match(If cmp (CmpP op1 null));
9660 9663 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9661 9664 effect(USE labl);
9662 9665 
9663 9666 size(8);
9664 9667 ins_cost(BRANCH_COST);
9665 9668 format %{ "BR$cmp $op1,$labl" %}
9666 9669 ins_encode( enc_bpr( labl, cmp, op1 ) );
9667 9670 ins_avoid_back_to_back(AVOID_BEFORE);
9668 9671 ins_pipe(br_reg);
9669 9672 %}
9670 9673 
9671 9674 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
9672 9675 match(If cmp (CmpL op1 zero));
9673 9676 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9674 9677 effect(USE labl);
9675 9678 
9676 9679 size(8);
9677 9680 ins_cost(BRANCH_COST);
9678 9681 format %{ "BR$cmp $op1,$labl" %}
9679 9682 ins_encode( enc_bpr( labl, cmp, op1 ) );
9680 9683 ins_avoid_back_to_back(AVOID_BEFORE);
9681 9684 ins_pipe(br_reg);
9682 9685 %}
9683 9686
9684 9687
9685 9688 // ============================================================================
9686 9689 // Long Compare
9687 9690 //
9688 9691 // Currently we hold longs in 2 registers. Comparing such values efficiently
9689 9692 // is tricky. The flavor of compare used depends on whether we are testing
9690 9693 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
9691 9694 // The GE test is the negated LT test. The LE test can be had by commuting
9692 9695 // the operands (yielding a GE test) and then negating; negate again for the
9693 9696 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the
9694 9697 // NE test is negated from that.
// NOTE(review): the "2 registers" paragraph above appears inherited from a
// 32-bit/x86 description; on 64-bit SPARC the rules below branch on xcc from
// a single-register compare — confirm before relying on this commentary.
9695 9698 
9696 9699 // Due to a shortcoming in the ADLC, it mixes up expressions like:
9697 9700 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
9698 9701 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections
9699 9702 // are collapsed internally in the ADLC's dfa-gen code. The match for
9700 9703 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
9701 9704 // foo match ends up with the wrong leaf. One fix is to not match both
9702 9705 // reg-reg and reg-zero forms of long-compare. This is unfortunate because
9703 9706 // both forms beat the trinary form of long-compare and both are very useful
9704 9707 // on Intel which has so few registers.
9705 9708 
// Branch on the 64-bit (xcc) condition codes.
9706 9709 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
9707 9710 match(If cmp xcc);
9708 9711 effect(USE labl);
9709 9712 
9710 9713 size(8);
9711 9714 ins_cost(BRANCH_COST);
9712 9715 format %{ "BP$cmp $xcc,$labl" %}
9713 9716 ins_encode %{
9714 9717 Label* L = $labl$$label;
9715 9718 Assembler::Predict predict_taken =
9716 9719 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9717 9720 
9718 9721 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9719 9722 __ delayed()->nop();
9720 9723 %}
9721 9724 ins_avoid_back_to_back(AVOID_BEFORE);
9722 9725 ins_pipe(br_cc);
9723 9726 %}
9724 9727
9725 9728 // Manifest a CmpL3 result in an integer register. Very painful.
9726 9729 // This is the test to avoid.
// Three-way long compare into -1/0/1; kills the integer condition codes.
9727 9730 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
9728 9731 match(Set dst (CmpL3 src1 src2) );
9729 9732 effect( KILL ccr );
9730 9733 ins_cost(6*DEFAULT_COST);
9731 9734 size(24);
9732 9735 format %{ "CMP $src1,$src2\t\t! long\n"
9733 9736 "\tBLT,a,pn done\n"
9734 9737 "\tMOV -1,$dst\t! delay slot\n"
9735 9738 "\tBGT,a,pn done\n"
9736 9739 "\tMOV 1,$dst\t! delay slot\n"
9737 9740 "\tCLR $dst\n"
9738 9741 "done:" %}
9739 9742 ins_encode( cmpl_flag(src1,src2,dst) );
9740 9743 ins_pipe(cmpL_reg);
9741 9744 %}
9742 9745
9743 9746 // Conditional move
// Conditional moves predicated on the 64-bit (xcc) condition codes, for
// long/int/narrow/pointer destinations (MOVcc) and float/double destinations
// (FMOVScc/FMOVDcc). Immediate forms take the limited-range immediates the
// MOVcc encoding supports (immL0/immI11/immP0).
9744 9747 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
9745 9748 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9746 9749 ins_cost(150);
9747 9750 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9748 9751 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9749 9752 ins_pipe(ialu_reg);
9750 9753 %}
9751 9754 
9752 9755 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
9753 9756 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9754 9757 ins_cost(140);
9755 9758 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9756 9759 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9757 9760 ins_pipe(ialu_imm);
9758 9761 %}
9759 9762 
9760 9763 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
9761 9764 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9762 9765 ins_cost(150);
9763 9766 format %{ "MOV$cmp $xcc,$src,$dst" %}
9764 9767 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9765 9768 ins_pipe(ialu_reg);
9766 9769 %}
9767 9770 
9768 9771 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
9769 9772 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9770 9773 ins_cost(140);
9771 9774 format %{ "MOV$cmp $xcc,$src,$dst" %}
9772 9775 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9773 9776 ins_pipe(ialu_imm);
9774 9777 %}
9775 9778 
9776 9779 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
9777 9780 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
9778 9781 ins_cost(150);
9779 9782 format %{ "MOV$cmp $xcc,$src,$dst" %}
9780 9783 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9781 9784 ins_pipe(ialu_reg);
9782 9785 %}
9783 9786 
9784 9787 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
9785 9788 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9786 9789 ins_cost(150);
9787 9790 format %{ "MOV$cmp $xcc,$src,$dst" %}
9788 9791 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9789 9792 ins_pipe(ialu_reg);
9790 9793 %}
9791 9794 
9792 9795 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
9793 9796 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9794 9797 ins_cost(140);
9795 9798 format %{ "MOV$cmp $xcc,$src,$dst" %}
9796 9799 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9797 9800 ins_pipe(ialu_imm);
9798 9801 %}
9799 9802 
9800 9803 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
9801 9804 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
9802 9805 ins_cost(150);
9803 9806 opcode(0x101);
9804 9807 format %{ "FMOVS$cmp $xcc,$src,$dst" %}
9805 9808 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9806 9809 ins_pipe(int_conditional_float_move);
9807 9810 %}
9808 9811 
9809 9812 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9810 9813 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9811 9814 ins_cost(150);
9812 9815 opcode(0x102);
9813 9816 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9814 9817 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9815 9818 ins_pipe(int_conditional_float_move);
9816 9819 %}
9817 9820
9818 9821 // ============================================================================
9819 9822 // Safepoint Instruction
// Safepoint poll: a pointer-sized load from the polling page into G0
// (discarded); the poll_type relocation lets the VM trap the access when a
// safepoint is pending.
9820 9823 instruct safePoint_poll(iRegP poll) %{
9821 9824 match(SafePoint poll);
9822 9825 effect(USE poll);
9823 9826 
9824 9827 size(4);
9825 9828 #ifdef _LP64
9826 9829 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9827 9830 #else
9828 9831 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
9829 9832 #endif
9830 9833 ins_encode %{
9831 9834 __ relocate(relocInfo::poll_type);
9832 9835 __ ld_ptr($poll$$Register, 0, G0);
9833 9836 %}
9834 9837 ins_pipe(loadPollP);
9835 9838 %}
9836 9839
9837 9840 // ============================================================================
9838 9841 // Call Instructions
9839 9842 // Call Java Static Instruction
// Direct static Java call. The predicate excludes method-handle invokes,
// which are matched by CallStaticJavaHandle below.
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);  // CALL + delay-slot NOP
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}
9852 9855
9853 9856 // Call Java Static Instruction (method handle version)
// Static Java call for method-handle invokes: saves/restores SP around the
// call (MH adapters may re-point SP), so L7 (the saved-SP register) is killed.
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);  // preserve_SP + CALL + delay slot + restore_SP
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}
9865 9868
9866 9869 // Call Java Dynamic Instruction
// Dynamic (virtual/inline-cache) Java call: loads the IC data into G5 then
// calls; no fixed size since the set may vary.
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET    (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}
9877 9880
9878 9881 // Call Runtime Instruction
// Call into the VM runtime. L7 is clobbered by the runtime calling
// convention; adjust_long_from_native_call fixes up a long return value.
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}
9889 9892
9890 9893 // Call runtime without safepoint - same as CallRuntime
// Leaf runtime call (no safepoint); encoding is identical to CallRuntime.
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}
9902 9905
9903 9906 // Call runtime without safepoint - same as CallLeaf
// Leaf runtime call that is known not to touch floating point; otherwise
// identical to CallLeafDirect.
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}
9915 9918
9916 9919 // Tail Call; Jump from runtime stub to Java code.
9917 9920 // Also known as an 'interprocedural jump'.
9918 9921 // Target of jump will eventually return to caller.
9919 9922 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code. The method oop is
// implicitly passed in the inline-cache register; the return address is kept.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp     $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}
9929 9932
9930 9933
9931 9934 // Return Instruction
// Method return. Emits nothing: the epilogue node has already issued the
// actual ret/restore.
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}
9941 9944
9942 9945
9943 9946 // Tail Jump; remove the return address; jump to target.
9944 9947 // TailCall above leaves the return address around.
9945 9948 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
9946 9949 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
9947 9950 // "restore" before this instruction (in Epilogue), we need to materialize it
9948 9951 // in %i0.
// Tail jump used only by the rethrow_Java stub (fancy_jump=2): discards the
// caller's return address and jumps, materializing the exception pc in O1.
// The exception oop arrives in %i0 (becomes %o0 after the epilogue restore).
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp     $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}
9961 9964
9962 9965 // Create exception oop: created by stack-crawling runtime code.
9963 9966 // Created exception is now available to this handler, and is setup
9964 9967 // just prior to jumping to this handler. No code emitted.
// Exception oop produced by stack-crawling runtime code; it is already in
// %o0 on entry to the handler, so no code is emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}
9976 9979
9977 9980
9978 9981 // Rethrow exception:
9979 9982 // The exception oop will come in the first argument position.
9980 9983 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: the exception oop is in the first argument register; JUMP (not
// call) to the rethrow stub.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp    rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}
9992 9995
9993 9996
9994 9997 // Die now
// Halt: emit an illegal-instruction trap so execution cannot fall through.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP   ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}
10006 10009
10007 10010 // ============================================================================
10008 10011 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
10009 10012 // array for an instance of the superklass. Set a hidden internal cache on a
10010 10013 // hit (cache is checked with exposed code in gen_subtype_check()). Return
10011 10014 // not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-path subtype check: calls the PartialSubtypeCheck stub, which scans
// the secondary-supers array. Result in $index (O0): zero = hit, non-zero =
// miss. The stub also sets condition codes and clobbers O7.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL   PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}
10021 10024
// Fused form: the stub result is only compared against zero, so match the
// CmpP directly and consume the condition codes the stub already set; the
// integer result register (O0) is just killed.
instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL   PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}
10031 10034
10032 10035
10033 10036 // ============================================================================
10034 10037 // inlined locking and unlocking
10035 10038
// Inlined fast-path monitor enter; sets $pcc for the slow-path branch.
instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK  $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}
10046 10049
10047 10050
// Inlined fast-path monitor exit; mirror of cmpFastLock.
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK  $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}
10057 10060
10058 10061 // The encodings are generic.
// Zero an array with a simple backwards store loop; used when block-zeroing
// (BIS) is not profitable for this size.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV    $cnt,$temp\n"
            "loop:   SUBcc  $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge   loop\t\t! Clearing loop\n"
            " STX    G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg    = $cnt$$Register;
    Register nof_bytes_tmp    = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);  // annulled on loop exit
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}
10088 10091
// Zero an array using Block Init Store (BIS); no temp register needed
// because BlockZeroingLowLimit fits in a simm13 here (cf. clear_array_bis_2).
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}
10111 10114
// BIS zeroing variant for when BlockZeroingLowLimit does not fit in a
// simm13 immediate: a temp register is needed to materialize the limit.
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;
    Register temp  = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}
10135 10138
// String.compareTo intrinsic, both strings Latin-1 (byte[]); only one temp
// is needed so it is passed twice to the macro assembler.
instruct string_compareL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                         o7RegI tmp, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register, $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(long_memory_op);
%}
10151 10154
// String.compareTo intrinsic, both strings UTF-16 (char[]).
instruct string_compareU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                         o7RegI tmp, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register, $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(long_memory_op);
%}
10167 10170
// Mixed-encoding compare: str1 is Latin-1, str2 is UTF-16; needs two temps.
instruct string_compareLU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                          o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(long_memory_op);
%}
10183 10186
// Mixed-encoding compare UL: implemented by calling the LU routine with the
// string/count operands swapped (hence str2/cnt2 passed first).
instruct string_compareUL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                          o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ string_compare($str2$$Register, $str1$$Register,
                      $cnt2$$Register, $cnt1$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(long_memory_op);
%}
10199 10202
// String.equals intrinsic for Latin-1 strings (byte elements).
instruct string_equalsL(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals byte[] $str1,$str2,$cnt -> $result   // KILL $tmp" %}
  ins_encode %{
    // first arg false: strings, not arrays (no length-word comparison)
    __ array_equals(false, $str1$$Register, $str2$$Register,
                    $cnt$$Register, $tmp$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(long_memory_op);
%}
10214 10217
// String.equals intrinsic for UTF-16 strings (char elements).
instruct string_equalsU(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals char[] $str1,$str2,$cnt -> $result   // KILL $tmp" %}
  ins_encode %{
    // first arg false: strings, not arrays; last arg false: char elements
    __ array_equals(false, $str1$$Register, $str2$$Register,
                    $cnt$$Register, $tmp$$Register,
                    $result$$Register, false /* byte */);
  %}
  ins_pipe(long_memory_op);
%}
10229 10232
// Arrays.equals intrinsic for byte[] (compares lengths then contents).
instruct array_equalsB(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                       o7RegI tmp2, flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result   // KILL $tmp1,$tmp2" %}
  ins_encode %{
    // first arg true: arrays, so lengths are loaded and compared too
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(long_memory_op);
%}
10244 10247
// Arrays.equals intrinsic for char[].
instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                       o7RegI tmp2, flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result   // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, false /* byte */);
  %}
  ins_pipe(long_memory_op);
%}
10259 10262
// HasNegatives intrinsic: test whether any byte in the array has its sign
// bit set (used to decide if a byte[] is pure ASCII/Latin-1).
instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
                       iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
                       flagsReg ccr)
%{
  match(Set resultR (HasNegatives pAryR iSizeR));
  effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
  format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR   // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
  ins_encode %{
    __ has_negatives($pAryR$$Register, $iSizeR$$Register,
                     $resultR$$Register,
                     $tmp1L$$Register, $tmp2L$$Register,
                     $tmp3L$$Register, $tmp4L$$Register);
  %}
  ins_pipe(long_memory_op);
%}
10275 10278
10276 10279 // char[] to byte[] compression
// char[] -> byte[] compression, scalar version (VIS3 not available).
// $result is preloaded with $len; the macro routine adjusts it on failure.
instruct string_compress(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result, iRegL tmp, flagsReg ccr) %{
  predicate(UseVIS < 3);
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP result, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Compress $src,$dst,$len -> $result    // KILL $tmp" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);  // sign-extend the 32-bit length
    // Zero-length copy: result is 0 (set in the delay slot) and we are done.
    __ cmp_zero_and_br(Assembler::zero, $len$$Register, Ldone, false, Assembler::pn);
    __ delayed()->mov($len$$Register, $result$$Register); // copy count
    __ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}
10293 10296
10294 10297 // fast char[] to byte[] compression using VIS instructions
// char[] -> byte[] compression using VIS3: bulk 16-char chunks first, then a
// scalar tail loop for the remainder.
instruct string_compress_fast(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result,
                              iRegL tmp1, iRegL tmp2, iRegL tmp3, iRegL tmp4,
                              regD ftmp1, regD ftmp2, regD ftmp3, flagsReg ccr) %{
  predicate(UseVIS >= 3);
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Compress Fast $src,$dst,$len -> $result    // KILL $tmp1,$tmp2,$tmp3,$tmp4,$ftmp1,$ftmp2,$ftmp3" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    // Vectorized 16-char chunks; updates $len to the remaining count.
    __ string_compress_16($src$$Register, $dst$$Register, $len$$Register, $result$$Register,
                          $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register,
                          $ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, Ldone);
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    // Scalar tail for the remaining < 16 chars.
    __ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp1$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}
10315 10318
10316 10319 // byte[] to char[] inflation
// byte[] -> char[] inflation, scalar version.
instruct string_inflate(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
                        iRegL tmp, flagsReg ccr) %{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Inflate $src,$dst,$len    // KILL $tmp" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);  // sign-extend the 32-bit length
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}
10332 10335
10333 10336 // fast byte[] to char[] inflation using VIS instructions
// byte[] -> char[] inflation using VIS3: bulk 16-byte chunks first, then a
// scalar tail loop for the remainder.
instruct string_inflate_fast(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
                             iRegL tmp, regD ftmp1, regD ftmp2, regD ftmp3, regD ftmp4, flagsReg ccr) %{
  predicate(UseVIS >= 3);
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, TEMP ftmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Inflate Fast $src,$dst,$len    // KILL $tmp,$ftmp1,$ftmp2,$ftmp3,$ftmp4" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    // Vectorized 16-byte chunks; updates $len to the remaining count.
    __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register,
                         $ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, $ftmp4$$FloatRegister, Ldone);
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    // Scalar tail for the remaining < 16 bytes.
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}
10352 10355
10353 10356
10354 10357 //---------- Zeros Count Instructions ------------------------------------------
10355 10358
// Integer.numberOfLeadingZeros via POPC: smear the highest set bit down
// through all lower positions, then 32 - popc(x). Used only when the
// hardware LZCNT instruction is not available (see countLeadingZerosIvis).
instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction && !UseCountLeadingZerosInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL     $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL     $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRL     $dst,2,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRL     $dst,4,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRL     $dst,8,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRL     $dst,16,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "POPC    $dst,$dst\n\t"
            "MOV     32,$tmp\n\t"
            "SUB     $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1, Rtmp);
    __ srl(Rsrc, 0, Rdst);    // 32-bit zero extend before 64-bit POPC
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst); // 32 - popc(smeared)
  %}
  ins_pipe(ialu_reg);
%}
10402 10405
// Long.numberOfLeadingZeros via POPC: 64-bit smear then 64 - popc(x).
// Used only when the hardware LZCNT instruction is not available.
instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction && !UseCountLeadingZerosInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX    $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR      $src,$tmp,$dst\n\t"
            "SRLX    $dst,2,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRLX    $dst,4,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRLX    $dst,8,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRLX    $dst,16,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "SRLX    $dst,32,$tmp\n\t"
            "OR      $dst,$tmp,$dst\n\t"
            "POPC    $dst,$dst\n\t"
            "MOV     64,$tmp\n\t"
            "SUB     $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1, Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst); // 64 - popc(smeared)
  %}
  ins_pipe(ialu_reg);
%}
10455 +
// Integer.numberOfLeadingZeros via the hardware LZCNT instruction. LZCNT
// counts over the full 64-bit register, so the int is first zero-extended
// and the extra 32 high-order zeros are subtracted off afterwards.
instruct countLeadingZerosIvis(iRegIsafe dst, iRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst);

  format %{ "SRL     $src,0,$dst\t! count leading zeros (int)\n\t"
            "LZCNT   $dst,$dst\n\t"
            "SUB     $dst,32,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ srl(Rsrc, 0, Rdst);       // zero-extend to 64 bits
    __ lzcnt(Rdst, Rdst);        // 32 + clz32(src)
    __ sub(Rdst, BitsPerInt, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10473 +
// Long.numberOfLeadingZeros via the hardware LZCNT instruction: a single
// 64-bit count, no fixup needed.
instruct countLeadingZerosLvis(iRegIsafe dst, iRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst);

  format %{ "LZCNT   $src,$dst\t! count leading zeros (long)" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ lzcnt(Rsrc, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10452 10487
// Integer.numberOfTrailingZeros via POPC: popc(~x & (x - 1)) counts the ones
// below the lowest set bit, i.e. the trailing zeros.
instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB     $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN    $dst,$src,$dst\n\t"
            "SRL     $dst,R_G0,$dst\n\t"
            "POPC    $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ srl(Rdst, G0, Rdst);   // 32-bit zero extend before 64-bit POPC
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10473 10508
// Long.numberOfTrailingZeros via POPC; 64-bit form needs no zero-extend.
instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB     $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN    $dst,$src,$dst\n\t"
            "POPC    $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10492 10527
10493 10528
10494 10529 //---------- Population Count Instructions -------------------------------------
10495 10530
// Integer.bitCount: POPC operates on 64 bits, so the int is zero-extended
// first (the high half must not contribute to the count).
instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL    $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC   $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}
10508 10543
10509 10544 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC   $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}
10520 10555
10521 10556
10522 10557 // ============================================================================
10523 10558 //------------Bytes reverse--------------------------------------------------
10524 10559
// Integer.reverseBytes: force the value to a stack slot and reload it with a
// little-endian ASI load, letting the memory system do the byte swap.
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);  // O7 holds the biased slot offset
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}
10540 10575
// Reverse the byte order of a long via a stack spill and a little-endian
// ASI reload (LDXA).  Clobbers O7 as scratch for the biased displacement.
10541 10576 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
10542 10577   match(Set dst (ReverseBytesL src));
10543 10578 
10544 10579   // Op cost is artificially doubled to make sure that load or store
10545 10580   // instructions are preferred over this one which requires a spill
10546 10581   // onto a stack slot.
10547 10582   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10548 10583   format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10549 10584 
10550 10585   ins_encode %{
10551 10586     __ set($src$$disp + STACK_BIAS, O7);
10552 10587     __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10553 10588   %}
10554 10589   ins_pipe( iload_mem );
10555 10590 %}
10556 10591
// Reverse the byte order of an unsigned short (char) via a stack spill and a
// zero-extending little-endian ASI reload (LDUHA).  Clobbers O7 as scratch.
10557 10592 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
10558 10593   match(Set dst (ReverseBytesUS src));
10559 10594 
10560 10595   // Op cost is artificially doubled to make sure that load or store
10561 10596   // instructions are preferred over this one which requires a spill
10562 10597   // onto a stack slot.
10563 10598   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10564 10599   format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}
10565 10600 
10566 10601   ins_encode %{
10567 10602     // the value was spilled as an int so bias the load
10568 10603     // (+2 skips the high halfword of the big-endian 32-bit slot).
10569 10604     __ set($src$$disp + STACK_BIAS + 2, O7);
10570 10605     __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10571 10606   %}
10572 10607   ins_pipe( iload_mem );
10573 10608 %}
10573 10608
// Reverse the byte order of a signed short via a stack spill and a
// sign-extending little-endian ASI reload (LDSHA).  Clobbers O7 as scratch.
10574 10609 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
10575 10610   match(Set dst (ReverseBytesS src));
10576 10611 
10577 10612   // Op cost is artificially doubled to make sure that load or store
10578 10613   // instructions are preferred over this one which requires a spill
10579 10614   // onto a stack slot.
10580 10615   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10581 10616   format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}
10582 10617 
10583 10618   ins_encode %{
10584 10619     // the value was spilled as an int so bias the load
10585 10620     // (+2 skips the high halfword of the big-endian 32-bit slot).
10586 10621     __ set($src$$disp + STACK_BIAS + 2, O7);
10587 10622     __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10588 10623   %}
10589 10624   ins_pipe( iload_mem );
10590 10625 %}
10590 10625
10591 10626 // Load Integer reversed byte order
// Fuses ReverseBytesI with its feeding LoadI: a single LDUWA through the
// little-endian ASI.  Requires reg+reg addressing (indIndexMemory) because
// alternate-space loads take two register operands.
10592 10627 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
10593 10628   match(Set dst (ReverseBytesI (LoadI src)));
10594 10629 
10595 10630   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
10596 10631   size(4);
10597 10632   format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10598 10633 
10599 10634   ins_encode %{
10600 10635     __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10601 10636   %}
10602 10637   ins_pipe(iload_mem);
10603 10638 %}
10604 10639
10605 10640 // Load Long - aligned and reversed
// Fuses ReverseBytesL with its feeding LoadL: a single LDXA through the
// little-endian ASI (reg+reg addressing only).
10606 10641 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
10607 10642   match(Set dst (ReverseBytesL (LoadL src)));
10608 10643 
10609 10644   ins_cost(MEMORY_REF_COST);
10610 10645   size(4);
10611 10646   format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10612 10647 
10613 10648   ins_encode %{
10614 10649     __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10615 10650   %}
10616 10651   ins_pipe(iload_mem);
10617 10652 %}
10618 10653
10619 10654 // Load unsigned short / char reversed byte order
// Fuses ReverseBytesUS with its feeding LoadUS: one zero-extending LDUHA
// through the little-endian ASI (reg+reg addressing only).
10620 10655 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
10621 10656   match(Set dst (ReverseBytesUS (LoadUS src)));
10622 10657 
10623 10658   ins_cost(MEMORY_REF_COST);
10624 10659   size(4);
10625 10660   format %{ "LDUHA $src, $dst\t!asi=primary_little" %}
10626 10661 
10627 10662   ins_encode %{
10628 10663     __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10629 10664   %}
10630 10665   ins_pipe(iload_mem);
10631 10666 %}
10632 10667
10633 10668 // Load short reversed byte order
// Fuses ReverseBytesS with its feeding LoadS: one sign-extending LDSHA
// through the little-endian ASI (reg+reg addressing only).
10634 10669 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
10635 10670   match(Set dst (ReverseBytesS (LoadS src)));
10636 10671 
10637 10672   ins_cost(MEMORY_REF_COST);
10638 10673   size(4);
10639 10674   format %{ "LDSHA $src, $dst\t!asi=primary_little" %}
10640 10675 
10641 10676   ins_encode %{
10642 10677     __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10643 10678   %}
10644 10679   ins_pipe(iload_mem);
10645 10680 %}
10646 10681
10647 10682 // Store Integer reversed byte order
// Fuses ReverseBytesI with the consuming StoreI: a single STWA through the
// little-endian ASI (reg+reg addressing only).
10648 10683 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
10649 10684   match(Set dst (StoreI dst (ReverseBytesI src)));
10650 10685 
10651 10686   ins_cost(MEMORY_REF_COST);
10652 10687   size(4);
10653 10688   format %{ "STWA $src, $dst\t!asi=primary_little" %}
10654 10689 
10655 10690   ins_encode %{
10656 10691     __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10657 10692   %}
10658 10693   ins_pipe(istore_mem_reg);
10659 10694 %}
10660 10695
10661 10696 // Store Long reversed byte order
// Fuses ReverseBytesL with the consuming StoreL: a single STXA through the
// little-endian ASI (reg+reg addressing only).
10662 10697 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
10663 10698   match(Set dst (StoreL dst (ReverseBytesL src)));
10664 10699 
10665 10700   ins_cost(MEMORY_REF_COST);
10666 10701   size(4);
10667 10702   format %{ "STXA $src, $dst\t!asi=primary_little" %}
10668 10703 
10669 10704   ins_encode %{
10670 10705     __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10671 10706   %}
10672 10707   ins_pipe(istore_mem_reg);
10673 10708 %}
10674 10709
10675 10710 // Store unsigned short/char reversed byte order
// Fuses ReverseBytesUS with the consuming StoreC: a single STHA through the
// little-endian ASI (reg+reg addressing only).
10676 10711 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
10677 10712   match(Set dst (StoreC dst (ReverseBytesUS src)));
10678 10713 
10679 10714   ins_cost(MEMORY_REF_COST);
10680 10715   size(4);
10681 10716   format %{ "STHA $src, $dst\t!asi=primary_little" %}
10682 10717 
10683 10718   ins_encode %{
10684 10719     __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10685 10720   %}
10686 10721   ins_pipe(istore_mem_reg);
10687 10722 %}
10688 10723
10689 10724 // Store short reversed byte order
// Fuses ReverseBytesS with the consuming StoreC: a single STHA through the
// little-endian ASI (reg+reg addressing only).  Identical encoding to the
// unsigned case since a halfword store ignores the upper bits.
10690 10725 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
10691 10726   match(Set dst (StoreC dst (ReverseBytesS src)));
10692 10727 
10693 10728   ins_cost(MEMORY_REF_COST);
10694 10729   size(4);
10695 10730   format %{ "STHA $src, $dst\t!asi=primary_little" %}
10696 10731 
10697 10732   ins_encode %{
10698 10733     __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10699 10734   %}
10700 10735   ins_pipe(istore_mem_reg);
10701 10736 %}
10702 10737
10703 10738 // ====================VECTOR INSTRUCTIONS=====================================
10704 10739
10705 10740 // Load Aligned Packed values into a Double Register
// An 8-byte vector fits in one double FP register; a single LDDF suffices.
10706 10741 instruct loadV8(regD dst, memory mem) %{
10707 10742   predicate(n->as_LoadVector()->memory_size() == 8);
10708 10743   match(Set dst (LoadVector mem));
10709 10744   ins_cost(MEMORY_REF_COST);
10710 10745   size(4);
10711 10746   format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
10712 10747   ins_encode %{
10713 10748     __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
10714 10749   %}
10715 10750   ins_pipe(floadD_mem);
10716 10751 %}
10717 10752
10718 10753 // Store Vector in Double register to memory
// Counterpart of loadV8: one STDF writes the whole 8-byte vector.
10719 10754 instruct storeV8(memory mem, regD src) %{
10720 10755   predicate(n->as_StoreVector()->memory_size() == 8);
10721 10756   match(Set mem (StoreVector mem src));
10722 10757   ins_cost(MEMORY_REF_COST);
10723 10758   size(4);
10724 10759   format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
10725 10760   ins_encode %{
10726 10761     __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
10727 10762   %}
10728 10763   ins_pipe(fstoreD_mem_reg);
10729 10764 %}
10730 10765
10731 10766 // Store Zero into vector in memory
// A replicated zero byte vector is just 64 zero bits: store G0 with STX,
// avoiding any FP register use.
10732 10767 instruct storeV8B_zero(memory mem, immI0 zero) %{
10733 10768   predicate(n->as_StoreVector()->memory_size() == 8);
10734 10769   match(Set mem (StoreVector mem (ReplicateB zero)));
10735 10770   ins_cost(MEMORY_REF_COST);
10736 10771   size(4);
10737 10772   format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
10738 10773   ins_encode %{
10739 10774     __ stx(G0, $mem$$Address);
10740 10775   %}
10741 10776   ins_pipe(fstoreD_mem_zero);
10742 10777 %}
10743 10778
// Store a replicated-zero short vector: 64 zero bits via STX of G0.
10744 10779 instruct storeV4S_zero(memory mem, immI0 zero) %{
10745 10780   predicate(n->as_StoreVector()->memory_size() == 8);
10746 10781   match(Set mem (StoreVector mem (ReplicateS zero)));
10747 10782   ins_cost(MEMORY_REF_COST);
10748 10783   size(4);
10749 10784   format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
10750 10785   ins_encode %{
10751 10786     __ stx(G0, $mem$$Address);
10752 10787   %}
10753 10788   ins_pipe(fstoreD_mem_zero);
10754 10789 %}
10755 10790
// Store a replicated-zero int vector: 64 zero bits via STX of G0.
10756 10791 instruct storeV2I_zero(memory mem, immI0 zero) %{
10757 10792   predicate(n->as_StoreVector()->memory_size() == 8);
10758 10793   match(Set mem (StoreVector mem (ReplicateI zero)));
10759 10794   ins_cost(MEMORY_REF_COST);
10760 10795   size(4);
10761 10796   format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
10762 10797   ins_encode %{
10763 10798     __ stx(G0, $mem$$Address);
10764 10799   %}
10765 10800   ins_pipe(fstoreD_mem_zero);
10766 10801 %}
10767 10802
// Store a replicated-zero float vector: +0.0f is all-zero bits, so STX of
// G0 writes both lanes at once.
10768 10803 instruct storeV2F_zero(memory mem, immF0 zero) %{
10769 10804   predicate(n->as_StoreVector()->memory_size() == 8);
10770 10805   match(Set mem (StoreVector mem (ReplicateF zero)));
10771 10806   ins_cost(MEMORY_REF_COST);
10772 10807   size(4);
10773 10808   format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
10774 10809   ins_encode %{
10775 10810     __ stx(G0, $mem$$Address);
10776 10811   %}
10777 10812   ins_pipe(fstoreD_mem_zero);
10778 10813 %}
10779 10814
10780 10815 // Replicate scalar to packed byte values into Double register
// Field-doubling trick: place the byte in bits 63..56, then each
// shift-right/OR pair doubles the width of the replicated field
// (8 -> 16 -> 32 -> 64 bits).  VIS3 MOVXTOD then moves the 64-bit
// pattern into the double FP register without a memory round trip.
10781 10816 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10782 10817   predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
10783 10818   match(Set dst (ReplicateB src));
10784 10819   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10785 10820   format %{ "SLLX $src,56,$tmp\n\t"
10786 10821             "SRLX $tmp, 8,$tmp2\n\t"
10787 10822             "OR $tmp,$tmp2,$tmp\n\t"
10788 10823             "SRLX $tmp,16,$tmp2\n\t"
10789 10824             "OR $tmp,$tmp2,$tmp\n\t"
10790 10825             "SRLX $tmp,32,$tmp2\n\t"
10791 10826             "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10792 10827             "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10793 10828   ins_encode %{
10794 10829     Register Rsrc = $src$$Register;
10795 10830     Register Rtmp = $tmp$$Register;
10796 10831     Register Rtmp2 = $tmp2$$Register;
10797 10832     __ sllx(Rsrc, 56, Rtmp);
10798 10833     __ srlx(Rtmp, 8, Rtmp2);
10799 10834     __ or3 (Rtmp, Rtmp2, Rtmp);
10800 10835     __ srlx(Rtmp, 16, Rtmp2);
10801 10836     __ or3 (Rtmp, Rtmp2, Rtmp);
10802 10837     __ srlx(Rtmp, 32, Rtmp2);
10803 10838     __ or3 (Rtmp, Rtmp2, Rtmp);
10804 10839     __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10805 10840   %}
10806 10841   ins_pipe(ialu_reg);
10807 10842 %}
10808 10843
10810 10845 // Replicate scalar to packed byte values into Double stack
// Pre-VIS3 variant of Repl8B_reg: the same field-doubling shift/OR
// sequence, but the 64-bit pattern is stored to a stack slot (STX)
// instead of being moved directly into an FP register.
10810 10845 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10811 10846   predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
10812 10847   match(Set dst (ReplicateB src));
10813 10848   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10814 10849   format %{ "SLLX $src,56,$tmp\n\t"
10815 10850             "SRLX $tmp, 8,$tmp2\n\t"
10816 10851             "OR $tmp,$tmp2,$tmp\n\t"
10817 10852             "SRLX $tmp,16,$tmp2\n\t"
10818 10853             "OR $tmp,$tmp2,$tmp\n\t"
10819 10854             "SRLX $tmp,32,$tmp2\n\t"
10820 10855             "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10821 10856             "STX $tmp,$dst\t! regL to stkD" %}
10822 10857   ins_encode %{
10823 10858     Register Rsrc = $src$$Register;
10824 10859     Register Rtmp = $tmp$$Register;
10825 10860     Register Rtmp2 = $tmp2$$Register;
10826 10861     __ sllx(Rsrc, 56, Rtmp);
10827 10862     __ srlx(Rtmp, 8, Rtmp2);
10828 10863     __ or3 (Rtmp, Rtmp2, Rtmp);
10829 10864     __ srlx(Rtmp, 16, Rtmp2);
10830 10865     __ or3 (Rtmp, Rtmp2, Rtmp);
10831 10866     __ srlx(Rtmp, 32, Rtmp2);
10832 10867     __ or3 (Rtmp, Rtmp2, Rtmp);
10833 10868     // Rtmp2 is dead after the last OR; reuse it for the biased displacement.
10833 10868     __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10834 10869     __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10835 10870   %}
10836 10871   ins_pipe(ialu_reg);
10837 10872 %}
10838 10873
10839 10874 // Replicate scalar constant to packed byte values in Double register
// The replicated pattern is materialized at compile time into the constant
// table and loaded with one LDDF.  O7 (tmp) is killed as scratch in case
// the table offset does not fit in a simm13 immediate.
10840 10875 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
10841 10876   predicate(n->as_Vector()->length() == 8);
10842 10877   match(Set dst (ReplicateB con));
10843 10878   effect(KILL tmp);
10844 10879   format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
10845 10880   ins_encode %{
10846 10881     // XXX This is a quick fix for 6833573.
10847 10882     //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
10848 10883     RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
10849 10884     __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10850 10885   %}
10851 10886   ins_pipe(loadConFD);
10852 10887 %}
10853 10888
10854 10889 // Replicate scalar to packed char/short values into Double register
// Field-doubling as in Repl8B_reg, starting from a 16-bit field
// (16 -> 32 -> 64 bits), then VIS3 MOVXTOD into the FP register.
10855 10890 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10856 10891   predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
10857 10892   match(Set dst (ReplicateS src));
10858 10893   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10859 10894   format %{ "SLLX $src,48,$tmp\n\t"
10860 10895             "SRLX $tmp,16,$tmp2\n\t"
10861 10896             "OR $tmp,$tmp2,$tmp\n\t"
10862 10897             "SRLX $tmp,32,$tmp2\n\t"
10863 10898             "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10864 10899             "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10865 10900   ins_encode %{
10866 10901     Register Rsrc = $src$$Register;
10867 10902     Register Rtmp = $tmp$$Register;
10868 10903     Register Rtmp2 = $tmp2$$Register;
10869 10904     __ sllx(Rsrc, 48, Rtmp);
10870 10905     __ srlx(Rtmp, 16, Rtmp2);
10871 10906     __ or3 (Rtmp, Rtmp2, Rtmp);
10872 10907     __ srlx(Rtmp, 32, Rtmp2);
10873 10908     __ or3 (Rtmp, Rtmp2, Rtmp);
10874 10909     __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10875 10910   %}
10876 10911   ins_pipe(ialu_reg);
10877 10912 %}
10878 10913
10879 10914 // Replicate scalar to packed char/short values into Double stack
// Pre-VIS3 variant of Repl4S_reg: same 16->32->64 bit doubling, but the
// result is stored to a stack slot instead of moved into an FP register.
10880 10915 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10881 10916   predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
10882 10917   match(Set dst (ReplicateS src));
10883 10918   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10884 10919   format %{ "SLLX $src,48,$tmp\n\t"
10885 10920             "SRLX $tmp,16,$tmp2\n\t"
10886 10921             "OR $tmp,$tmp2,$tmp\n\t"
10887 10922             "SRLX $tmp,32,$tmp2\n\t"
10888 10923             "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10889 10924             "STX $tmp,$dst\t! regL to stkD" %}
10890 10925   ins_encode %{
10891 10926     Register Rsrc = $src$$Register;
10892 10927     Register Rtmp = $tmp$$Register;
10893 10928     Register Rtmp2 = $tmp2$$Register;
10894 10929     __ sllx(Rsrc, 48, Rtmp);
10895 10930     __ srlx(Rtmp, 16, Rtmp2);
10896 10931     __ or3 (Rtmp, Rtmp2, Rtmp);
10897 10932     __ srlx(Rtmp, 32, Rtmp2);
10898 10933     __ or3 (Rtmp, Rtmp2, Rtmp);
10899 10934     // Rtmp2 is dead after the last OR; reuse it for the biased displacement.
10899 10934     __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10900 10935     __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10901 10936   %}
10902 10937   ins_pipe(ialu_reg);
10903 10938 %}
10904 10939
10905 10940 // Replicate scalar constant to packed char/short values in Double register
// Pattern materialized in the constant table, loaded with one LDDF.
// O7 (tmp) is killed as scratch for an out-of-range table offset.
10906 10941 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
10907 10942   predicate(n->as_Vector()->length() == 4);
10908 10943   match(Set dst (ReplicateS con));
10909 10944   effect(KILL tmp);
10910 10945   format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
10911 10946   ins_encode %{
10912 10947     // XXX This is a quick fix for 6833573.
10913 10948     //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
10914 10949     RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
10915 10950     __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10916 10951   %}
10917 10952   ins_pipe(loadConFD);
10918 10953 %}
10919 10954
10920 10955 // Replicate scalar to packed int values into Double register
// Single doubling step: the 32-bit value is placed in the upper half and
// OR-ed with its own right shift (32 -> 64 bits), then VIS3 MOVXTOD.
10921 10956 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10922 10957   predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
10923 10958   match(Set dst (ReplicateI src));
10924 10959   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10925 10960   format %{ "SLLX $src,32,$tmp\n\t"
10926 10961             "SRLX $tmp,32,$tmp2\n\t"
10927 10962             "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10928 10963             "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10929 10964   ins_encode %{
10930 10965     Register Rsrc = $src$$Register;
10931 10966     Register Rtmp = $tmp$$Register;
10932 10967     Register Rtmp2 = $tmp2$$Register;
10933 10968     __ sllx(Rsrc, 32, Rtmp);
10934 10969     __ srlx(Rtmp, 32, Rtmp2);
10935 10970     __ or3 (Rtmp, Rtmp2, Rtmp);
10936 10971     __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10937 10972   %}
10938 10973   ins_pipe(ialu_reg);
10939 10974 %}
10940 10975
10941 10976 // Replicate scalar to packed int values into Double stack
// Pre-VIS3 variant of Repl2I_reg: same single doubling step, with the
// result stored to a stack slot instead of moved into an FP register.
10942 10977 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10943 10978   predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
10944 10979   match(Set dst (ReplicateI src));
10945 10980   effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10946 10981   format %{ "SLLX $src,32,$tmp\n\t"
10947 10982             "SRLX $tmp,32,$tmp2\n\t"
10948 10983             "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10949 10984             "STX $tmp,$dst\t! regL to stkD" %}
10950 10985   ins_encode %{
10951 10986     Register Rsrc = $src$$Register;
10952 10987     Register Rtmp = $tmp$$Register;
10953 10988     Register Rtmp2 = $tmp2$$Register;
10954 10989     __ sllx(Rsrc, 32, Rtmp);
10955 10990     __ srlx(Rtmp, 32, Rtmp2);
10956 10991     __ or3 (Rtmp, Rtmp2, Rtmp);
10957 10992     // Rtmp2 is dead after the OR; reuse it for the biased displacement.
10957 10992     __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10958 10993     __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10959 10994   %}
10960 10995   ins_pipe(ialu_reg);
10961 10996 %}
10962 10997
10963 10998 // Replicate scalar constant to packed int values in Double register
// (operand is a general immI, not just zero).  Pattern materialized in the
// constant table, loaded with one LDDF; O7 (tmp) killed as offset scratch.
10964 10999 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
10965 11000   predicate(n->as_Vector()->length() == 2);
10966 11001   match(Set dst (ReplicateI con));
10967 11002   effect(KILL tmp);
10968 11003   format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
10969 11004   ins_encode %{
10970 11005     // XXX This is a quick fix for 6833573.
10971 11006     //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
10972 11007     RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
10973 11008     __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10974 11009   %}
10975 11010   ins_pipe(loadConFD);
10976 11011 %}
10977 11012
10978 11013 // Replicate scalar to packed float values into Double stack
// Stores the same single float into both halves of a double-sized stack
// slot (two STF instructions, offsets 0 and +4).
10979 11014 instruct Repl2F_stk(stackSlotD dst, regF src) %{
10980 11015   predicate(n->as_Vector()->length() == 2);
10981 11016   match(Set dst (ReplicateF src));
10982 11017   ins_cost(MEMORY_REF_COST*2);
10983 11018   format %{ "STF $src,$dst.hi\t! packed2F\n\t"
10984 11019             "STF $src,$dst.lo" %}
10985 11020   opcode(Assembler::stf_op3);
10986 11021   ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
10987 11022   ins_pipe(fstoreF_stk_reg);
10988 11023 %}
10989 11024
10990 11025 // Replicate scalar constant to packed float values in Double register
// (operand is a general immF, not just zero).  Pattern materialized in the
// constant table, loaded with one LDDF; O7 (tmp) killed as offset scratch.
10991 11026 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
10992 11027   predicate(n->as_Vector()->length() == 2);
10993 11028   match(Set dst (ReplicateF con));
10994 11029   effect(KILL tmp);
10995 11030   format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
10996 11031   ins_encode %{
10997 11032     // XXX This is a quick fix for 6833573.
10998 11033     //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
10999 11034     RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
11000 11035     __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
11001 11036   %}
11002 11037   ins_pipe(loadConFD);
11003 11038 %}
11004 11039
11005 11040 //----------PEEPHOLE RULES-----------------------------------------------------
11006 11041 // These must follow all instruction definitions as they use the names
11007 11042 // defined in the instructions definitions.
11008 11043 //
11009 11044 // peepmatch ( root_instr_name [preceding_instruction]* );
11010 11045 //
11011 11046 // peepconstraint %{
11012 11047 // (instruction_number.operand_name relational_op instruction_number.operand_name
11013 11048 // [, ...] );
11014 11049 // // instruction numbers are zero-based using left to right order in peepmatch
11015 11050 //
11016 11051 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
11017 11052 // // provide an instruction_number.operand_name for each operand that appears
11018 11053 // // in the replacement instruction's match rule
11019 11054 //
11020 11055 // ---------VM FLAGS---------------------------------------------------------
11021 11056 //
11022 11057 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11023 11058 //
11024 11059 // Each peephole rule is given an identifying number starting with zero and
11025 11060 // increasing by one in the order seen by the parser. An individual peephole
11026 11061 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11027 11062 // on the command-line.
11028 11063 //
11029 11064 // ---------CURRENT LIMITATIONS----------------------------------------------
11030 11065 //
11031 11066 // Only match adjacent instructions in same basic block
11032 11067 // Only equality constraints
11033 11068 // Only constraints between operands, not (0.dest_reg == EAX_enc)
11034 11069 // Only one replacement instruction
11035 11070 //
11036 11071 // ---------EXAMPLE----------------------------------------------------------
11037 11072 //
11038 11073 // // pertinent parts of existing instructions in architecture description
11039 11074 // instruct movI(eRegI dst, eRegI src) %{
11040 11075 // match(Set dst (CopyI src));
11041 11076 // %}
11042 11077 //
11043 11078 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
11044 11079 // match(Set dst (AddI dst src));
11045 11080 // effect(KILL cr);
11046 11081 // %}
11047 11082 //
11048 11083 // // Change (inc mov) to lea
11049 11084 // peephole %{
11050 11085 //   // increment preceded by register-register move
11051 11086 // peepmatch ( incI_eReg movI );
11052 11087 // // require that the destination register of the increment
11053 11088 // // match the destination register of the move
11054 11089 // peepconstraint ( 0.dst == 1.dst );
11055 11090 // // construct a replacement instruction that sets
11056 11091 // // the destination to ( move's source register + one )
11057 11092 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
11058 11093 // %}
11059 11094 //
11060 11095
11061 11096 // // Change load of spilled value to only a spill
11062 11097 // instruct storeI(memory mem, eRegI src) %{
11063 11098 // match(Set mem (StoreI mem src));
11064 11099 // %}
11065 11100 //
11066 11101 // instruct loadI(eRegI dst, memory mem) %{
11067 11102 // match(Set dst (LoadI mem));
11068 11103 // %}
11069 11104 //
11070 11105 // peephole %{
11071 11106 // peepmatch ( loadI storeI );
11072 11107 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
11073 11108 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
11074 11109 // %}
11075 11110
11076 11111 //----------SMARTSPILL RULES---------------------------------------------------
11077 11112 // These must follow all instruction definitions as they use the names
11078 11113 // defined in the instructions definitions.
11079 11114 //
11080 11115 // SPARC will probably not have any of these rules due to RISC instruction set.
11081 11116
11082 11117 //----------PIPELINE-----------------------------------------------------------
11083 11118 // Rules which define the behavior of the target architectures pipeline.
↓ open down ↓ |
623 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX