Print this page
rev 2237 : [mq]: initial-intrinsification-changes
rev 2238 : [mq]: code-review-comments-vladimir
Split |
Close |
Expand all |
Collapse all |
--- old/src/cpu/sparc/vm/assembler_sparc.hpp
+++ new/src/cpu/sparc/vm/assembler_sparc.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
26 26 #define CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
27 27
28 28 class BiasedLockingCounters;
29 29
30 30 // <sys/trap.h> promises that the system will not use traps 16-31
31 31 #define ST_RESERVED_FOR_USER_0 0x10
32 32
33 33 /* Written: David Ungar 4/19/97 */
34 34
35 35 // Contains all the definitions needed for sparc assembly code generation.
36 36
37 37 // Register aliases for parts of the system:
38 38
39 39 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
40 40 // across context switches in V8+ ABI. Of course, there are no 64 bit regs
41 41 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
42 42
43 43 // g2-g4 are scratch registers called "application globals". Their
44 44 // meaning is reserved to the "compilation system"--which means us!
45 45 // They are not supposed to be touched by ordinary C code, although
46 46 // highly-optimized C code might steal them for temps. They are safe
47 47 // across thread switches, and the ABI requires that they be safe
48 48 // across function calls.
49 49 //
50 50 // g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
51 51 // across func calls, and V8+ also allows g5 to be clobbered across
52 52 // func calls. Also, g1 and g5 can get touched while doing shared
53 53 // library loading.
54 54 //
55 55 // We must not touch g7 (it is the thread-self register) and g6 is
56 56 // reserved for certain tools. g0, of course, is always zero.
57 57 //
58 58 // (Sources: SunSoft Compilers Group, thread library engineers.)
59 59
60 60 // %%%% The interpreter should be revisited to reduce global scratch regs.
61 61
62 62 // This global always holds the current JavaThread pointer:
63 63
64 64 REGISTER_DECLARATION(Register, G2_thread , G2);
65 65 REGISTER_DECLARATION(Register, G6_heapbase , G6);
66 66
67 67 // The following globals are part of the Java calling convention:
68 68
69 69 REGISTER_DECLARATION(Register, G5_method , G5);
70 70 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
71 71 REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
72 72
73 73 // The following globals are used for the new C1 & interpreter calling convention:
74 74 REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
75 75
76 76 // This local is used to preserve G2_thread in the interpreter and in stubs:
77 77 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
78 78
79 79 // These globals are used as scratch registers in the interpreter:
80 80
81 81 REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
82 82 REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
83 83 REGISTER_DECLARATION(Register, G3_scratch , G3);
84 84 REGISTER_DECLARATION(Register, G4_scratch , G4);
85 85
86 86 // These globals are used as short-lived scratch registers in the compiler:
87 87
88 88 REGISTER_DECLARATION(Register, Gtemp , G5);
89 89
90 90 // JSR 292 fixed register usages:
91 91 REGISTER_DECLARATION(Register, G5_method_type , G5);
92 92 REGISTER_DECLARATION(Register, G3_method_handle , G3);
93 93 REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
94 94
95 95 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
96 96 // because a single patchable "set" instruction (NativeMovConstReg,
97 97 // or NativeMovConstPatching for compiler1)
98 98 // serves to set up either quantity, depending on whether the compiled
99 99 // call site is an inline cache or is megamorphic. See the function
100 100 // CompiledIC::set_to_megamorphic.
101 101 //
102 102 // If an inline cache targets an interpreted method, then the
103 103 // G5 register will be used twice during the call. First,
104 104 // the call site will be patched to load a compiledICHolder
105 105 // into G5. (This is an ordered pair of ic_klass, method.)
106 106 // The c2i adapter will first check the ic_klass, then load
107 107 // G5_method with the method part of the pair just before
108 108 // jumping into the interpreter.
109 109 //
110 110 // Note that G5_method is only the method-self for the interpreter,
111 111 // and is logically unrelated to G5_megamorphic_method.
112 112 //
113 113 // Invariants on G2_thread (the JavaThread pointer):
114 114 // - it should not be used for any other purpose anywhere
115 115 // - it must be re-initialized by StubRoutines::call_stub()
116 116 // - it must be preserved around every use of call_VM
117 117
118 118 // We can consider using g2/g3/g4 to cache more values than the
119 119 // JavaThread, such as the card-marking base or perhaps pointers into
120 120 // Eden. It's something of a waste to use them as scratch temporaries,
121 121 // since they are not supposed to be volatile. (Of course, if we find
122 122 // that Java doesn't benefit from application globals, then we can just
123 123 // use them as ordinary temporaries.)
124 124 //
125 125 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
126 126 // it makes sense to use them routinely for procedure linkage,
127 127 // whenever the On registers are not applicable. Examples: G5_method,
128 128 // G5_inline_cache_klass, and a double handful of miscellaneous compiler
129 129 // stubs. This means that compiler stubs, etc., should be kept to a
130 130 // maximum of two or three G-register arguments.
131 131
132 132
133 133 // stub frames
134 134
135 135 REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to callee (interpreter) not stub itself
136 136
137 137 // Interpreter frames
138 138
139 139 #ifdef CC_INTERP
140 140 REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
141 141 REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
142 142 REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
143 143 REGISTER_DECLARATION(Register, L2_scratch , L2);
144 144 REGISTER_DECLARATION(Register, L3_scratch , L3);
145 145 REGISTER_DECLARATION(Register, L4_scratch , L4);
146 146 REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
147 147 REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
148 148 REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
149 149 REGISTER_DECLARATION(Register, O5_savedSP , O5);
150 150 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
151 151 // a copy of SP, so in 64-bit it's a biased value. The bias
152 152 // is added and removed as needed in the frame code.
153 153 // Interface to signature handler
154 154 REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
155 155 REGISTER_DECLARATION(Register, Lmethod , L6); // methodOop when calling signature handler
156 156
157 157 #else
158 158 REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
159 159 REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
160 160 REGISTER_DECLARATION(Register, Lmethod , L2);
161 161 REGISTER_DECLARATION(Register, Llocals , L3);
162 162 REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
163 163 // must match Llocals in asm interpreter
164 164 REGISTER_DECLARATION(Register, Lmonitors , L4);
165 165 REGISTER_DECLARATION(Register, Lbyte_code , L5);
166 166 // When calling out from the interpreter we record SP so that we can remove any extra stack
167 167 // space allocated during adapter transitions. This register is only live from the point
168 168 // of the call until we return.
169 169 REGISTER_DECLARATION(Register, Llast_SP , L5);
170 170 REGISTER_DECLARATION(Register, Lscratch , L5);
171 171 REGISTER_DECLARATION(Register, Lscratch2 , L6);
172 172 REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
173 173
174 174 REGISTER_DECLARATION(Register, O5_savedSP , O5);
175 175 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
176 176 // a copy of SP, so in 64-bit it's a biased value. The bias
177 177 // is added and removed as needed in the frame code.
178 178 REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
179 179 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
180 180 REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
181 181 #endif /* CC_INTERP */
182 182
183 183 // NOTE: Lscratch2 and LcpoolCache point to the same registers in
184 184 // the interpreter code. If Lscratch2 needs to be used for some
185 185 // purpose, then LcpoolCache should be restored after that for
186 186 // the interpreter to work right
187 187 // (These assignments must be compatible with L7_thread_cache; see above.)
188 188
189 189 // Since Lbcp points into the middle of the method object,
190 190 // it is temporarily converted into a "bcx" during GC.
191 191
192 192 // Exception processing
193 193 // These registers are passed into exception handlers.
194 194 // All exception handlers require the exception object being thrown.
195 195 // In addition, an nmethod's exception handler must be passed
196 196 // the address of the call site within the nmethod, to allow
197 197 // proper selection of the applicable catch block.
198 198 // (Interpreter frames use their own bcp() for this purpose.)
199 199 //
200 200 // The Oissuing_pc value is not always needed. When jumping to a
201 201 // handler that is known to be interpreted, the Oissuing_pc value can be
202 202 // omitted. An actual catch block in compiled code receives (from its
203 203 // nmethod's exception handler) the thrown exception in the Oexception,
204 204 // but it doesn't need the Oissuing_pc.
205 205 //
206 206 // If an exception handler (either interpreted or compiled)
207 207 // discovers there is no applicable catch block, it updates
208 208 // the Oissuing_pc to the continuation PC of its own caller,
209 209 // pops back to that caller's stack frame, and executes that
210 210 // caller's exception handler. Obviously, this process will
211 211 // iterate until the control stack is popped back to a method
212 212 // containing an applicable catch block. A key invariant is
213 213 // that the Oissuing_pc value is always a value local to
214 214 // the method whose exception handler is currently executing.
215 215 //
216 216 // Note: The issuing PC value is __not__ a raw return address (I7 value).
217 217 // It is a "return pc", the address __following__ the call.
218 218 // Raw return addresses are converted to issuing PCs by frame::pc(),
219 219 // or by stubs. Issuing PCs can be used directly with PC range tables.
220 220 //
221 221 REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
222 222 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
223 223
224 224
225 225 // These must occur after the declarations above
226 226 #ifndef DONT_USE_REGISTER_DEFINES
227 227
228 228 #define Gthread AS_REGISTER(Register, Gthread)
229 229 #define Gmethod AS_REGISTER(Register, Gmethod)
230 230 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
231 231 #define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
232 232 #define Gargs AS_REGISTER(Register, Gargs)
233 233 #define Lthread_cache AS_REGISTER(Register, Lthread_cache)
234 234 #define Gframe_size AS_REGISTER(Register, Gframe_size)
235 235 #define Gtemp AS_REGISTER(Register, Gtemp)
236 236
237 237 #ifdef CC_INTERP
238 238 #define Lstate AS_REGISTER(Register, Lstate)
239 239 #define Lesp AS_REGISTER(Register, Lesp)
240 240 #define L1_scratch AS_REGISTER(Register, L1_scratch)
241 241 #define Lmirror AS_REGISTER(Register, Lmirror)
242 242 #define L2_scratch AS_REGISTER(Register, L2_scratch)
243 243 #define L3_scratch AS_REGISTER(Register, L3_scratch)
244 244 #define L4_scratch AS_REGISTER(Register, L4_scratch)
245 245 #define Lscratch AS_REGISTER(Register, Lscratch)
246 246 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
247 247 #define L7_scratch AS_REGISTER(Register, L7_scratch)
248 248 #define Ostate AS_REGISTER(Register, Ostate)
249 249 #else
250 250 #define Lesp AS_REGISTER(Register, Lesp)
251 251 #define Lbcp AS_REGISTER(Register, Lbcp)
252 252 #define Lmethod AS_REGISTER(Register, Lmethod)
253 253 #define Llocals AS_REGISTER(Register, Llocals)
254 254 #define Lmonitors AS_REGISTER(Register, Lmonitors)
255 255 #define Lbyte_code AS_REGISTER(Register, Lbyte_code)
256 256 #define Lscratch AS_REGISTER(Register, Lscratch)
257 257 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
258 258 #define LcpoolCache AS_REGISTER(Register, LcpoolCache)
259 259 #endif /* ! CC_INTERP */
260 260
261 261 #define Lentry_args AS_REGISTER(Register, Lentry_args)
262 262 #define I5_savedSP AS_REGISTER(Register, I5_savedSP)
263 263 #define O5_savedSP AS_REGISTER(Register, O5_savedSP)
264 264 #define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
265 265 #define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
266 266 #define IdispatchTables AS_REGISTER(Register, IdispatchTables)
267 267
268 268 #define Oexception AS_REGISTER(Register, Oexception)
269 269 #define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
270 270
271 271
272 272 #endif
273 273
274 274 // Address is an abstraction used to represent a memory location.
275 275 //
276 276 // Note: A register location is represented via a Register, not
277 277 // via an address for efficiency & simplicity reasons.
278 278
279 279 class Address VALUE_OBJ_CLASS_SPEC {
280 280 private:
281 281 Register _base; // Base register.
282 282 RegisterOrConstant _index_or_disp; // Index register or constant displacement.
283 283 RelocationHolder _rspec;
284 284
285 285 public:
286 286 Address() : _base(noreg), _index_or_disp(noreg) {}
287 287
288 288 Address(Register base, RegisterOrConstant index_or_disp)
289 289 : _base(base),
290 290 _index_or_disp(index_or_disp) {
291 291 }
292 292
293 293 Address(Register base, Register index)
294 294 : _base(base),
295 295 _index_or_disp(index) {
296 296 }
297 297
298 298 Address(Register base, int disp)
299 299 : _base(base),
300 300 _index_or_disp(disp) {
301 301 }
302 302
303 303 #ifdef ASSERT
304 304 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
305 305 Address(Register base, ByteSize disp)
306 306 : _base(base),
307 307 _index_or_disp(in_bytes(disp)) {
308 308 }
309 309 #endif
310 310
311 311 // accessors
312 312 Register base() const { return _base; }
313 313 Register index() const { return _index_or_disp.as_register(); }
314 314 int disp() const { return _index_or_disp.as_constant(); }
315 315
316 316 bool has_index() const { return _index_or_disp.is_register(); }
317 317 bool has_disp() const { return _index_or_disp.is_constant(); }
318 318
319 319 const relocInfo::relocType rtype() { return _rspec.type(); }
320 320 const RelocationHolder& rspec() { return _rspec; }
321 321
322 322 RelocationHolder rspec(int offset) const {
323 323 return offset == 0 ? _rspec : _rspec.plus(offset);
324 324 }
325 325
326 326 inline bool is_simm13(int offset = 0); // check disp+offset for overflow
327 327
328 328 Address plus_disp(int plusdisp) const { // bump disp by a small amount
329 329 assert(_index_or_disp.is_constant(), "must have a displacement");
330 330 Address a(base(), disp() + plusdisp);
331 331 return a;
332 332 }
333 333
334 334 Address after_save() const {
335 335 Address a = (*this);
336 336 a._base = a._base->after_save();
337 337 return a;
338 338 }
339 339
340 340 Address after_restore() const {
341 341 Address a = (*this);
342 342 a._base = a._base->after_restore();
343 343 return a;
344 344 }
345 345
346 346 // Convert the raw encoding form into the form expected by the
347 347 // constructor for Address.
348 348 static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
349 349
350 350 friend class Assembler;
351 351 };
352 352
353 353
354 354 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
355 355 private:
356 356 address _address;
357 357 RelocationHolder _rspec;
358 358
359 359 RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
360 360 switch (rtype) {
361 361 case relocInfo::external_word_type:
362 362 return external_word_Relocation::spec(addr);
363 363 case relocInfo::internal_word_type:
364 364 return internal_word_Relocation::spec(addr);
365 365 #ifdef _LP64
366 366 case relocInfo::opt_virtual_call_type:
367 367 return opt_virtual_call_Relocation::spec();
368 368 case relocInfo::static_call_type:
369 369 return static_call_Relocation::spec();
370 370 case relocInfo::runtime_call_type:
371 371 return runtime_call_Relocation::spec();
372 372 #endif
373 373 case relocInfo::none:
374 374 return RelocationHolder();
375 375 default:
376 376 ShouldNotReachHere();
377 377 return RelocationHolder();
378 378 }
379 379 }
380 380
381 381 protected:
382 382 // creation
383 383 AddressLiteral() : _address(NULL), _rspec(NULL) {}
384 384
385 385 public:
386 386 AddressLiteral(address addr, RelocationHolder const& rspec)
387 387 : _address(addr),
388 388 _rspec(rspec) {}
389 389
390 390 // Some constructors to avoid casting at the call site.
391 391 AddressLiteral(jobject obj, RelocationHolder const& rspec)
392 392 : _address((address) obj),
393 393 _rspec(rspec) {}
394 394
395 395 AddressLiteral(intptr_t value, RelocationHolder const& rspec)
396 396 : _address((address) value),
397 397 _rspec(rspec) {}
398 398
399 399 AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
400 400 : _address((address) addr),
401 401 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
402 402
403 403 // Some constructors to avoid casting at the call site.
404 404 AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
405 405 : _address((address) addr),
406 406 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
407 407
408 408 AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
409 409 : _address((address) addr),
410 410 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
411 411
412 412 AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
413 413 : _address((address) addr),
414 414 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
415 415
416 416 AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
417 417 : _address((address) addr),
418 418 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
419 419
420 420 AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
421 421 : _address((address) addr),
422 422 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
423 423
424 424 AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
425 425 : _address((address) addr),
426 426 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
427 427
428 428 #ifdef _LP64
429 429 // 32-bit complains about a multiple declaration for int*.
430 430 AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
431 431 : _address((address) addr),
432 432 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
433 433 #endif
434 434
435 435 AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
436 436 : _address((address) addr),
437 437 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
438 438
439 439 AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
440 440 : _address((address) addr),
441 441 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
442 442
443 443 AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
444 444 : _address((address) addr),
445 445 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
446 446
447 447 intptr_t value() const { return (intptr_t) _address; }
448 448 int low10() const;
449 449
450 450 const relocInfo::relocType rtype() const { return _rspec.type(); }
451 451 const RelocationHolder& rspec() const { return _rspec; }
452 452
453 453 RelocationHolder rspec(int offset) const {
454 454 return offset == 0 ? _rspec : _rspec.plus(offset);
455 455 }
456 456 };
457 457
458 458
// Address of the stack slot where this register is flushed by a
// register-window save.  The offset is SP-relative, scaled by wordSize,
// and includes STACK_BIAS (presumably nonzero only on 64-bit SPARC --
// confirm against the platform definition of STACK_BIAS).
inline Address RegisterImpl::address_in_saved_window() const {
   return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
}
462 462
463 463
464 464
465 465 // Argument is an abstraction used to represent an outgoing
466 466 // actual argument or an incoming formal parameter, whether
467 467 // it resides in memory or in a register, in a manner consistent
468 468 // with the SPARC Application Binary Interface, or ABI. This is
469 469 // often referred to as the native or C calling convention.
470 470
471 471 class Argument VALUE_OBJ_CLASS_SPEC {
472 472 private:
473 473 int _number;
474 474 bool _is_in;
475 475
476 476 public:
477 477 #ifdef _LP64
478 478 enum {
479 479 n_register_parameters = 6, // only 6 registers may contain integer parameters
480 480 n_float_register_parameters = 16 // Can have up to 16 floating registers
481 481 };
482 482 #else
483 483 enum {
484 484 n_register_parameters = 6 // only 6 registers may contain integer parameters
485 485 };
486 486 #endif
487 487
488 488 // creation
489 489 Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
490 490
491 491 int number() const { return _number; }
492 492 bool is_in() const { return _is_in; }
493 493 bool is_out() const { return !is_in(); }
494 494
495 495 Argument successor() const { return Argument(number() + 1, is_in()); }
496 496 Argument as_in() const { return Argument(number(), true ); }
497 497 Argument as_out() const { return Argument(number(), false); }
498 498
499 499 // locating register-based arguments:
500 500 bool is_register() const { return _number < n_register_parameters; }
501 501
502 502 #ifdef _LP64
503 503 // locating Floating Point register-based arguments:
504 504 bool is_float_register() const { return _number < n_float_register_parameters; }
505 505
506 506 FloatRegister as_float_register() const {
507 507 assert(is_float_register(), "must be a register argument");
508 508 return as_FloatRegister(( number() *2 ) + 1);
509 509 }
510 510 FloatRegister as_double_register() const {
511 511 assert(is_float_register(), "must be a register argument");
512 512 return as_FloatRegister(( number() *2 ));
513 513 }
514 514 #endif
515 515
516 516 Register as_register() const {
517 517 assert(is_register(), "must be a register argument");
518 518 return is_in() ? as_iRegister(number()) : as_oRegister(number());
519 519 }
520 520
521 521 // locating memory-based arguments
522 522 Address as_address() const {
523 523 assert(!is_register(), "must be a memory argument");
524 524 return address_in_frame();
525 525 }
526 526
527 527 // When applied to a register-based argument, give the corresponding address
528 528 // into the 6-word area "into which callee may store register arguments"
529 529 // (This is a different place than the corresponding register-save area location.)
530 530 Address address_in_frame() const;
531 531
532 532 // debugging
533 533 const char* name() const;
534 534
535 535 friend class Assembler;
536 536 };
537 537
538 538
539 539 // The SPARC Assembler: Pure assembler doing NO optimizations on the instruction
540 540 // level; i.e., what you write
541 541 // is what you get. The Assembler is generating code into a CodeBuffer.
542 542
543 543 class Assembler : public AbstractAssembler {
544 544 protected:
545 545
546 546 static void print_instruction(int inst);
547 547 static int patched_branch(int dest_pos, int inst, int inst_pos);
548 548 static int branch_destination(int inst, int pos);
549 549
550 550
551 551 friend class AbstractAssembler;
552 552 friend class AddressLiteral;
553 553
554 554 // code patchers need various routines like inv_wdisp()
555 555 friend class NativeInstruction;
556 556 friend class NativeGeneralJump;
557 557 friend class Relocation;
558 558 friend class Label;
559 559
560 560 public:
561 561 // op carries format info; see page 62 & 267
562 562
563 563 enum ops {
564 564 call_op = 1, // fmt 1
565 565 branch_op = 0, // also sethi (fmt2)
566 566 arith_op = 2, // fmt 3, arith & misc
567 567 ldst_op = 3 // fmt 3, load/store
568 568 };
569 569
570 570 enum op2s {
571 571 bpr_op2 = 3,
572 572 fb_op2 = 6,
573 573 fbp_op2 = 5,
574 574 br_op2 = 2,
575 575 bp_op2 = 1,
576 576 cb_op2 = 7, // V8
577 577 sethi_op2 = 4
578 578 };
579 579
580 580 enum op3s {
581 581 // selected op3s
582 582 add_op3 = 0x00,
583 583 and_op3 = 0x01,
584 584 or_op3 = 0x02,
585 585 xor_op3 = 0x03,
586 586 sub_op3 = 0x04,
587 587 andn_op3 = 0x05,
588 588 orn_op3 = 0x06,
589 589 xnor_op3 = 0x07,
590 590 addc_op3 = 0x08,
591 591 mulx_op3 = 0x09,
592 592 umul_op3 = 0x0a,
593 593 smul_op3 = 0x0b,
594 594 subc_op3 = 0x0c,
595 595 udivx_op3 = 0x0d,
596 596 udiv_op3 = 0x0e,
597 597 sdiv_op3 = 0x0f,
598 598
599 599 addcc_op3 = 0x10,
600 600 andcc_op3 = 0x11,
601 601 orcc_op3 = 0x12,
602 602 xorcc_op3 = 0x13,
603 603 subcc_op3 = 0x14,
604 604 andncc_op3 = 0x15,
605 605 orncc_op3 = 0x16,
606 606 xnorcc_op3 = 0x17,
607 607 addccc_op3 = 0x18,
608 608 umulcc_op3 = 0x1a,
609 609 smulcc_op3 = 0x1b,
610 610 subccc_op3 = 0x1c,
611 611 udivcc_op3 = 0x1e,
612 612 sdivcc_op3 = 0x1f,
613 613
614 614 taddcc_op3 = 0x20,
615 615 tsubcc_op3 = 0x21,
616 616 taddcctv_op3 = 0x22,
617 617 tsubcctv_op3 = 0x23,
618 618 mulscc_op3 = 0x24,
619 619 sll_op3 = 0x25,
620 620 sllx_op3 = 0x25,
621 621 srl_op3 = 0x26,
622 622 srlx_op3 = 0x26,
623 623 sra_op3 = 0x27,
624 624 srax_op3 = 0x27,
625 625 rdreg_op3 = 0x28,
626 626 membar_op3 = 0x28,
627 627
628 628 flushw_op3 = 0x2b,
629 629 movcc_op3 = 0x2c,
630 630 sdivx_op3 = 0x2d,
631 631 popc_op3 = 0x2e,
632 632 movr_op3 = 0x2f,
633 633
634 634 sir_op3 = 0x30,
635 635 wrreg_op3 = 0x30,
636 636 saved_op3 = 0x31,
637 637
638 638 fpop1_op3 = 0x34,
639 639 fpop2_op3 = 0x35,
640 640 impdep1_op3 = 0x36,
641 641 impdep2_op3 = 0x37,
642 642 jmpl_op3 = 0x38,
643 643 rett_op3 = 0x39,
644 644 trap_op3 = 0x3a,
645 645 flush_op3 = 0x3b,
646 646 save_op3 = 0x3c,
647 647 restore_op3 = 0x3d,
648 648 done_op3 = 0x3e,
649 649 retry_op3 = 0x3e,
650 650
651 651 lduw_op3 = 0x00,
652 652 ldub_op3 = 0x01,
653 653 lduh_op3 = 0x02,
654 654 ldd_op3 = 0x03,
655 655 stw_op3 = 0x04,
656 656 stb_op3 = 0x05,
657 657 sth_op3 = 0x06,
658 658 std_op3 = 0x07,
659 659 ldsw_op3 = 0x08,
660 660 ldsb_op3 = 0x09,
661 661 ldsh_op3 = 0x0a,
662 662 ldx_op3 = 0x0b,
663 663
664 664 ldstub_op3 = 0x0d,
665 665 stx_op3 = 0x0e,
666 666 swap_op3 = 0x0f,
667 667
668 668 stwa_op3 = 0x14,
669 669 stxa_op3 = 0x1e,
670 670
671 671 ldf_op3 = 0x20,
672 672 ldfsr_op3 = 0x21,
673 673 ldqf_op3 = 0x22,
674 674 lddf_op3 = 0x23,
675 675 stf_op3 = 0x24,
676 676 stfsr_op3 = 0x25,
677 677 stqf_op3 = 0x26,
678 678 stdf_op3 = 0x27,
679 679
680 680 prefetch_op3 = 0x2d,
681 681
682 682
683 683 ldc_op3 = 0x30,
684 684 ldcsr_op3 = 0x31,
685 685 lddc_op3 = 0x33,
686 686 stc_op3 = 0x34,
687 687 stcsr_op3 = 0x35,
688 688 stdcq_op3 = 0x36,
689 689 stdc_op3 = 0x37,
690 690
691 691 casa_op3 = 0x3c,
692 692 casxa_op3 = 0x3e,
693 693
694 694 alt_bit_op3 = 0x10,
695 695 cc_bit_op3 = 0x10
696 696 };
697 697
698 698 enum opfs {
699 699 // selected opfs
700 700 fmovs_opf = 0x01,
701 701 fmovd_opf = 0x02,
702 702
703 703 fnegs_opf = 0x05,
704 704 fnegd_opf = 0x06,
705 705
706 706 fadds_opf = 0x41,
707 707 faddd_opf = 0x42,
708 708 fsubs_opf = 0x45,
709 709 fsubd_opf = 0x46,
710 710
711 711 fmuls_opf = 0x49,
712 712 fmuld_opf = 0x4a,
713 713 fdivs_opf = 0x4d,
714 714 fdivd_opf = 0x4e,
715 715
716 716 fcmps_opf = 0x51,
717 717 fcmpd_opf = 0x52,
718 718
719 719 fstox_opf = 0x81,
720 720 fdtox_opf = 0x82,
721 721 fxtos_opf = 0x84,
722 722 fxtod_opf = 0x88,
723 723 fitos_opf = 0xc4,
724 724 fdtos_opf = 0xc6,
725 725 fitod_opf = 0xc8,
726 726 fstod_opf = 0xc9,
727 727 fstoi_opf = 0xd1,
728 728 fdtoi_opf = 0xd2
729 729 };
730 730
731 731 enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7 };
732 732
733 733 enum Condition {
734 734 // for FBfcc & FBPfcc instruction
735 735 f_never = 0,
736 736 f_notEqual = 1,
737 737 f_notZero = 1,
738 738 f_lessOrGreater = 2,
739 739 f_unorderedOrLess = 3,
740 740 f_less = 4,
741 741 f_unorderedOrGreater = 5,
742 742 f_greater = 6,
743 743 f_unordered = 7,
744 744 f_always = 8,
745 745 f_equal = 9,
746 746 f_zero = 9,
747 747 f_unorderedOrEqual = 10,
748 748 f_greaterOrEqual = 11,
749 749 f_unorderedOrGreaterOrEqual = 12,
750 750 f_lessOrEqual = 13,
751 751 f_unorderedOrLessOrEqual = 14,
752 752 f_ordered = 15,
753 753
754 754 // V8 coproc, pp 123 v8 manual
755 755
756 756 cp_always = 8,
757 757 cp_never = 0,
758 758 cp_3 = 7,
759 759 cp_2 = 6,
760 760 cp_2or3 = 5,
761 761 cp_1 = 4,
762 762 cp_1or3 = 3,
763 763 cp_1or2 = 2,
764 764 cp_1or2or3 = 1,
765 765 cp_0 = 9,
766 766 cp_0or3 = 10,
767 767 cp_0or2 = 11,
768 768 cp_0or2or3 = 12,
769 769 cp_0or1 = 13,
770 770 cp_0or1or3 = 14,
771 771 cp_0or1or2 = 15,
772 772
773 773
774 774 // for integers
775 775
776 776 never = 0,
777 777 equal = 1,
778 778 zero = 1,
779 779 lessEqual = 2,
780 780 less = 3,
781 781 lessEqualUnsigned = 4,
782 782 lessUnsigned = 5,
783 783 carrySet = 5,
784 784 negative = 6,
785 785 overflowSet = 7,
786 786 always = 8,
787 787 notEqual = 9,
788 788 notZero = 9,
789 789 greater = 10,
790 790 greaterEqual = 11,
791 791 greaterUnsigned = 12,
792 792 greaterEqualUnsigned = 13,
793 793 carryClear = 13,
794 794 positive = 14,
795 795 overflowClear = 15
796 796 };
797 797
798 798 enum CC {
799 799 icc = 0, xcc = 2,
800 800 // ptr_cc is the correct condition code for a pointer or intptr_t:
801 801 ptr_cc = NOT_LP64(icc) LP64_ONLY(xcc),
802 802 fcc0 = 0, fcc1 = 1, fcc2 = 2, fcc3 = 3
803 803 };
804 804
805 805 enum PrefetchFcn {
806 806 severalReads = 0, oneRead = 1, severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
807 807 };
808 808
809 809 public:
810 810 // Helper functions for groups of instructions
811 811
812 812 enum Predict { pt = 1, pn = 0 }; // pt = predict taken
813 813
814 814 enum Membar_mask_bits { // page 184, v9
815 815 StoreStore = 1 << 3,
816 816 LoadStore = 1 << 2,
817 817 StoreLoad = 1 << 1,
818 818 LoadLoad = 1 << 0,
819 819
820 820 Sync = 1 << 6,
821 821 MemIssue = 1 << 5,
822 822 Lookaside = 1 << 4
823 823 };
824 824
825 825 // test if x is within signed immediate range for nbits
826 826 static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 ) <= x && x < ( intptr_t(1) << nbits-1 ); }
827 827
828 828 // test if -4096 <= x <= 4095
829 829 static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
830 830
  // Test whether the byte distance from a to b fits in a signed word
  // displacement of nbits bits (words are 4 bytes, hence nbits + 2).
  static bool is_in_wdisp_range(address a, address b, int nbits) {
    intptr_t d = intptr_t(b) - intptr_t(a);
    return is_simm(d, nbits + 2);
  }
835 835
  // test if label is in simm16 range in words (wdisp16).
  // (non-static: reads the current assembler position via pc())
  bool is_in_wdisp16_range(Label& L) {
    return is_in_wdisp_range(target(L), pc(), 16);
  }
  // test if the distance between two addresses fits in simm30 range in words
  static bool is_in_wdisp30_range(address a, address b) {
    return is_in_wdisp_range(a, b, 30);
  }
844 844
  // Address-space identifiers for alternate-space load/store instructions.
  enum ASIs { // page 72, v9
    ASI_PRIMARY        = 0x80,
    ASI_PRIMARY_LITTLE = 0x88
    // add more from book as needed
  };
850 850
851 851 protected:
852 852 // helpers
853 853
  // x is supposed to fit in a field "nbits" wide
  // and be sign-extended. Check the range.

  static void assert_signed_range(intptr_t x, int nbits) {
    // nbits == 32 covers the whole int range, so any x is accepted there
    assert( nbits == 32
        || -(1 << nbits-1) <= x && x < ( 1 << nbits-1),
        "value out of range");
  }
862 862
  // Like assert_signed_range, but for a word (4-byte) displacement:
  // x must be word aligned, and x/4 must fit in nbits signed bits.
  static void assert_signed_word_disp_range(intptr_t x, int nbits) {
    assert( (x & 3) == 0, "not word aligned");
    assert_signed_range(x, nbits + 2);
  }

  // Check that x fits in an unsigned field nbits wide.
  static void assert_unsigned_const(int x, int nbits) {
    assert( juint(x) < juint(1 << nbits), "unsigned constant out of range");
  }
871 871
  // fields: note bits numbered from LSB = 0,
  // fields known by inclusive bit range

  // Right-justified mask as wide as the [lo_bit, hi_bit] field.
  // NOTE(review): hi_bit == 31 with lo_bit == 0 would shift by 32 (UB);
  // presumably no caller asks for a full-width mask — confirm.
  // (The "0 <= lo_bit" term is vacuous since juint is unsigned.)
  static int fmask(juint hi_bit, juint lo_bit) {
    assert( hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits");
    return (1 << ( hi_bit-lo_bit + 1 )) - 1;
  }
879 879
  // inverse of u_field

  // Extract the unsigned field occupying bits [lo_bit, hi_bit] of x.
  static int inv_u_field(int x, int hi_bit, int lo_bit) {
    juint r = juint(x) >> lo_bit;   // unsigned shift: no sign smearing
    r &= fmask( hi_bit, lo_bit);
    return int(r);
  }


  // signed version: extract from field and sign-extend

  static int inv_s_field(int x, int hi_bit, int lo_bit) {
    // move the field's top bit up to bit 31, arithmetic-shift back down,
    // then extract as usual — the field arrives sign-extended
    int sign_shift = 31 - hi_bit;
    return inv_u_field( ((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
  }
895 895
  // given a field that ranges from hi_bit to lo_bit (inclusive,
  // LSB = 0), and an unsigned value for the field,
  // shift it into the field

#ifdef ASSERT
  // checked version: verifies x fits the field and that inv_u_field
  // round-trips the encoding
  static int u_field(int x, int hi_bit, int lo_bit) {
    assert( ( x & ~fmask(hi_bit, lo_bit)) == 0,
            "value out of range");
    int r = x << lo_bit;
    assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
    return r;
  }
#else
  // make sure this is inlined as it will reduce code size significantly
  #define u_field(x, hi_bit, lo_bit) ((x) << (lo_bit))
#endif
912 912
  // Decoders for the fixed instruction fields (names follow the V9
  // instruction-format field names).
  static int inv_op( int x ) { return inv_u_field(x, 31, 30); }
  static int inv_op2( int x ) { return inv_u_field(x, 24, 22); }
  static int inv_op3( int x ) { return inv_u_field(x, 24, 19); }
  static int inv_cond( int x ){ return inv_u_field(x, 28, 25); }

  // i-bit (bit 13): true means immediate operand, false means register rs2
  static bool inv_immed( int x ) { return (x & Assembler::immed(true)) != 0; }

  static Register inv_rd( int x ) { return as_Register(inv_u_field(x, 29, 25)); }
  static Register inv_rs1( int x ) { return as_Register(inv_u_field(x, 18, 14)); }
  static Register inv_rs2( int x ) { return as_Register(inv_u_field(x, 4, 0)); }
923 923
  // Encoders: shift a value into its instruction field. Names follow the
  // field names of the V9 instruction formats.
  static int op( int x) { return u_field(x, 31, 30); }
  static int rd( Register r) { return u_field(r->encoding(), 29, 25); }
  static int fcn( int x) { return u_field(x, 29, 25); }
  static int op3( int x) { return u_field(x, 24, 19); }
  static int rs1( Register r) { return u_field(r->encoding(), 18, 14); }
  static int rs2( Register r) { return u_field(r->encoding(), 4, 0); }
  static int annul( bool a) { return u_field(a ? 1 : 0, 29, 29); }
  static int cond( int x) { return u_field(x, 28, 25); }
  static int cond_mov( int x) { return u_field(x, 17, 14); }
  static int rcond( RCondition x) { return u_field(x, 12, 10); }
  static int op2( int x) { return u_field(x, 24, 22); }
  static int predict( bool p) { return u_field(p ? 1 : 0, 19, 19); }
  static int branchcc( CC fcca) { return u_field(fcca, 21, 20); }
  static int cmpcc( CC fcca) { return u_field(fcca, 26, 25); }
  static int imm_asi( int x) { return u_field(x, 12, 5); }
  static int immed( bool i) { return u_field(i ? 1 : 0, 13, 13); }
  static int opf_low6( int w) { return u_field(w, 10, 5); }
  static int opf_low5( int w) { return u_field(w, 9, 5); }
  static int trapcc( CC cc) { return u_field(cc, 12, 11); }
  static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
  static int opf( int x) { return u_field(x, 13, 5); }

  // condition-code selectors for conditional-op / conditional-move forms
  static int opf_cc( CC c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
  static int mov_cc( CC c, bool useFloat ) { return u_field(useFloat ? 0 : 1, 18, 18) | u_field(c, 12, 11); }
948 948
  // Float-register field encoders; the register number encoding depends
  // on the operand width fwa.
  static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
  static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
  static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
952 952
  // some float instructions use this encoding on the op3 field
  // (op3 adjusted by operand width: +0 single, +3 double, +2 quad)
  static int alt_op3(int op, FloatRegisterImpl::Width w) {
    int r;
    switch(w) {
     case FloatRegisterImpl::S: r = op + 0;  break;
     case FloatRegisterImpl::D: r = op + 3;  break;
     case FloatRegisterImpl::Q: r = op + 2;  break;
     default: ShouldNotReachHere();  break;
    }
    return op3(r);
  }
964 964
965 965
966 966 // compute inverse of simm
967 967 static int inv_simm(int x, int nbits) {
968 968 return (int)(x << (32 - nbits)) >> (32 - nbits);
969 969 }
970 970
971 971 static int inv_simm13( int x ) { return inv_simm(x, 13); }
972 972
  // signed immediate, in low bits, nbits long
  // (range-checked, then truncated to the low nbits bits)
  static int simm(int x, int nbits) {
    assert_signed_range(x, nbits);
    return x & (( 1 << nbits ) - 1);
  }
978 978
979 979 // compute inverse of wdisp16
980 980 static intptr_t inv_wdisp16(int x, intptr_t pos) {
981 981 int lo = x & (( 1 << 14 ) - 1);
982 982 int hi = (x >> 20) & 3;
983 983 if (hi >= 2) hi |= ~1;
984 984 return (((hi << 14) | lo) << 2) + pos;
985 985 }
986 986
  // word offset, 14 bits at LSend, 2 bits at B21, B20
  // (encodes the word displacement x - off as a split wdisp16 field and
  // verifies the encoding round-trips through inv_wdisp16)
  static int wdisp16(intptr_t x, intptr_t off) {
    intptr_t xx = x - off;
    assert_signed_word_disp_range(xx, 16);
    int r = (xx >> 2) & ((1 << 14) - 1)
          | ( ( (xx>>(2+14)) & 3 ) << 20 );
    assert( inv_wdisp16(r, off) == x, "inverse is not inverse");
    return r;
  }
996 996
997 997
998 998 // word displacement in low-order nbits bits
999 999
1000 1000 static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) {
1001 1001 int pre_sign_extend = x & (( 1 << nbits ) - 1);
1002 1002 int r = pre_sign_extend >= ( 1 << (nbits-1) )
1003 1003 ? pre_sign_extend | ~(( 1 << nbits ) - 1)
1004 1004 : pre_sign_extend;
1005 1005 return (r << 2) + pos;
1006 1006 }
1007 1007
  // Encode the byte displacement x - off as a word displacement in the
  // low-order nbits bits (checked against inv_wdisp for round-tripping).
  static int wdisp( intptr_t x, intptr_t off, int nbits ) {
    intptr_t xx = x - off;
    assert_signed_word_disp_range(xx, nbits);
    int r = (xx >> 2) & (( 1 << nbits ) - 1);
    assert( inv_wdisp( r, off, nbits ) == x, "inverse not inverse");
    return r;
  }
1015 1015
1016 1016
1017 1017 // Extract the top 32 bits in a 64 bit word
1018 1018 static int32_t hi32( int64_t x ) {
1019 1019 int32_t r = int32_t( (uint64_t)x >> 32 );
1020 1020 return r;
1021 1021 }
1022 1022
  // given a sethi instruction, extract the constant, left-justified
  // (the imm22 field ends up in bits 31..10; the low 10 bits are zero)
  static int inv_hi22( int x ) {
    return x << 10;
  }

  // create an imm22 field, given a 32-bit left-justified constant
  // (drops the low 10 bits, which low10() supplies separately)
  static int hi22( int x ) {
    int r = int( juint(x) >> 10 );
    assert( (r & ~((1 << 22) - 1)) == 0, "just checkin'");
    return r;
  }
1034 1034
1035 1035 // create a low10 __value__ (not a field) for a given a 32-bit constant
1036 1036 static int low10( int x ) {
1037 1037 return x & ((1 << 10) - 1);
1038 1038 }
1039 1039
  // instruction only in v9
  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }

  // instruction only in v8
  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }

  // instruction deprecated in v9
  static void v9_dep() { } // do nothing for now

  // some float instructions only exist for single prec. on v8
  static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }

  // v8 has no CC field
  // (cc == 0 is icc/fcc0, which v8 can express; any other cc requires v9)
  static void v8_no_cc(CC cc) { if (cc) v9_only(); }
1054 1054
1055 1055 protected:
1056 1056 // Simple delay-slot scheme:
  // In order to check the programmer, the assembler keeps track of delay slots.
1058 1058 // It forbids CTIs in delay slots (conservative, but should be OK).
1059 1059 // Also, when putting an instruction into a delay slot, you must say
1060 1060 // asm->delayed()->add(...), in order to check that you don't omit
1061 1061 // delay-slot instructions.
1062 1062 // To implement this, we use a simple FSA
1063 1063
#ifdef ASSERT
  // delay-slot bookkeeping is only compiled into debug builds
#define CHECK_DELAY
#endif
#ifdef CHECK_DELAY
  // FSA state: outside any delay slot / just emitted a CTI that opened a
  // delay slot / currently emitting the delay-slot instruction (delayed())
  enum Delay_state { no_delay, at_delay_slot, filling_delay_slot } delay_state;
#endif
1070 1070
1071 1071 public:
1072 1072 // Tells assembler next instruction must NOT be in delay slot.
1073 1073 // Use at start of multinstruction macros.
  void assert_not_delayed() {
    // This is a separate overloading to avoid creation of string constants
    // in non-asserted code--with some compilers this pollutes the object code.
#ifdef CHECK_DELAY
    assert_not_delayed("next instruction should not be a delay slot");
#endif
  }
  // Same check, with a caller-supplied assertion message.
  void assert_not_delayed(const char* msg) {
#ifdef CHECK_DELAY
    assert(delay_state == no_delay, msg);
#endif
  }
1086 1086
1087 1087 protected:
1088 1088 // Delay slot helpers
1089 1089 // cti is called when emitting control-transfer instruction,
1090 1090 // BEFORE doing the emitting.
1091 1091 // Only effective when assertion-checking is enabled.
  void cti() {
    // a control-transfer instruction must not itself sit in a delay slot
#ifdef CHECK_DELAY
    assert_not_delayed("cti should not be in delay slot");
#endif
  }

  // called when emitting cti with a delay slot, AFTER emitting
  void has_delay_slot() {
#ifdef CHECK_DELAY
    assert_not_delayed("just checking");
    delay_state = at_delay_slot;         // next instruction fills the slot
#endif
  }
1105 1105
1106 1106 public:
1107 1107 // Tells assembler you know that next instruction is delayed
  Assembler* delayed() {
#ifdef CHECK_DELAY
    // only legal immediately after an instruction that opened a delay slot
    assert ( delay_state == at_delay_slot, "delayed instruction is not in delay slot");
    delay_state = filling_delay_slot;
#endif
    return this;
  }

  void flush() {
#ifdef CHECK_DELAY
    // an open delay slot must not be left unfilled at the end of a sequence
    assert ( delay_state == no_delay, "ending code with a delay slot");
#endif
    AbstractAssembler::flush();
  }
1122 1122
1123 1123 inline void emit_long(int); // shadows AbstractAssembler::emit_long
1124 1124 inline void emit_data(int x) { emit_long(x); }
1125 1125 inline void emit_data(int, RelocationHolder const&);
1126 1126 inline void emit_data(int, relocInfo::relocType rtype);
1127 1127 // helper for above fcns
1128 1128 inline void check_delay();
1129 1129
1130 1130
1131 1131 public:
1132 1132 // instructions, refer to page numbers in the SPARC Architecture Manual, V9
1133 1133
1134 1134 // pp 135 (addc was addx in v8)
1135 1135
1136 1136 inline void add(Register s1, Register s2, Register d );
1137 1137 inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
1138 1138 inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
1139 1139 inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
1140 1140 inline void add(const Address& a, Register d, int offset = 0);
1141 1141
  // ADD variants: cc forms set the condition codes (cc_bit_op3), addc adds
  // the carry bit (addc was addx in v8), addccc does both.
  void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | rs2(s2) ); }
  void addc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1148 1148
1149 1149 // pp 136
1150 1150
1151 1151 inline void bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
1152 1152 inline void bpr( RCondition c, bool a, Predict p, Register s1, Label& L);
1153 1153
1154 1154 protected: // use MacroAssembler::br instead
1155 1155
1156 1156 // pp 138
1157 1157
1158 1158 inline void fb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1159 1159 inline void fb( Condition c, bool a, Label& L );
1160 1160
1161 1161 // pp 141
1162 1162
1163 1163 inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1164 1164 inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1165 1165
1166 1166 public:
1167 1167
1168 1168 // pp 144
1169 1169
1170 1170 inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1171 1171 inline void br( Condition c, bool a, Label& L );
1172 1172
1173 1173 // pp 146
1174 1174
1175 1175 inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1176 1176 inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
1177 1177
1178 1178 // pp 121 (V8)
1179 1179
1180 1180 inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1181 1181 inline void cb( Condition c, bool a, Label& L );
1182 1182
1183 1183 // pp 149
1184 1184
1185 1185 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
1186 1186 inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
1187 1187
1188 1188 // pp 150
1189 1189
  // These instructions compare the contents of s2 with the contents of
  // memory at address in s1. If the values are equal, the contents of memory
  // at address s1 is swapped with the data in d. If the values are not equal,
  // the contents of memory at s1 is loaded into d, without the swap.
  // ia == -1 selects the immediate (implicit-ASI) form; otherwise the
  // explicit ASI value ia is encoded. V9 only.

  void casa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
  void casxa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
1197 1197
1198 1198 // pp 152
1199 1199
  // Unsigned/signed integer divide; the cc variants also update the
  // condition codes (cc_bit_op3).
  void udiv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | rs2(s2)); }
  void udiv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdiv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | rs2(s2)); }
  void sdiv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void udivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void udivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void sdivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1208 1208
1209 1209 // pp 155
1210 1210
  // DONE / RETRY trap-return instructions (v9 only; both are CTIs, hence
  // the cti() bookkeeping call before emitting).
  void done() { v9_only(); cti(); emit_long( op(arith_op) | fcn(0) | op3(done_op3) ); }
  void retry() { v9_only(); cti(); emit_long( op(arith_op) | fcn(1) | op3(retry_op3) ); }
1213 1213
  // pp 156

  // float add/subtract, width selected by w
  void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
  void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }

  // pp 157

  // float compare into condition-code register cc (v8 has no cc field)
  void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }

  // pp 159

  // float -> 64-bit integer (v9) and float -> 32-bit integer conversion
  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }

  // pp 160

  // float -> float width conversion (sw -> dw)
  void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }

  // pp 161

  // 64-bit integer (v9) and 32-bit integer -> float conversion
  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }
1237 1237
  // pp 162

  // float register move; on v8 only the single-precision form exists
  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }

  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
  // on v8 to do negation of single, double and quad precision floats.

  // in-place negate: uses the width-specific opf on v9, falls back to
  // fnegs (opf 0x05) on v8
  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }

  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
  // on v8 to do abs operation on single/double/quad precision floats.

  // in-place abs: width-specific opf on v9, fabss (opf 0x09) fallback on v8
  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
1255 1255
  // pp 163

  // float multiply/divide; the two-width fmul multiplies sw operands into
  // a dw result
  void fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w)); }
  void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
  void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | rd(d) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }

  // pp 164

  // float square root
  void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
1265 1265
1266 1266 // pp 165
1267 1267
1268 1268 inline void flush( Register s1, Register s2 );
1269 1269 inline void flush( Register s1, int simm13a);
1270 1270
  // pp 167

  // flush register windows (v9 only)
  void flushw() { v9_only(); emit_long( op(arith_op) | op3(flushw_op3) ); }

  // pp 168

  // illegal-instruction trap; a nonzero const22 field requires v9
  void illtrap( int const22a) { if (const22a != 0) v9_only(); emit_long( op(branch_op) | u_field(const22a, 21, 0) ); }
  // v8 unimp == illtrap(0)

  // pp 169

  // implementation-dependent instructions (v9)
  void impdep1( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
  void impdep2( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }

  // pp 149 (v8)

  // v8 coprocessor operate instructions (same opcode space as impdep1/2)
  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
1289 1289
1290 1290 // pp 170
1291 1291
1292 1292 void jmpl( Register s1, Register s2, Register d );
1293 1293 void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );
1294 1294
1295 1295 // 171
1296 1296
1297 1297 inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
1298 1298 inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
1299 1299 inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
1300 1300
1301 1301 inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
1302 1302
1303 1303
1304 1304 inline void ldfsr( Register s1, Register s2 );
1305 1305 inline void ldfsr( Register s1, int simm13a);
1306 1306 inline void ldxfsr( Register s1, Register s2 );
1307 1307 inline void ldxfsr( Register s1, int simm13a);
1308 1308
1309 1309 // pp 94 (v8)
1310 1310
1311 1311 inline void ldc( Register s1, Register s2, int crd );
1312 1312 inline void ldc( Register s1, int simm13a, int crd);
1313 1313 inline void lddc( Register s1, Register s2, int crd );
1314 1314 inline void lddc( Register s1, int simm13a, int crd);
1315 1315 inline void ldcsr( Register s1, Register s2, int crd );
1316 1316 inline void ldcsr( Register s1, int simm13a, int crd);
1317 1317
1318 1318
1319 1319 // 173
1320 1320
  // float load from alternate address space (v9 only; explicit ASI in the
  // register form, implicit ASI in the immediate form)
  void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1323 1323
1324 1324 // pp 175, lduw is ld on v8
1325 1325
1326 1326 inline void ldsb( Register s1, Register s2, Register d );
1327 1327 inline void ldsb( Register s1, int simm13a, Register d);
1328 1328 inline void ldsh( Register s1, Register s2, Register d );
1329 1329 inline void ldsh( Register s1, int simm13a, Register d);
1330 1330 inline void ldsw( Register s1, Register s2, Register d );
1331 1331 inline void ldsw( Register s1, int simm13a, Register d);
1332 1332 inline void ldub( Register s1, Register s2, Register d );
1333 1333 inline void ldub( Register s1, int simm13a, Register d);
1334 1334 inline void lduh( Register s1, Register s2, Register d );
1335 1335 inline void lduh( Register s1, int simm13a, Register d);
1336 1336 inline void lduw( Register s1, Register s2, Register d );
1337 1337 inline void lduw( Register s1, int simm13a, Register d);
1338 1338 inline void ldx( Register s1, Register s2, Register d );
1339 1339 inline void ldx( Register s1, int simm13a, Register d);
1340 1340 inline void ld( Register s1, Register s2, Register d );
1341 1341 inline void ld( Register s1, int simm13a, Register d);
1342 1342 inline void ldd( Register s1, Register s2, Register d );
1343 1343 inline void ldd( Register s1, int simm13a, Register d);
1344 1344
1345 1345 #ifdef ASSERT
1346 1346 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1347 1347 inline void ld( Register s1, ByteSize simm13a, Register d);
1348 1348 #endif
1349 1349
1350 1350 inline void ldsb(const Address& a, Register d, int offset = 0);
1351 1351 inline void ldsh(const Address& a, Register d, int offset = 0);
1352 1352 inline void ldsw(const Address& a, Register d, int offset = 0);
1353 1353 inline void ldub(const Address& a, Register d, int offset = 0);
1354 1354 inline void lduh(const Address& a, Register d, int offset = 0);
1355 1355 inline void lduw(const Address& a, Register d, int offset = 0);
1356 1356 inline void ldx( const Address& a, Register d, int offset = 0);
1357 1357 inline void ld( const Address& a, Register d, int offset = 0);
1358 1358 inline void ldd( const Address& a, Register d, int offset = 0);
1359 1359
1360 1360 inline void ldub( Register s1, RegisterOrConstant s2, Register d );
1361 1361 inline void ldsb( Register s1, RegisterOrConstant s2, Register d );
1362 1362 inline void lduh( Register s1, RegisterOrConstant s2, Register d );
1363 1363 inline void ldsh( Register s1, RegisterOrConstant s2, Register d );
1364 1364 inline void lduw( Register s1, RegisterOrConstant s2, Register d );
1365 1365 inline void ldsw( Register s1, RegisterOrConstant s2, Register d );
1366 1366 inline void ldx( Register s1, RegisterOrConstant s2, Register d );
1367 1367 inline void ld( Register s1, RegisterOrConstant s2, Register d );
1368 1368 inline void ldd( Register s1, RegisterOrConstant s2, Register d );
1369 1369
  // pp 177

  // Alternate-space loads (alt_bit_op3); register forms carry an explicit
  // ASI, immediate forms use the implicit ASI. ldswa/ldxa are v9-only;
  // ldda is deprecated in v9 (v9_dep).
  void ldsba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldsha( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsha( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldswa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldswa( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduha( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduha( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduwa( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduwa( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1388 1388
1389 1389 // pp 179
1390 1390
1391 1391 inline void ldstub( Register s1, Register s2, Register d );
1392 1392 inline void ldstub( Register s1, int simm13a, Register d);
1393 1393
  // pp 180

  // ldstub into an alternate address space
  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldstuba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1398 1398
1399 1399 // pp 181
1400 1400
  // Bitwise logic.  The '3' suffix on and3/or3/xor3 denotes the
  // three-operand form and also avoids the C++ alternative operator
  // tokens 'and', 'or' and 'xor'.  The 'cc' variants additionally set the
  // condition codes; the 'n' variants complement the second operand
  // (e.g. andn is s1 & ~s2).
  void and3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | rs2(s2) ); }
  void and3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void andcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andn(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | rs2(s2) ); }
  void andn(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andn(    Register s1, RegisterOrConstant s2, Register d);
  void andncc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void andncc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void or3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
  void or3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void orcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orn(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
  void orn(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orncc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void orncc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xor3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
  void xor3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xorcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void xorcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xnor(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | rs2(s2) ); }
  void xnor(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xnorcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void xnorcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 183

  // Memory barrier (V9).  The low 7 bits of the mask select which
  // orderings are enforced; the rs1 field is fixed at 15 (O7) as part of
  // the MEMBAR encoding.
  void membar( Membar_mask_bits const7a ) { v9_only(); emit_long( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
1430 1430
1431 1431 // pp 185
1432 1432
  // Conditional move of a float register, on integer or float condition
  // codes (which set is chosen by 'floatCC' together with 'cca').
  void fmov( FloatRegisterImpl::Width w, Condition c,  bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }

  // pp 189

  // Conditional move of a float register, on an integer register condition.
  void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1,  FloatRegister s2, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }

  // pp 191

  // Conditional move of an integer register, on condition codes.
  void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
  void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }

  // pp 195

  // Conditional move of an integer register, on an integer register condition.
  void movr( RCondition c, Register s1, Register s2,  Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
  void movr( RCondition c, Register s1, int simm10a,  Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
1448 1448
1449 1449 // pp 196
1450 1450
  // 64-bit multiply and divide (V9 only).
  void mulx(  Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
  void mulx(  Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
  void sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void udivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
  void udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 197

  // 32x32 -> 64 bit multiplies; the 'cc' variants also set condition codes.
  void umul(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3             ) | rs1(s1) | rs2(s2) ); }
  void umul(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void smul(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3             ) | rs1(s1) | rs2(s2) ); }
  void smul(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void umulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void umulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void smulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void smulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 199

  // Multiply-step (deprecated on V9).
  void mulscc(   Register s1, Register s2, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
  void mulscc(   Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 201

  // A nop is encoded as a sethi with zero destination and zero immediate.
  void nop() { emit_long( op(branch_op) | op2(sethi_op2) ); }


  // pp 202

  // Population count (V9 only).  Note the source goes in the rs2 field.
  void popc( Register s,  Register d) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
  void popc( int simm13a, Register d) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }

  // pp 203

  // Data prefetch; 'f' selects the prefetch function (variant).
  void prefetch(   Register s1, Register s2, PrefetchFcn f);
  void prefetch(   Register s1, int simm13a, PrefetchFcn f);
  void prefetcha(  Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void prefetcha(  Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
1492 1492
1493 1493 // pp 208
1494 1494
1495 1495 // not implementing read privileged register
1496 1496
  // Read ancillary state registers; the u_field(n, 18, 14) value selects
  // which register (0 = %y, 2 = %ccr, 3 = %asi, 4 = %tick, 5 = %pc, 6 = %fprs).
  inline void rdy(    Register d) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
  inline void rdccr(  Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
  inline void rdasi(  Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
  inline void rdtick( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
  inline void rdpc(   Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
  inline void rdfprs( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }

  // pp 213

  inline void rett( Register s1, Register s2);
  inline void rett( Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);

  // pp 214

  // Allocate a new register window; simm13a (negative) is the frame size.
  void save(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
  void save(    Register s1, int simm13a, Register d ) {
    // make sure frame is at least large enough for the register save area
    assert(-simm13a >= 16 * wordSize, "frame too small");
    emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
  }

  // Deallocate the current register window; the default arguments give the
  // canonical 'restore' with no additional effect.
  void restore( Register s1 = G0,  Register s2 = G0, Register d = G0 ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
  void restore( Register s1,       int simm13a,      Register d      ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 216

  // Window-management instructions; they share one op3 and are
  // distinguished only by the fcn field (0 = saved, 1 = restored).
  void saved()    { v9_only();  emit_long( op(arith_op) | fcn(0) | op3(saved_op3)); }
  void restored() { v9_only();  emit_long( op(arith_op) | fcn(1) | op3(saved_op3)); }
1525 1525
1526 1526 // pp 217
1527 1527
  // Set the high 22 bits of a register; may carry relocation info.
  inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
  // pp 218

  // 32-bit shifts; sx(0) selects the 32-bit form with a 5-bit shift count.
  void sll(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
  void sll(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
  void srl(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
  void srl(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
  void sra(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
  void sra(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }

  // 64-bit shifts (V9); sx(1) selects the extended form with a 6-bit count.
  void sllx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
  void sllx( Register s1, int imm6a,   Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
  void srlx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
  void srlx( Register s1, int imm6a,   Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
  void srax( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
  void srax( Register s1, int imm6a,   Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }

  // pp 220

  // Software-initiated reset.
  void sir( int simm13a ) { emit_long( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }

  // pp 221

  // Store barrier (V8); shares membar's op3 with rs1 field fixed at 15.
  void stbar() { emit_long( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
1552 1552
1553 1553 // pp 222
1554 1554
1555 1555 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
1556 1556 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
1557 1557 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
1558 1558 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
1559 1559
1560 1560 inline void stfsr( Register s1, Register s2 );
1561 1561 inline void stfsr( Register s1, int simm13a);
1562 1562 inline void stxfsr( Register s1, Register s2 );
1563 1563 inline void stxfsr( Register s1, int simm13a);
1564 1564
1565 1565 // pp 224
1566 1566
  // Float stores into an alternate address space (V9).
  void stfa(  FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stfa(  FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a         ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1569 1569
1570 1570 // p 226
1571 1571
1572 1572 inline void stb( Register d, Register s1, Register s2 );
1573 1573 inline void stb( Register d, Register s1, int simm13a);
1574 1574 inline void sth( Register d, Register s1, Register s2 );
1575 1575 inline void sth( Register d, Register s1, int simm13a);
1576 1576 inline void stw( Register d, Register s1, Register s2 );
1577 1577 inline void stw( Register d, Register s1, int simm13a);
1578 1578 inline void st( Register d, Register s1, Register s2 );
1579 1579 inline void st( Register d, Register s1, int simm13a);
1580 1580 inline void stx( Register d, Register s1, Register s2 );
1581 1581 inline void stx( Register d, Register s1, int simm13a);
1582 1582 inline void std( Register d, Register s1, Register s2 );
1583 1583 inline void std( Register d, Register s1, int simm13a);
1584 1584
1585 1585 #ifdef ASSERT
1586 1586 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1587 1587 inline void st( Register d, Register s1, ByteSize simm13a);
1588 1588 #endif
1589 1589
1590 1590 inline void stb( Register d, const Address& a, int offset = 0 );
1591 1591 inline void sth( Register d, const Address& a, int offset = 0 );
1592 1592 inline void stw( Register d, const Address& a, int offset = 0 );
1593 1593 inline void stx( Register d, const Address& a, int offset = 0 );
1594 1594 inline void st( Register d, const Address& a, int offset = 0 );
1595 1595 inline void std( Register d, const Address& a, int offset = 0 );
1596 1596
1597 1597 inline void stb( Register d, Register s1, RegisterOrConstant s2 );
1598 1598 inline void sth( Register d, Register s1, RegisterOrConstant s2 );
1599 1599 inline void stw( Register d, Register s1, RegisterOrConstant s2 );
1600 1600 inline void stx( Register d, Register s1, RegisterOrConstant s2 );
1601 1601 inline void std( Register d, Register s1, RegisterOrConstant s2 );
1602 1602 inline void st( Register d, Register s1, RegisterOrConstant s2 );
1603 1603
1604 1604 // pp 177
1605 1605
  // Stores into an alternate address space; mirrors the lduwa/ldxa family
  // above (explicit ASI for the 'ia' forms, implicit %asi otherwise).
  void stba(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stba(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void stha(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stha(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void stwa(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stwa(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void stxa(  Register d, Register s1, Register s2, int ia ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stxa(  Register d, Register s1, int simm13a         ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void stda(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void stda(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1616 1616
1617 1617 // pp 97 (v8)
1618 1618
1619 1619 inline void stc( int crd, Register s1, Register s2 );
1620 1620 inline void stc( int crd, Register s1, int simm13a);
1621 1621 inline void stdc( int crd, Register s1, Register s2 );
1622 1622 inline void stdc( int crd, Register s1, int simm13a);
1623 1623 inline void stcsr( int crd, Register s1, Register s2 );
1624 1624 inline void stcsr( int crd, Register s1, int simm13a);
1625 1625 inline void stdcq( int crd, Register s1, Register s2 );
1626 1626 inline void stdcq( int crd, Register s1, int simm13a);
1627 1627
1628 1628 // pp 230
1629 1629
  // Subtraction; 'cc' sets condition codes, 'c' subtracts the carry too.
  void sub(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | rs2(s2) ); }
  void sub(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // Note: offset is added to s2.
  inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);

  void subcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3  | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
  void subcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3  | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void subc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3             ) | rs1(s1) | rs2(s2) ); }
  void subc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void subccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void subccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 231

  // Atomic swap of a register with memory.
  inline void swap( Register s1, Register s2, Register d );
  inline void swap( Register s1, int simm13a, Register d);
  inline void swap( Address& a,               Register d, int offset = 0 );

  // pp 232

  // swap into an alternate address space (deprecated on V9).
  void swapa(   Register s1, Register s2, int ia, Register d ) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void swapa(   Register s1, int simm13a,         Register d ) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1653 1653
1654 1654 // pp 234, note op in book is wrong, see pp 268
1655 1655
  // Tagged add; the 'tv' variants trap on overflow (deprecated on V9).
  void taddcc(    Register s1, Register s2, Register d ) {            emit_long( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
  void taddcc(    Register s1, int simm13a, Register d ) {            emit_long( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void taddcctv(  Register s1, Register s2, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
  void taddcctv(  Register s1, int simm13a, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 235

  // Tagged subtract; the 'tv' variants trap on overflow.
  void tsubcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
  void tsubcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void tsubcctv(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
  void tsubcctv(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 237

  // Conditional trap on the given condition codes (cc must be icc on V8).
  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc);  emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc);  emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
  // simple uncond. trap
  void trap( int trapa ) { trap( always, icc, G0, trapa ); }
1674 1674
1675 1675 // pp 239 omit write priv register for now
1676 1676
1677 1677 inline void wry( Register d) { v9_dep(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
1678 1678 inline void wrccr(Register s) { v9_only(); emit_long( op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
1679 1679 inline void wrccr(Register s, int simm13a) { v9_only(); emit_long( op(arith_op) |
1680 1680 rs1(s) |
1681 1681 op3(wrreg_op3) |
1682 1682 u_field(2, 29, 25) |
1683 1683 u_field(1, 13, 13) |
1684 1684 simm(simm13a, 13)); }
1685 1685 inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
1686 1686 inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
1687 1687
1688 1688 // For a given register condition, return the appropriate condition code
1689 1689 // Condition (the one you would use to get the same effect after "tst" on
1690 1690 // the target register.)
1691 1691 Assembler::Condition reg_cond_to_cc_cond(RCondition in);
1692 1692
1693 1693
1694 1694 // Creation
  // Creation: emit into the given code buffer; with CHECK_DELAY builds,
  // start out with no pending delay slot.
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
#ifdef CHECK_DELAY
    delay_state = no_delay;
#endif
  }
1700 1700
1701 1701 // Testing
1702 1702 #ifndef PRODUCT
1703 1703 void test_v9();
1704 1704 void test_v8_onlys();
1705 1705 #endif
1706 1706 };
1707 1707
1708 1708
// Scratch area used by the debugging support in MacroAssembler to capture
// the full CPU register state: the four integer register groups plus the
// float registers.  The static *_offset functions give byte offsets of
// individual slots for use from generated assembly.
class RegistersForDebugging : public StackObj {
 public:
  intptr_t i[8], l[8], o[8], g[8];  // in, local, out and global integer registers
  float    f[32];                   // single-precision float registers
  double   d[32];                   // double-precision float registers

  void print(outputStream* s);

  static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
  static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
  static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
  static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
  static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
  // NOTE(review): j appears to be a raw double-register number (even
  // indices), hence the j/2 — confirm against save_registers()'s layout.
  static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }

  // gen asm code to save regs
  static void save_registers(MacroAssembler* a);

  // restore global registers in case C code disturbed them
  static void restore_registers(MacroAssembler* a, Register r);


};
1732 1732
1733 1733
1734 1734 // MacroAssembler extends Assembler by a few frequently used macros.
1735 1735 //
1736 1736 // Most of the standard SPARC synthetic ops are defined here.
1737 1737 // Instructions for which a 'better' code sequence exists depending
1738 1738 // on arguments should also go in here.
1739 1739
// Convenience wrappers that record the call site's file/line with each
// emitted jump (passed through to jmp/jump/jumpl for debugging).
#define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
#define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
#define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
1744 1744
1745 1745
1746 1746 class MacroAssembler: public Assembler {
1747 1747 protected:
1748 1748 // Support for VM calls
1749 1749 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
1750 1750 // may customize this version by overriding it for its purposes (e.g., to save/restore
1751 1751 // additional registers when doing a VM call).
1752 1752 #ifdef CC_INTERP
1753 1753 #define VIRTUAL
1754 1754 #else
1755 1755 #define VIRTUAL virtual
1756 1756 #endif
1757 1757
1758 1758 VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
1759 1759
1760 1760 //
1761 1761 // It is imperative that all calls into the VM are handled via the call_VM macros.
1762 1762 // They make sure that the stack linkage is setup correctly. call_VM's correspond
1763 1763 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
1764 1764 //
1765 1765 // This is the base routine called by the different versions of call_VM. The interpreter
1766 1766 // may customize this version by overriding it for its purposes (e.g., to save/restore
1767 1767 // additional registers when doing a VM call).
1768 1768 //
1769 1769 // A non-volatile java_thread_cache register should be specified so
1770 1770 // that the G2_thread value can be preserved across the call.
1771 1771 // (If java_thread_cache is noreg, then a slow get_thread call
1772 1772 // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
1773 1773 // thread.
1774 1774 //
1775 1775 // If no last_java_sp is specified (noreg) than SP will be used instead.
1776 1776
1777 1777 virtual void call_VM_base(
1778 1778 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
1779 1779 Register java_thread_cache, // the thread if computed before ; use noreg otherwise
1780 1780 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
1781 1781 address entry_point, // the entry point
1782 1782 int number_of_arguments, // the number of arguments (w/o thread) to pop after call
1783 1783 bool check_exception=true // flag which indicates if exception should be checked
1784 1784 );
1785 1785
1786 1786 // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
1787 1787 // The implementation is only non-empty for the InterpreterMacroAssembler,
1788 1788 // as only the interpreter handles and ForceEarlyReturn PopFrame requests.
1789 1789 virtual void check_and_handle_popframe(Register scratch_reg);
1790 1790 virtual void check_and_handle_earlyret(Register scratch_reg);
1791 1791
1792 1792 public:
1793 1793 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
1794 1794
1795 1795 // Support for NULL-checks
1796 1796 //
1797 1797 // Generates code that causes a NULL OS exception if the content of reg is NULL.
1798 1798 // If the accessed location is M[reg + offset] and the offset is known, provide the
1799 1799 // offset. No explicit code generation is needed if the offset is within a certain
1800 1800 // range (0 <= offset <= page_size).
1801 1801 //
1802 1802 // %%%%%% Currently not done for SPARC
1803 1803
1804 1804 void null_check(Register reg, int offset = -1);
1805 1805 static bool needs_explicit_null_check(intptr_t offset);
1806 1806
1807 1807 // support for delayed instructions
1808 1808 MacroAssembler* delayed() { Assembler::delayed(); return this; }
1809 1809
1810 1810 // branches that use right instruction for v8 vs. v9
1811 1811 inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1812 1812 inline void br( Condition c, bool a, Predict p, Label& L );
1813 1813
1814 1814 inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1815 1815 inline void fb( Condition c, bool a, Predict p, Label& L );
1816 1816
1817 1817 // compares register with zero and branches (V9 and V8 instructions)
1818 1818 void br_zero( Condition c, bool a, Predict p, Register s1, Label& L);
1819 1819 // Compares a pointer register with zero and branches on (not)null.
1820 1820 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
1821 1821 void br_null ( Register s1, bool a, Predict p, Label& L );
1822 1822 void br_notnull( Register s1, bool a, Predict p, Label& L );
1823 1823
1824 1824 // These versions will do the most efficient thing on v8 and v9. Perhaps
1825 1825 // this is what the routine above was meant to do, but it didn't (and
1826 1826 // didn't cover both target address kinds.)
1827 1827 void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
1828 1828 void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);
1829 1829
1830 1830 inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1831 1831 inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
1832 1832
1833 1833 // Branch that tests xcc in LP64 and icc in !LP64
1834 1834 inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1835 1835 inline void brx( Condition c, bool a, Predict p, Label& L );
1836 1836
1837 1837 // unconditional short branch
1838 1838 inline void ba( bool a, Label& L );
1839 1839
1840 1840 // Branch that tests fp condition codes
1841 1841 inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1842 1842 inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1843 1843
1844 1844 // get PC the best way
1845 1845 inline int get_pc( Register d );
1846 1846
1847 1847 // Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
  // Compare: synthesized as a subtract into G0 that only sets the
  // condition codes.
  inline void cmp(  Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
  inline void cmp(  Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
1850 1850
1851 1851 inline void jmp( Register s1, Register s2 );
1852 1852 inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1853 1853
1854 1854 // Check if the call target is out of wdisp30 range (relative to the code cache)
1855 1855 static inline bool is_far_target(address d);
1856 1856 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
1857 1857 inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
1858 1858 inline void callr( Register s1, Register s2 );
1859 1859 inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1860 1860
1861 1861 // Emits nothing on V8
1862 1862 inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
1863 1863 inline void iprefetch( Label& L);
1864 1864
1865 1865 inline void tst( Register s ) { orcc( G0, s, G0 ); }
1866 1866
1867 1867 #ifdef PRODUCT
// Return pseudo-instructions.  ret returns from a routine that performed a
// register-window save (return address in I7); retl is the leaf form (O7).
// The "2 * BytesPerInstWord" displacement skips the call instruction and its
// delay slot.  With TraceJumps the target is first moved into O7 and JMP is
// used, so the jump is recorded by the jump-tracing ring buffer.
1868 1868 inline void ret( bool trace = TraceJumps ) { if (trace) {
1869 1869 mov(I7, O7); // traceable register
1870 1870 JMP(O7, 2 * BytesPerInstWord);
1871 1871 } else {
1872 1872 jmpl( I7, 2 * BytesPerInstWord, G0 );
1873 1873 }
1874 1874 }
1875 1875
1876 1876 inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
1877 1877 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
1878 1878 #else
// Non-PRODUCT builds use out-of-line definitions (see the .cpp file).
1879 1879 void ret( bool trace = TraceJumps );
1880 1880 void retl( bool trace = TraceJumps );
1881 1881 #endif /* PRODUCT */
1882 1882
1883 1883 // Required platform-specific helpers for Label::patch_instructions.
1884 1884 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
1885 1885 void pd_patch_instruction(address branch, address target);
1886 1886 #ifndef PRODUCT
1887 1887 static void pd_print_patched_instruction(address branch);
1888 1888 #endif
1889 1889
1890 1890 // sethi Macro handles optimizations and relocations
1891 1891 private:
1892 1892 void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
1893 1893 public:
1894 1894 void sethi(const AddressLiteral& addrlit, Register d);
1895 1895 void patchable_sethi(const AddressLiteral& addrlit, Register d);
1896 1896
1897 1897 // compute the number of instructions for a sethi/set
1898 1898 static int insts_for_sethi( address a, bool worst_case = false );
1899 1899 static int worst_case_insts_for_set();
1900 1900
1901 1901 // set may be either setsw or setuw (high 32 bits may be zero or sign)
1902 1902 private:
1903 1903 void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
1904 1904 static int insts_for_internal_set(intptr_t value);
1905 1905 public:
1906 1906 void set(const AddressLiteral& addrlit, Register d);
1907 1907 void set(intptr_t value, Register d);
1908 1908 void set(address addr, Register d, RelocationHolder const& rspec);
1909 1909 static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
1910 1910
1911 1911 void patchable_set(const AddressLiteral& addrlit, Register d);
1912 1912 void patchable_set(intptr_t value, Register d);
1913 1913 void set64(jlong value, Register d, Register tmp);
1914 1914 static int insts_for_set64(jlong value);
1915 1915
1916 1916 // sign-extend 32 to 64
1917 1917 inline void signx( Register s, Register d ) { sra( s, G0, d); }
1918 1918 inline void signx( Register d ) { sra( d, G0, d); }
1919 1919
// not1: bitwise NOT (xnor with G0, i.e. xor with all-ones).
1920 1920 inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
1921 1921 inline void not1( Register d ) { xnor( d, G0, d ); }
1922 1922
// neg: two's-complement negate (0 - s).
1923 1923 inline void neg( Register s, Register d ) { sub( G0, s, d ); }
1924 1924 inline void neg( Register d ) { sub( G0, d, d ); }
1925 1925
// Atomic compare-and-swap pseudo-instructions using the default (primary) ASI.
// Per SPARC V9 CASA/CASXA semantics: memory at [s1] is compared with s2; on a
// match d is stored to memory, and in either case d receives the old memory value.
1926 1926 inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
1927 1927 inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
1928 1928 // Functions for isolating 64 bit atomic swaps for LP64
1929 1929 // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
1930 1930 inline void cas_ptr( Register s1, Register s2, Register d) {
1931 1931 #ifdef _LP64
1932 1932 casx( s1, s2, d );
1933 1933 #else
1934 1934 cas( s1, s2, d );
1935 1935 #endif
1936 1936 }
1937 1937
1938 1938 // Functions for isolating 64 bit shifts for LP64
1939 1939 inline void sll_ptr( Register s1, Register s2, Register d );
1940 1940 inline void sll_ptr( Register s1, int imm6a, Register d );
1941 1941 inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
1942 1942 inline void srl_ptr( Register s1, Register s2, Register d );
1943 1943 inline void srl_ptr( Register s1, int imm6a, Register d );
1944 1944
1945 1945 // little-endian
// Same as cas/casx above but through the little-endian primary ASI.
1946 1946 inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
1947 1947 inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
1948 1948
// inc/dec: add/subtract a small immediate in place; the *cc forms also set
// the condition codes.
1949 1949 inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
1950 1950 inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }
1951 1951
1952 1952 inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
1953 1953 inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }
1954 1954
// btst: test bits (and-with-discard, condition codes only).
1955 1955 inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
1956 1956 inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }
1957 1957
// NOTE: bset/bclr/btog write their result back into the second operand
// (s2, or s for the immediate forms) — they modify that register in place.
1958 1958 inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
1959 1959 inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }
1960 1960
1961 1961 inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
1962 1962 inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }
1963 1963
1964 1964 inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
1965 1965 inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
1966 1966
// clr: zero a register.
1967 1967 inline void clr( Register d ) { or3( G0, G0, d ); }
1968 1968
1969 1969 inline void clrb( Register s1, Register s2);
1970 1970 inline void clrh( Register s1, Register s2);
1971 1971 inline void clr( Register s1, Register s2);
1972 1972 inline void clrx( Register s1, Register s2);
1973 1973
1974 1974 inline void clrb( Register s1, int simm13a);
1975 1975 inline void clrh( Register s1, int simm13a);
1976 1976 inline void clr( Register s1, int simm13a);
1977 1977 inline void clrx( Register s1, int simm13a);
1978 1978
1979 1979 // copy & clear upper word
// srl by %g0 is a 32-bit shift by zero, which zero-extends the low 32 bits
// into the 64-bit destination on V9.
1980 1980 inline void clruw( Register s, Register d ) { srl( s, G0, d); }
1981 1981 // clear upper word
1982 1982 inline void clruwu( Register d ) { srl( d, G0, d); }
1983 1983
1984 1984 // membar pseudo instruction. takes into account target memory model.
1985 1985 inline void membar( Assembler::Membar_mask_bits const7a );
1986 1986
1987 1987 // returns if membar generates anything.
1988 1988 inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
1989 1989
1990 1990 // mov pseudo instructions
// mov refuses to emit a useless instruction when s == d: in a delay slot that
// would silently waste the slot, hence the assert.  Use mov_or_nop when an
// instruction must be emitted unconditionally (it emits a nop instead).
1991 1991 inline void mov( Register s, Register d) {
1992 1992 if ( s != d ) or3( G0, s, d);
1993 1993 else assert_not_delayed(); // Put something useful in the delay slot!
1994 1994 }
1995 1995
1996 1996 inline void mov_or_nop( Register s, Register d) {
1997 1997 if ( s != d ) or3( G0, s, d);
1998 1998 else nop();
1999 1999 }
2000 2000
// Load a small (13-bit signed) immediate into d.
2001 2001 inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
2002 2002
2003 2003 // address pseudos: make these names unlike instruction names to avoid confusion
2004 2004 inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
2005 2005 inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
2006 2006 inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
2007 2007 inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
2008 2008 inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
2009 2009 inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
2010 2010 inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
2011 2011 inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
2012 2012
2013 2013 // ring buffer traceable jumps
2014 2014
2015 2015 void jmp2( Register r1, Register r2, const char* file, int line );
2016 2016 void jmp ( Register r1, int offset, const char* file, int line );
2017 2017
2018 2018 void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
2019 2019 void jump (const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
2020 2020
2021 2021
2022 2022 // argument pseudos:
2023 2023
2024 2024 inline void load_argument( Argument& a, Register d );
2025 2025 inline void store_argument( Register s, Argument& a );
2026 2026 inline void store_ptr_argument( Register s, Argument& a );
2027 2027 inline void store_float_argument( FloatRegister s, Argument& a );
2028 2028 inline void store_double_argument( FloatRegister s, Argument& a );
2029 2029 inline void store_long_argument( Register s, Argument& a );
2030 2030
2031 2031 // handy macros:
2032 2032
// Round r up to the next multiple of modulus.  modulus must be a power of
// two: the and3 with -modulus is a bit mask, not a division.
2033 2033 inline void round_to( Register r, int modulus ) {
2034 2034 assert_not_delayed();
2035 2035 inc( r, modulus - 1 );
2036 2036 and3( r, -modulus, r );
2037 2037 }
2038 2038
2039 2039 // --------------------------------------------------
2040 2040
2041 2041 // Functions for isolating 64 bit loads for LP64
2042 2042 // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
2043 2043 // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
2044 2044 inline void ld_ptr(Register s1, Register s2, Register d);
2045 2045 inline void ld_ptr(Register s1, int simm13a, Register d);
2046 2046 inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
2047 2047 inline void ld_ptr(const Address& a, Register d, int offset = 0);
2048 2048 inline void st_ptr(Register d, Register s1, Register s2);
2049 2049 inline void st_ptr(Register d, Register s1, int simm13a);
2050 2050 inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
2051 2051 inline void st_ptr(Register d, const Address& a, int offset = 0);
2052 2052
2053 2053 #ifdef ASSERT
2054 2054 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
2055 2055 inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
2056 2056 inline void st_ptr(Register d, Register s1, ByteSize simm13a);
2057 2057 #endif
2058 2058
2059 2059 // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
2060 2060 // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
2061 2061 inline void ld_long(Register s1, Register s2, Register d);
2062 2062 inline void ld_long(Register s1, int simm13a, Register d);
2063 2063 inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
2064 2064 inline void ld_long(const Address& a, Register d, int offset = 0);
2065 2065 inline void st_long(Register d, Register s1, Register s2);
2066 2066 inline void st_long(Register d, Register s1, int simm13a);
2067 2067 inline void st_long(Register d, Register s1, RegisterOrConstant s2);
2068 2068 inline void st_long(Register d, const Address& a, int offset = 0);
2069 2069
2070 2070 // Helpers for address formation.
2071 2071 // - They emit only a move if s2 is a constant zero.
2072 2072 // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
2073 2073 // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
2074 2074 RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2075 2075 RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2076 2076 RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2077 2077
// Return src unchanged if it is a register or a constant that fits in a
// signed 13-bit immediate; otherwise materialize the constant into temp
// (which must then be a real register — guaranteed below) and return temp.
2078 2078 RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
2079 2079 if (is_simm13(src.constant_or_zero()))
2080 2080 return src; // register or short constant
2081 2081 guarantee(temp != noreg, "constant offset overflow");
2082 2082 set(src.as_constant(), temp);
2083 2083 return temp;
2084 2084 }
2085 2085
2086 2086 // --------------------------------------------------
2087 2087
2088 2088 public:
2089 2089 // traps as per trap.h (SPARC ABI?)
2090 2090
2091 2091 void breakpoint_trap();
2092 2092 void breakpoint_trap(Condition c, CC cc = icc);
2093 2093 void flush_windows_trap();
2094 2094 void clean_windows_trap();
2095 2095 void get_psr_trap();
2096 2096 void set_psr_trap();
2097 2097
2098 2098 // V8/V9 flush_windows
2099 2099 void flush_windows();
2100 2100
2101 2101 // Support for serializing memory accesses between threads
2102 2102 void serialize_memory(Register thread, Register tmp1, Register tmp2);
2103 2103
2104 2104 // Stack frame creation/removal
2105 2105 void enter();
2106 2106 void leave();
2107 2107
2108 2108 // V8/V9 integer multiply
2109 2109 void mult(Register s1, Register s2, Register d);
2110 2110 void mult(Register s1, int simm13a, Register d);
2111 2111
2112 2112 // V8/V9 read and write of condition codes.
2113 2113 void read_ccr(Register d);
2114 2114 void write_ccr(Register s);
2115 2115
2116 2116 // Manipulation of C++ bools
2117 2117 // These are idioms to flag the need for care with accessing bools but on
2118 2118 // this platform we assume byte size
2119 2119
2120 2120 inline void stbool(Register d, const Address& a) { stb(d, a); }
2121 2121 inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
2122 2122 inline void tstbool( Register s ) { tst(s); }
// boolconst is widened to int (0 or 1) before the immediate move.
2123 2123 inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
2124 2124
2125 2125 // klass oop manipulations if compressed
2126 2126 void load_klass(Register src_oop, Register klass);
2127 2127 void store_klass(Register klass, Register dst_oop);
2128 2128 void store_klass_gap(Register s, Register dst_oop);
2129 2129
2130 2130 // oop manipulations
2131 2131 void load_heap_oop(const Address& s, Register d);
2132 2132 void load_heap_oop(Register s1, Register s2, Register d);
2133 2133 void load_heap_oop(Register s1, int simm13a, Register d);
2134 2134 void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
2135 2135 void store_heap_oop(Register d, Register s1, Register s2);
2136 2136 void store_heap_oop(Register d, Register s1, int simm13a);
2137 2137 void store_heap_oop(Register d, const Address& a, int offset = 0);
2138 2138
// Compressed-oop encode/decode.  The single-register overloads are in-place
// convenience wrappers around the two-register forms; the _not_null variants
// (defined out of line) may skip the null check — TODO confirm against the
// .cpp implementations.
2139 2139 void encode_heap_oop(Register src, Register dst);
2140 2140 void encode_heap_oop(Register r) {
2141 2141 encode_heap_oop(r, r);
2142 2142 }
2143 2143 void decode_heap_oop(Register src, Register dst);
2144 2144 void decode_heap_oop(Register r) {
2145 2145 decode_heap_oop(r, r);
2146 2146 }
2147 2147 void encode_heap_oop_not_null(Register r);
2148 2148 void decode_heap_oop_not_null(Register r);
2149 2149 void encode_heap_oop_not_null(Register src, Register dst);
2150 2150 void decode_heap_oop_not_null(Register src, Register dst);
2151 2151
2152 2152 // Support for managing the JavaThread pointer (i.e.; the reference to
2153 2153 // thread-local information).
2154 2154 void get_thread(); // load G2_thread
2155 2155 void verify_thread(); // verify G2_thread contents
2156 2156 void save_thread (const Register threache); // save to cache
2157 2157 void restore_thread(const Register thread_cache); // restore from cache
2158 2158
2159 2159 // Support for last Java frame (but use call_VM instead where possible)
2160 2160 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
2161 2161 void reset_last_Java_frame(void);
2162 2162
2163 2163 // Call into the VM.
2164 2164 // Passes the thread pointer (in O0) as a prepended argument.
2165 2165 // Makes sure oop return values are visible to the GC.
2166 2166 void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2167 2167 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
2168 2168 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2169 2169 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2170 2170
2171 2171 // these overloadings are not presently used on SPARC:
2172 2172 void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2173 2173 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
2174 2174 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2175 2175 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2176 2176
2177 2177 void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
2178 2178 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
2179 2179 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
2180 2180 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
2181 2181
2182 2182 void get_vm_result (Register oop_result);
2183 2183 void get_vm_result_2(Register oop_result);
2184 2184
2185 2185 // vm result is currently getting hijacked for oop preservation
2186 2186 void set_vm_result(Register oop_result);
2187 2187
2188 2188 // if call_VM_base was called with check_exceptions=false, then call
2189 2189 // check_and_forward_exception to handle exceptions when it is safe
2190 2190 void check_and_forward_exception(Register scratch_reg);
2191 2191
2192 2192 private:
2193 2193 // For V8
2194 2194 void read_ccr_trap(Register ccr_save);
2195 2195 void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
2196 2196
2197 2197 #ifdef ASSERT
2198 2198 // For V8 debugging. Uses V8 instruction sequence and checks
2199 2199 // result with V9 instructions rdccr and wrccr.
2200 2200 // Uses Gscatch and Gscatch2
2201 2201 void read_ccr_v8_assert(Register ccr_save);
2202 2202 void write_ccr_v8_assert(Register ccr_save);
↓ open down ↓ |
2202 lines elided |
↑ open up ↑ |
2203 2203 #endif // ASSERT
2204 2204
2205 2205 public:
2206 2206
2207 2207 // Write to card table for - register is destroyed afterwards.
2208 2208 void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
2209 2209
2210 2210 void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2211 2211
2212 2212 #ifndef SERIALGC
2213 - // Array store and offset
2214 - void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
2213 + // General G1 pre-barrier generator.
2214 + void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
2215 2215
2216 + // General G1 post-barrier generator.
2216 2217 void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2217 -
2218 - // May do filtering, depending on the boolean arguments.
2219 - void g1_card_table_write(jbyte* byte_map_base,
2220 - Register tmp, Register obj, Register new_val,
2221 - bool region_filter, bool null_filter);
2222 2218 #endif // SERIALGC
2223 2219
2224 2220 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
2225 2221 void push_fTOS();
2226 2222
2227 2223 // pops double TOS element from CPU stack and pushes on FPU stack
2228 2224 void pop_fTOS();
2229 2225
2230 2226 void empty_FPU_stack();
2231 2227
2232 2228 void push_IU_state();
2233 2229 void pop_IU_state();
2234 2230
2235 2231 void push_FPU_state();
2236 2232 void pop_FPU_state();
2237 2233
2238 2234 void push_CPU_state();
2239 2235 void pop_CPU_state();
2240 2236
2241 2237 // if heap base register is used - reinit it with the correct value
2242 2238 void reinit_heapbase();
2243 2239
2244 2240 // Debugging
2245 2241 void _verify_oop(Register reg, const char * msg, const char * file, int line);
2246 2242 void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
2247 2243
2248 2244 #define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
2249 2245 #define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
2250 2246
2251 2247 // only if +VerifyOops
2252 2248 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
2253 2249 // only if +VerifyFPU
2254 2250 void stop(const char* msg); // prints msg, dumps registers and stops execution
2255 2251 void warn(const char* msg); // prints msg, but don't stop
2256 2252 void untested(const char* what = "");
// Format a diagnostic message and halt via stop().  The 1024-byte buffer is
// deliberately never freed: stop() does not return.
2257 2253 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
2258 2254 void should_not_reach_here() { stop("should not reach here"); }
2259 2255 void print_CPU_state();
2260 2256
2261 2257 // oops in code
2262 2258 AddressLiteral allocate_oop_address(jobject obj); // allocate_index
2263 2259 AddressLiteral constant_oop_address(jobject obj); // find_index
2264 2260 inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
2265 2261 inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
2266 2262 inline void set_oop (const AddressLiteral& obj_addr, Register d); // same as load_address
2267 2263
2268 2264 void set_narrow_oop( jobject obj, Register d );
2269 2265
2270 2266 // nop padding
2271 2267 void align(int modulus);
2272 2268
2273 2269 // declare a safepoint
2274 2270 void safepoint();
2275 2271
2276 2272 // factor out part of stop into subroutine to save space
2277 2273 void stop_subroutine();
2278 2274 // factor out part of verify_oop into subroutine to save space
2279 2275 void verify_oop_subroutine();
2280 2276
2281 2277 // side-door communication with signalHandler in os_solaris.cpp
2282 2278 static address _verify_oop_implicit_branch[3];
2283 2279
2284 2280 #ifndef PRODUCT
2285 2281 static void test();
2286 2282 #endif
2287 2283
2288 2284 // convert an incoming arglist to varargs format; put the pointer in d
2289 2285 void set_varargs( Argument a, Register d );
2290 2286
2291 2287 int total_frame_size_in_bytes(int extraWords);
2292 2288
2293 2289 // used when extraWords known statically
2294 2290 void save_frame(int extraWords);
2295 2291 void save_frame_c1(int size_in_bytes);
2296 2292 // make a frame, and simultaneously pass up one or two register value
2297 2293 // into the new register window
2298 2294 void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
2299 2295
2300 2296 // give no. (outgoing) params, calc # of words will need on frame
2301 2297 void calc_mem_param_words(Register Rparam_words, Register Rresult);
2302 2298
2303 2299 // used to calculate frame size dynamically
2304 2300 // result is in bytes and must be negated for save inst
2305 2301 void calc_frame_size(Register extraWords, Register resultReg);
2306 2302
2307 2303 // calc and also save
2308 2304 void calc_frame_size_and_save(Register extraWords, Register resultReg);
2309 2305
2310 2306 static void debug(char* msg, RegistersForDebugging* outWindow);
2311 2307
2312 2308 // implementations of bytecodes used by both interpreter and compiler
2313 2309
2314 2310 void lcmp( Register Ra_hi, Register Ra_low,
2315 2311 Register Rb_hi, Register Rb_low,
2316 2312 Register Rresult);
2317 2313
2318 2314 void lneg( Register Rhi, Register Rlow );
2319 2315
2320 2316 void lshl( Register Rin_high, Register Rin_low, Register Rcount,
2321 2317 Register Rout_high, Register Rout_low, Register Rtemp );
2322 2318
2323 2319 void lshr( Register Rin_high, Register Rin_low, Register Rcount,
2324 2320 Register Rout_high, Register Rout_low, Register Rtemp );
2325 2321
2326 2322 void lushr( Register Rin_high, Register Rin_low, Register Rcount,
2327 2323 Register Rout_high, Register Rout_low, Register Rtemp );
2328 2324
2329 2325 #ifdef _LP64
2330 2326 void lcmp( Register Ra, Register Rb, Register Rresult);
2331 2327 #endif
2332 2328
2333 2329 // Load and store values by size and signed-ness
2334 2330 void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
2335 2331 void store_sized_value(Register src, Address dst, size_t size_in_bytes);
2336 2332
2337 2333 void float_cmp( bool is_float, int unordered_result,
2338 2334 FloatRegister Fa, FloatRegister Fb,
2339 2335 Register Rresult);
2340 2336
2341 2337 void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2342 2338 void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
2343 2339 void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2344 2340 void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2345 2341
2346 2342 void save_all_globals_into_locals();
2347 2343 void restore_globals_from_locals();
2348 2344
2349 2345 void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2350 2346 address lock_addr=0, bool use_call_vm=false);
2351 2347 void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2352 2348 address lock_addr=0, bool use_call_vm=false);
2353 2349 void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
2354 2350
2355 2351 // These set the icc condition code to equal if the lock succeeded
2356 2352 // and notEqual if it failed and requires a slow case
2357 2353 void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
2358 2354 Register Rscratch,
2359 2355 BiasedLockingCounters* counters = NULL,
2360 2356 bool try_bias = UseBiasedLocking);
2361 2357 void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
2362 2358 Register Rscratch,
2363 2359 bool try_bias = UseBiasedLocking);
2364 2360
2365 2361 // Biased locking support
2366 2362 // Upon entry, lock_reg must point to the lock record on the stack,
2367 2363 // obj_reg must contain the target object, and mark_reg must contain
2368 2364 // the target object's header.
2369 2365 // Destroys mark_reg if an attempt is made to bias an anonymously
2370 2366 // biased lock. In this case a failure will go either to the slow
2371 2367 // case or fall through with the notEqual condition code set with
2372 2368 // the expectation that the slow case in the runtime will be called.
2373 2369 // In the fall-through case where the CAS-based lock is done,
2374 2370 // mark_reg is not destroyed.
2375 2371 void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
2376 2372 Label& done, Label* slow_case = NULL,
2377 2373 BiasedLockingCounters* counters = NULL);
2378 2374 // Upon entry, the base register of mark_addr must contain the oop.
2379 2375 // Destroys temp_reg.
2380 2376
2381 2377 // If allow_delay_slot_filling is set to true, the next instruction
2382 2378 // emitted after this one will go in an annulled delay slot if the
2383 2379 // biased locking exit case failed.
2384 2380 void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
2385 2381
2386 2382 // allocation
2387 2383 void eden_allocate(
2388 2384 Register obj, // result: pointer to object after successful allocation
2389 2385 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
2390 2386 int con_size_in_bytes, // object size in bytes if known at compile time
2391 2387 Register t1, // temp register
2392 2388 Register t2, // temp register
2393 2389 Label& slow_case // continuation point if fast allocation fails
2394 2390 );
2395 2391 void tlab_allocate(
2396 2392 Register obj, // result: pointer to object after successful allocation
2397 2393 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
2398 2394 int con_size_in_bytes, // object size in bytes if known at compile time
2399 2395 Register t1, // temp register
2400 2396 Label& slow_case // continuation point if fast allocation fails
2401 2397 );
2402 2398 void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
2403 2399 void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
2404 2400 Register t1, Register t2);
2405 2401
2406 2402 // interface method calling
2407 2403 void lookup_interface_method(Register recv_klass,
2408 2404 Register intf_klass,
2409 2405 RegisterOrConstant itable_index,
2410 2406 Register method_result,
2411 2407 Register temp_reg, Register temp2_reg,
2412 2408 Label& no_such_interface);
2413 2409
2414 2410 // Test sub_klass against super_klass, with fast and slow paths.
2415 2411
2416 2412 // The fast path produces a tri-state answer: yes / no / maybe-slow.
2417 2413 // One of the three labels can be NULL, meaning take the fall-through.
2418 2414 // If super_check_offset is -1, the value is loaded up from super_klass.
2419 2415 // No registers are killed, except temp_reg and temp2_reg.
2420 2416 // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
2421 2417 void check_klass_subtype_fast_path(Register sub_klass,
2422 2418 Register super_klass,
2423 2419 Register temp_reg,
2424 2420 Register temp2_reg,
2425 2421 Label* L_success,
2426 2422 Label* L_failure,
2427 2423 Label* L_slow_path,
2428 2424 RegisterOrConstant super_check_offset = RegisterOrConstant(-1),
2429 2425 Register instanceof_hack = noreg);
2430 2426
2431 2427 // The rest of the type check; must be wired to a corresponding fast path.
2432 2428 // It does not repeat the fast path logic, so don't use it standalone.
2433 2429 // The temp_reg can be noreg, if no temps are available.
2434 2430 // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
2435 2431 // Updates the sub's secondary super cache as necessary.
2436 2432 void check_klass_subtype_slow_path(Register sub_klass,
2437 2433 Register super_klass,
2438 2434 Register temp_reg,
2439 2435 Register temp2_reg,
2440 2436 Register temp3_reg,
2441 2437 Register temp4_reg,
2442 2438 Label* L_success,
2443 2439 Label* L_failure);
2444 2440
2445 2441 // Simplified, combined version, good for typical uses.
2446 2442 // Falls through on failure.
2447 2443 void check_klass_subtype(Register sub_klass,
2448 2444 Register super_klass,
2449 2445 Register temp_reg,
2450 2446 Register temp2_reg,
2451 2447 Label& L_success);
2452 2448
2453 2449 // method handles (JSR 292)
2454 2450 void check_method_handle_type(Register mtype_reg, Register mh_reg,
2455 2451 Register temp_reg,
2456 2452 Label& wrong_method_type);
2457 2453 void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
2458 2454 Register temp_reg);
2459 2455 void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
2460 2456 // offset relative to Gargs of argument at tos[arg_slot].
2461 2457 // (arg_slot == 0 means the last argument, not the first).
2462 2458 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
2463 2459 int extra_slot_offset = 0);
2464 2460 // Address of Gargs and argument_offset.
2465 2461 Address argument_address(RegisterOrConstant arg_slot,
2466 2462 int extra_slot_offset = 0);
2467 2463
2468 2464 // Stack overflow checking
2469 2465
2470 2466 // Note: this clobbers G3_scratch
// Touch the stack page at SP + (STACK_BIAS - offset) by storing zero there,
// so a stack overflow traps here rather than at an arbitrary later store.
// STACK_BIAS compensates for the biased stack pointer on 64-bit SPARC.
2471 2467 void bang_stack_with_offset(int offset) {
2472 2468 // stack grows down, caller passes positive offset
2473 2469 assert(offset > 0, "must bang with negative offset");
2474 2470 set((-offset)+STACK_BIAS, G3_scratch);
2475 2471 st(G0, SP, G3_scratch);
2476 2472 }
2477 2473
  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Clobbers Rtsp and the scratch
  // register; Rsize holds the total distance to probe.
  void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);

  // Platform hook for AbstractAssembler's delayed-value mechanism:
  // materializes a value that is not yet known at assembly time.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

  // Debug check of the current thread-local allocation buffer
  // (see definition in assembler_sparc.cpp).
  void verify_tlab();

  // Returns the condition code that is the logical negation of cond.
  Condition negate_condition(Condition cond);

  // Helper functions for statistics gathering.
  // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
  void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
  // Unconditional increment.  Non-atomic; counters may drop updates
  // under contention, which is acceptable for statistics.
  void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
  void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);

  // Compare char[] arrays aligned to 4 bytes.  On equality branches to
  // Ldone with result set; chr1/chr2 are scratch registers.
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);
2500 2496 #undef VIRTUAL
2501 2497
2502 2498 };
2503 2499
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual : public StackObj {
 private:
  MacroAssembler* _masm;   // assembler being emitted into (not owned)
  Label _label;            // target of the skip branch; bound on destruction

 public:
  // 'temp' is a temp register that this object can use (and trash)
  // The constructor emits a test of *flag_addr and a conditional branch
  // over the scoped code; the destructor binds the branch target.
  SkipIfEqual(MacroAssembler*, Register temp,
              const bool* flag_addr, Assembler::Condition condition);
  ~SkipIfEqual();
};
2523 2519
#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
// (SPARC instructions are fixed-width, so any mark necessarily falls on
// an instruction boundary — the check would never fire.)
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
2528 2524
2529 2525 #endif // CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
↓ open down ↓ |
298 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX