Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/cpu/sparc/vm/assembler_sparc.hpp
+++ new/src/cpu/sparc/vm/assembler_sparc.hpp
1 1 /*
2 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 class BiasedLockingCounters;
26 26
27 27 // <sys/trap.h> promises that the system will not use traps 16-31
28 28 #define ST_RESERVED_FOR_USER_0 0x10
29 29
30 30 /* Written: David Ungar 4/19/97 */
31 31
32 32 // Contains all the definitions needed for sparc assembly code generation.
33 33
34 34 // Register aliases for parts of the system:
35 35
36 36 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
37 37 // across context switches in V8+ ABI. Of course, there are no 64 bit regs
38 38 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
39 39
40 40 // g2-g4 are scratch registers called "application globals". Their
41 41 // meaning is reserved to the "compilation system"--which means us!
42 42 // They are are not supposed to be touched by ordinary C code, although
43 43 // highly-optimized C code might steal them for temps. They are safe
44 44 // across thread switches, and the ABI requires that they be safe
45 45 // across function calls.
46 46 //
47 47 // g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
48 48 // across func calls, and V8+ also allows g5 to be clobbered across
49 49 // func calls. Also, g1 and g5 can get touched while doing shared
50 50 // library loading.
51 51 //
52 52 // We must not touch g7 (it is the thread-self register) and g6 is
53 53 // reserved for certain tools. g0, of course, is always zero.
54 54 //
55 55 // (Sources: SunSoft Compilers Group, thread library engineers.)
56 56
57 57 // %%%% The interpreter should be revisited to reduce global scratch regs.
58 58
59 59 // This global always holds the current JavaThread pointer:
60 60
61 61 REGISTER_DECLARATION(Register, G2_thread , G2);
62 62 REGISTER_DECLARATION(Register, G6_heapbase , G6);
63 63
64 64 // The following globals are part of the Java calling convention:
65 65
66 66 REGISTER_DECLARATION(Register, G5_method , G5);
67 67 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
68 68 REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
69 69
70 70 // The following globals are used for the new C1 & interpreter calling convention:
71 71 REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
72 72
73 73 // This local is used to preserve G2_thread in the interpreter and in stubs:
74 74 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
75 75
76 76 // These globals are used as scratch registers in the interpreter:
77 77
78 78 REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
79 79 REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
[... 79 lines elided from this webrev view: the remaining interpreter
register declarations between G1_scratch and G3_scratch are not shown ...]
80 80 REGISTER_DECLARATION(Register, G3_scratch , G3);
81 81 REGISTER_DECLARATION(Register, G4_scratch , G4);
82 82
83 83 // These globals are used as short-lived scratch registers in the compiler:
84 84
85 85 REGISTER_DECLARATION(Register, Gtemp , G5);
86 86
87 87 // JSR 292 fixed register usages:
88 88 REGISTER_DECLARATION(Register, G5_method_type , G5);
89 89 REGISTER_DECLARATION(Register, G3_method_handle , G3);
90 +REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
90 91
91 92 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
92 93 // because a single patchable "set" instruction (NativeMovConstReg,
93 94 // or NativeMovConstPatching for compiler1) instruction
94 95 // serves to set up either quantity, depending on whether the compiled
95 96 // call site is an inline cache or is megamorphic. See the function
96 97 // CompiledIC::set_to_megamorphic.
97 98 //
98 99 // If a inline cache targets an interpreted method, then the
99 100 // G5 register will be used twice during the call. First,
100 101 // the call site will be patched to load a compiledICHolder
101 102 // into G5. (This is an ordered pair of ic_klass, method.)
102 103 // The c2i adapter will first check the ic_klass, then load
103 104 // G5_method with the method part of the pair just before
104 105 // jumping into the interpreter.
105 106 //
106 107 // Note that G5_method is only the method-self for the interpreter,
107 108 // and is logically unrelated to G5_megamorphic_method.
108 109 //
109 110 // Invariants on G2_thread (the JavaThread pointer):
110 111 // - it should not be used for any other purpose anywhere
111 112 // - it must be re-initialized by StubRoutines::call_stub()
112 113 // - it must be preserved around every use of call_VM
113 114
114 115 // We can consider using g2/g3/g4 to cache more values than the
115 116 // JavaThread, such as the card-marking base or perhaps pointers into
116 117 // Eden. It's something of a waste to use them as scratch temporaries,
117 118 // since they are not supposed to be volatile. (Of course, if we find
118 119 // that Java doesn't benefit from application globals, then we can just
119 120 // use them as ordinary temporaries.)
120 121 //
121 122 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
122 123 // it makes sense to use them routinely for procedure linkage,
123 124 // whenever the On registers are not applicable. Examples: G5_method,
124 125 // G5_inline_cache_klass, and a double handful of miscellaneous compiler
125 126 // stubs. This means that compiler stubs, etc., should be kept to a
126 127 // maximum of two or three G-register arguments.
127 128
128 129
129 130 // stub frames
130 131
131 132 REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to callee (interpreter) not stub itself
132 133
133 134 // Interpreter frames
134 135
135 136 #ifdef CC_INTERP
136 137 REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
137 138 REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
138 139 REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
139 140 REGISTER_DECLARATION(Register, L2_scratch , L2);
140 141 REGISTER_DECLARATION(Register, L3_scratch , L3);
141 142 REGISTER_DECLARATION(Register, L4_scratch , L4);
142 143 REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
143 144 REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
144 145 REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
145 146 REGISTER_DECLARATION(Register, O5_savedSP , O5);
146 147 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
147 148 // a copy SP, so in 64-bit it's a biased value. The bias
148 149 // is added and removed as needed in the frame code.
149 150 // Interface to signature handler
150 151 REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
151 152 REGISTER_DECLARATION(Register, Lmethod , L6); // methodOop when calling signature handler
152 153
153 154 #else
154 155 REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
155 156 REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
156 157 REGISTER_DECLARATION(Register, Lmethod , L2);
157 158 REGISTER_DECLARATION(Register, Llocals , L3);
158 159 REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
159 160 // must match Llocals in asm interpreter
160 161 REGISTER_DECLARATION(Register, Lmonitors , L4);
161 162 REGISTER_DECLARATION(Register, Lbyte_code , L5);
162 163 // When calling out from the interpreter we record SP so that we can remove any extra stack
163 164 // space allocated during adapter transitions. This register is only live from the point
164 165 // of the call until we return.
165 166 REGISTER_DECLARATION(Register, Llast_SP , L5);
166 167 REGISTER_DECLARATION(Register, Lscratch , L5);
167 168 REGISTER_DECLARATION(Register, Lscratch2 , L6);
168 169 REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
169 170
170 171 REGISTER_DECLARATION(Register, O5_savedSP , O5);
171 172 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
172 173 // a copy SP, so in 64-bit it's a biased value. The bias
173 174 // is added and removed as needed in the frame code.
174 175 REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
175 176 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
176 177 REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
177 178 #endif /* CC_INTERP */
178 179
179 180 // NOTE: Lscratch2 and LcpoolCache point to the same registers in
180 181 // the interpreter code. If Lscratch2 needs to be used for some
181 182 // purpose than LcpoolCache should be restore after that for
182 183 // the interpreter to work right
183 184 // (These assignments must be compatible with L7_thread_cache; see above.)
184 185
185 186 // Since Lbcp points into the middle of the method object,
186 187 // it is temporarily converted into a "bcx" during GC.
187 188
188 189 // Exception processing
189 190 // These registers are passed into exception handlers.
190 191 // All exception handlers require the exception object being thrown.
191 192 // In addition, an nmethod's exception handler must be passed
192 193 // the address of the call site within the nmethod, to allow
193 194 // proper selection of the applicable catch block.
194 195 // (Interpreter frames use their own bcp() for this purpose.)
195 196 //
196 197 // The Oissuing_pc value is not always needed. When jumping to a
197 198 // handler that is known to be interpreted, the Oissuing_pc value can be
198 199 // omitted. An actual catch block in compiled code receives (from its
199 200 // nmethod's exception handler) the thrown exception in the Oexception,
200 201 // but it doesn't need the Oissuing_pc.
201 202 //
202 203 // If an exception handler (either interpreted or compiled)
203 204 // discovers there is no applicable catch block, it updates
204 205 // the Oissuing_pc to the continuation PC of its own caller,
205 206 // pops back to that caller's stack frame, and executes that
206 207 // caller's exception handler. Obviously, this process will
207 208 // iterate until the control stack is popped back to a method
208 209 // containing an applicable catch block. A key invariant is
209 210 // that the Oissuing_pc value is always a value local to
210 211 // the method whose exception handler is currently executing.
211 212 //
212 213 // Note: The issuing PC value is __not__ a raw return address (I7 value).
213 214 // It is a "return pc", the address __following__ the call.
214 215 // Raw return addresses are converted to issuing PCs by frame::pc(),
215 216 // or by stubs. Issuing PCs can be used directly with PC range tables.
216 217 //
217 218 REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
218 219 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
219 220
220 221
221 222 // These must occur after the declarations above
222 223 #ifndef DONT_USE_REGISTER_DEFINES
223 224
224 225 #define Gthread AS_REGISTER(Register, Gthread)
225 226 #define Gmethod AS_REGISTER(Register, Gmethod)
226 227 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
227 228 #define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
228 229 #define Gargs AS_REGISTER(Register, Gargs)
229 230 #define Lthread_cache AS_REGISTER(Register, Lthread_cache)
230 231 #define Gframe_size AS_REGISTER(Register, Gframe_size)
231 232 #define Gtemp AS_REGISTER(Register, Gtemp)
232 233
233 234 #ifdef CC_INTERP
234 235 #define Lstate AS_REGISTER(Register, Lstate)
235 236 #define Lesp AS_REGISTER(Register, Lesp)
236 237 #define L1_scratch AS_REGISTER(Register, L1_scratch)
237 238 #define Lmirror AS_REGISTER(Register, Lmirror)
238 239 #define L2_scratch AS_REGISTER(Register, L2_scratch)
239 240 #define L3_scratch AS_REGISTER(Register, L3_scratch)
240 241 #define L4_scratch AS_REGISTER(Register, L4_scratch)
241 242 #define Lscratch AS_REGISTER(Register, Lscratch)
242 243 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
243 244 #define L7_scratch AS_REGISTER(Register, L7_scratch)
244 245 #define Ostate AS_REGISTER(Register, Ostate)
245 246 #else
246 247 #define Lesp AS_REGISTER(Register, Lesp)
247 248 #define Lbcp AS_REGISTER(Register, Lbcp)
248 249 #define Lmethod AS_REGISTER(Register, Lmethod)
249 250 #define Llocals AS_REGISTER(Register, Llocals)
250 251 #define Lmonitors AS_REGISTER(Register, Lmonitors)
251 252 #define Lbyte_code AS_REGISTER(Register, Lbyte_code)
252 253 #define Lscratch AS_REGISTER(Register, Lscratch)
253 254 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
254 255 #define LcpoolCache AS_REGISTER(Register, LcpoolCache)
255 256 #endif /* ! CC_INTERP */
256 257
257 258 #define Lentry_args AS_REGISTER(Register, Lentry_args)
258 259 #define I5_savedSP AS_REGISTER(Register, I5_savedSP)
259 260 #define O5_savedSP AS_REGISTER(Register, O5_savedSP)
260 261 #define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
261 262 #define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
262 263 #define IdispatchTables AS_REGISTER(Register, IdispatchTables)
263 264
264 265 #define Oexception AS_REGISTER(Register, Oexception)
265 266 #define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
266 267
267 268
268 269 #endif
269 270
270 271 // Address is an abstraction used to represent a memory location.
271 272 //
272 273 // Note: A register location is represented via a Register, not
273 274 // via an address for efficiency & simplicity reasons.
274 275
275 276 class Address VALUE_OBJ_CLASS_SPEC {
276 277 private:
277 278 Register _base; // Base register.
278 279 RegisterOrConstant _index_or_disp; // Index register or constant displacement.
279 280 RelocationHolder _rspec;
280 281
281 282 public:
282 283 Address() : _base(noreg), _index_or_disp(noreg) {}
283 284
284 285 Address(Register base, RegisterOrConstant index_or_disp)
285 286 : _base(base),
286 287 _index_or_disp(index_or_disp) {
287 288 }
288 289
289 290 Address(Register base, Register index)
290 291 : _base(base),
291 292 _index_or_disp(index) {
292 293 }
293 294
294 295 Address(Register base, int disp)
295 296 : _base(base),
296 297 _index_or_disp(disp) {
297 298 }
298 299
299 300 #ifdef ASSERT
300 301 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
301 302 Address(Register base, ByteSize disp)
302 303 : _base(base),
303 304 _index_or_disp(in_bytes(disp)) {
304 305 }
305 306 #endif
306 307
307 308 // accessors
308 309 Register base() const { return _base; }
309 310 Register index() const { return _index_or_disp.as_register(); }
310 311 int disp() const { return _index_or_disp.as_constant(); }
311 312
312 313 bool has_index() const { return _index_or_disp.is_register(); }
313 314 bool has_disp() const { return _index_or_disp.is_constant(); }
314 315
315 316 const relocInfo::relocType rtype() { return _rspec.type(); }
316 317 const RelocationHolder& rspec() { return _rspec; }
317 318
318 319 RelocationHolder rspec(int offset) const {
319 320 return offset == 0 ? _rspec : _rspec.plus(offset);
320 321 }
321 322
322 323 inline bool is_simm13(int offset = 0); // check disp+offset for overflow
323 324
324 325 Address plus_disp(int plusdisp) const { // bump disp by a small amount
325 326 assert(_index_or_disp.is_constant(), "must have a displacement");
326 327 Address a(base(), disp() + plusdisp);
327 328 return a;
328 329 }
329 330
330 331 Address after_save() const {
331 332 Address a = (*this);
332 333 a._base = a._base->after_save();
333 334 return a;
334 335 }
335 336
336 337 Address after_restore() const {
337 338 Address a = (*this);
338 339 a._base = a._base->after_restore();
339 340 return a;
340 341 }
341 342
342 343 // Convert the raw encoding form into the form expected by the
343 344 // constructor for Address.
344 345 static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
345 346
346 347 friend class Assembler;
347 348 };
348 349
349 350
350 351 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
351 352 private:
352 353 address _address;
353 354 RelocationHolder _rspec;
354 355
355 356 RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
356 357 switch (rtype) {
357 358 case relocInfo::external_word_type:
358 359 return external_word_Relocation::spec(addr);
359 360 case relocInfo::internal_word_type:
360 361 return internal_word_Relocation::spec(addr);
361 362 #ifdef _LP64
362 363 case relocInfo::opt_virtual_call_type:
363 364 return opt_virtual_call_Relocation::spec();
364 365 case relocInfo::static_call_type:
365 366 return static_call_Relocation::spec();
366 367 case relocInfo::runtime_call_type:
367 368 return runtime_call_Relocation::spec();
368 369 #endif
369 370 case relocInfo::none:
370 371 return RelocationHolder();
371 372 default:
372 373 ShouldNotReachHere();
373 374 return RelocationHolder();
374 375 }
375 376 }
376 377
377 378 protected:
378 379 // creation
379 380 AddressLiteral() : _address(NULL), _rspec(NULL) {}
380 381
381 382 public:
382 383 AddressLiteral(address addr, RelocationHolder const& rspec)
383 384 : _address(addr),
384 385 _rspec(rspec) {}
385 386
386 387 // Some constructors to avoid casting at the call site.
387 388 AddressLiteral(jobject obj, RelocationHolder const& rspec)
388 389 : _address((address) obj),
389 390 _rspec(rspec) {}
390 391
391 392 AddressLiteral(intptr_t value, RelocationHolder const& rspec)
392 393 : _address((address) value),
393 394 _rspec(rspec) {}
394 395
395 396 AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
396 397 : _address((address) addr),
397 398 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
398 399
399 400 // Some constructors to avoid casting at the call site.
400 401 AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
401 402 : _address((address) addr),
402 403 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
403 404
404 405 AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
405 406 : _address((address) addr),
406 407 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
407 408
408 409 AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
409 410 : _address((address) addr),
410 411 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
411 412
412 413 AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
413 414 : _address((address) addr),
414 415 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
415 416
416 417 AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
417 418 : _address((address) addr),
418 419 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
419 420
420 421 AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
421 422 : _address((address) addr),
422 423 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
423 424
424 425 #ifdef _LP64
425 426 // 32-bit complains about a multiple declaration for int*.
426 427 AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
427 428 : _address((address) addr),
428 429 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
429 430 #endif
430 431
431 432 AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
432 433 : _address((address) addr),
433 434 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
434 435
435 436 AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
436 437 : _address((address) addr),
437 438 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
438 439
439 440 AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
440 441 : _address((address) addr),
441 442 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
442 443
443 444 intptr_t value() const { return (intptr_t) _address; }
444 445 int low10() const;
445 446
446 447 const relocInfo::relocType rtype() const { return _rspec.type(); }
447 448 const RelocationHolder& rspec() const { return _rspec; }
448 449
449 450 RelocationHolder rspec(int offset) const {
450 451 return offset == 0 ? _rspec : _rspec.plus(offset);
451 452 }
452 453 };
453 454
454 455
455 456 inline Address RegisterImpl::address_in_saved_window() const {
456 457 return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
457 458 }
458 459
459 460
460 461
461 462 // Argument is an abstraction used to represent an outgoing
462 463 // actual argument or an incoming formal parameter, whether
463 464 // it resides in memory or in a register, in a manner consistent
464 465 // with the SPARC Application Binary Interface, or ABI. This is
465 466 // often referred to as the native or C calling convention.
466 467
467 468 class Argument VALUE_OBJ_CLASS_SPEC {
468 469 private:
469 470 int _number;
470 471 bool _is_in;
471 472
472 473 public:
473 474 #ifdef _LP64
474 475 enum {
475 476 n_register_parameters = 6, // only 6 registers may contain integer parameters
476 477 n_float_register_parameters = 16 // Can have up to 16 floating registers
477 478 };
478 479 #else
479 480 enum {
480 481 n_register_parameters = 6 // only 6 registers may contain integer parameters
481 482 };
482 483 #endif
483 484
484 485 // creation
485 486 Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
486 487
487 488 int number() const { return _number; }
488 489 bool is_in() const { return _is_in; }
489 490 bool is_out() const { return !is_in(); }
490 491
491 492 Argument successor() const { return Argument(number() + 1, is_in()); }
492 493 Argument as_in() const { return Argument(number(), true ); }
493 494 Argument as_out() const { return Argument(number(), false); }
494 495
495 496 // locating register-based arguments:
496 497 bool is_register() const { return _number < n_register_parameters; }
497 498
498 499 #ifdef _LP64
499 500 // locating Floating Point register-based arguments:
500 501 bool is_float_register() const { return _number < n_float_register_parameters; }
501 502
502 503 FloatRegister as_float_register() const {
503 504 assert(is_float_register(), "must be a register argument");
504 505 return as_FloatRegister(( number() *2 ) + 1);
505 506 }
506 507 FloatRegister as_double_register() const {
507 508 assert(is_float_register(), "must be a register argument");
508 509 return as_FloatRegister(( number() *2 ));
509 510 }
510 511 #endif
511 512
512 513 Register as_register() const {
513 514 assert(is_register(), "must be a register argument");
514 515 return is_in() ? as_iRegister(number()) : as_oRegister(number());
515 516 }
516 517
517 518 // locating memory-based arguments
518 519 Address as_address() const {
519 520 assert(!is_register(), "must be a memory argument");
520 521 return address_in_frame();
521 522 }
522 523
523 524 // When applied to a register-based argument, give the corresponding address
524 525 // into the 6-word area "into which callee may store register arguments"
525 526 // (This is a different place than the corresponding register-save area location.)
526 527 Address address_in_frame() const;
527 528
528 529 // debugging
529 530 const char* name() const;
530 531
531 532 friend class Assembler;
532 533 };
533 534
534 535
535 536 // The SPARC Assembler: Pure assembler doing NO optimizations on the instruction
536 537 // level; i.e., what you write
537 538 // is what you get. The Assembler is generating code into a CodeBuffer.
538 539
539 540 class Assembler : public AbstractAssembler {
540 541 protected:
541 542
542 543 static void print_instruction(int inst);
543 544 static int patched_branch(int dest_pos, int inst, int inst_pos);
544 545 static int branch_destination(int inst, int pos);
545 546
546 547
547 548 friend class AbstractAssembler;
548 549 friend class AddressLiteral;
549 550
550 551 // code patchers need various routines like inv_wdisp()
551 552 friend class NativeInstruction;
552 553 friend class NativeGeneralJump;
553 554 friend class Relocation;
554 555 friend class Label;
555 556
556 557 public:
557 558 // op carries format info; see page 62 & 267
558 559
559 560 enum ops {
560 561 call_op = 1, // fmt 1
561 562 branch_op = 0, // also sethi (fmt2)
562 563 arith_op = 2, // fmt 3, arith & misc
563 564 ldst_op = 3 // fmt 3, load/store
564 565 };
565 566
566 567 enum op2s {
567 568 bpr_op2 = 3,
568 569 fb_op2 = 6,
569 570 fbp_op2 = 5,
570 571 br_op2 = 2,
571 572 bp_op2 = 1,
572 573 cb_op2 = 7, // V8
573 574 sethi_op2 = 4
574 575 };
575 576
576 577 enum op3s {
577 578 // selected op3s
578 579 add_op3 = 0x00,
579 580 and_op3 = 0x01,
580 581 or_op3 = 0x02,
581 582 xor_op3 = 0x03,
582 583 sub_op3 = 0x04,
583 584 andn_op3 = 0x05,
584 585 orn_op3 = 0x06,
585 586 xnor_op3 = 0x07,
586 587 addc_op3 = 0x08,
587 588 mulx_op3 = 0x09,
588 589 umul_op3 = 0x0a,
589 590 smul_op3 = 0x0b,
590 591 subc_op3 = 0x0c,
591 592 udivx_op3 = 0x0d,
592 593 udiv_op3 = 0x0e,
593 594 sdiv_op3 = 0x0f,
594 595
595 596 addcc_op3 = 0x10,
596 597 andcc_op3 = 0x11,
597 598 orcc_op3 = 0x12,
598 599 xorcc_op3 = 0x13,
599 600 subcc_op3 = 0x14,
600 601 andncc_op3 = 0x15,
601 602 orncc_op3 = 0x16,
602 603 xnorcc_op3 = 0x17,
603 604 addccc_op3 = 0x18,
604 605 umulcc_op3 = 0x1a,
605 606 smulcc_op3 = 0x1b,
606 607 subccc_op3 = 0x1c,
607 608 udivcc_op3 = 0x1e,
608 609 sdivcc_op3 = 0x1f,
609 610
610 611 taddcc_op3 = 0x20,
611 612 tsubcc_op3 = 0x21,
612 613 taddcctv_op3 = 0x22,
613 614 tsubcctv_op3 = 0x23,
614 615 mulscc_op3 = 0x24,
615 616 sll_op3 = 0x25,
616 617 sllx_op3 = 0x25,
617 618 srl_op3 = 0x26,
618 619 srlx_op3 = 0x26,
619 620 sra_op3 = 0x27,
620 621 srax_op3 = 0x27,
621 622 rdreg_op3 = 0x28,
622 623 membar_op3 = 0x28,
623 624
624 625 flushw_op3 = 0x2b,
625 626 movcc_op3 = 0x2c,
626 627 sdivx_op3 = 0x2d,
627 628 popc_op3 = 0x2e,
628 629 movr_op3 = 0x2f,
629 630
630 631 sir_op3 = 0x30,
631 632 wrreg_op3 = 0x30,
632 633 saved_op3 = 0x31,
633 634
634 635 fpop1_op3 = 0x34,
635 636 fpop2_op3 = 0x35,
636 637 impdep1_op3 = 0x36,
637 638 impdep2_op3 = 0x37,
638 639 jmpl_op3 = 0x38,
639 640 rett_op3 = 0x39,
640 641 trap_op3 = 0x3a,
641 642 flush_op3 = 0x3b,
642 643 save_op3 = 0x3c,
643 644 restore_op3 = 0x3d,
644 645 done_op3 = 0x3e,
645 646 retry_op3 = 0x3e,
646 647
647 648 lduw_op3 = 0x00,
648 649 ldub_op3 = 0x01,
649 650 lduh_op3 = 0x02,
650 651 ldd_op3 = 0x03,
651 652 stw_op3 = 0x04,
652 653 stb_op3 = 0x05,
653 654 sth_op3 = 0x06,
654 655 std_op3 = 0x07,
655 656 ldsw_op3 = 0x08,
656 657 ldsb_op3 = 0x09,
657 658 ldsh_op3 = 0x0a,
658 659 ldx_op3 = 0x0b,
659 660
660 661 ldstub_op3 = 0x0d,
661 662 stx_op3 = 0x0e,
662 663 swap_op3 = 0x0f,
663 664
664 665 lduwa_op3 = 0x10,
665 666 ldxa_op3 = 0x1b,
666 667
667 668 stwa_op3 = 0x14,
668 669 stxa_op3 = 0x1e,
669 670
670 671 ldf_op3 = 0x20,
671 672 ldfsr_op3 = 0x21,
672 673 ldqf_op3 = 0x22,
673 674 lddf_op3 = 0x23,
674 675 stf_op3 = 0x24,
675 676 stfsr_op3 = 0x25,
676 677 stqf_op3 = 0x26,
677 678 stdf_op3 = 0x27,
678 679
679 680 prefetch_op3 = 0x2d,
680 681
681 682
682 683 ldc_op3 = 0x30,
683 684 ldcsr_op3 = 0x31,
684 685 lddc_op3 = 0x33,
685 686 stc_op3 = 0x34,
686 687 stcsr_op3 = 0x35,
687 688 stdcq_op3 = 0x36,
688 689 stdc_op3 = 0x37,
689 690
690 691 casa_op3 = 0x3c,
691 692 casxa_op3 = 0x3e,
692 693
693 694 alt_bit_op3 = 0x10,
694 695 cc_bit_op3 = 0x10
695 696 };
696 697
697 698 enum opfs {
698 699 // selected opfs
699 700 fmovs_opf = 0x01,
700 701 fmovd_opf = 0x02,
701 702
702 703 fnegs_opf = 0x05,
703 704 fnegd_opf = 0x06,
704 705
705 706 fadds_opf = 0x41,
706 707 faddd_opf = 0x42,
707 708 fsubs_opf = 0x45,
708 709 fsubd_opf = 0x46,
709 710
710 711 fmuls_opf = 0x49,
711 712 fmuld_opf = 0x4a,
712 713 fdivs_opf = 0x4d,
713 714 fdivd_opf = 0x4e,
714 715
715 716 fcmps_opf = 0x51,
716 717 fcmpd_opf = 0x52,
717 718
718 719 fstox_opf = 0x81,
719 720 fdtox_opf = 0x82,
720 721 fxtos_opf = 0x84,
721 722 fxtod_opf = 0x88,
722 723 fitos_opf = 0xc4,
723 724 fdtos_opf = 0xc6,
724 725 fitod_opf = 0xc8,
725 726 fstod_opf = 0xc9,
726 727 fstoi_opf = 0xd1,
727 728 fdtoi_opf = 0xd2
728 729 };
729 730
730 731 enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7 };
731 732
732 733 enum Condition {
733 734 // for FBfcc & FBPfcc instruction
734 735 f_never = 0,
735 736 f_notEqual = 1,
736 737 f_notZero = 1,
737 738 f_lessOrGreater = 2,
738 739 f_unorderedOrLess = 3,
739 740 f_less = 4,
740 741 f_unorderedOrGreater = 5,
741 742 f_greater = 6,
742 743 f_unordered = 7,
743 744 f_always = 8,
744 745 f_equal = 9,
745 746 f_zero = 9,
746 747 f_unorderedOrEqual = 10,
747 748 f_greaterOrEqual = 11,
748 749 f_unorderedOrGreaterOrEqual = 12,
749 750 f_lessOrEqual = 13,
750 751 f_unorderedOrLessOrEqual = 14,
751 752 f_ordered = 15,
752 753
753 754 // V8 coproc, pp 123 v8 manual
754 755
755 756 cp_always = 8,
756 757 cp_never = 0,
757 758 cp_3 = 7,
758 759 cp_2 = 6,
759 760 cp_2or3 = 5,
760 761 cp_1 = 4,
761 762 cp_1or3 = 3,
762 763 cp_1or2 = 2,
763 764 cp_1or2or3 = 1,
764 765 cp_0 = 9,
765 766 cp_0or3 = 10,
766 767 cp_0or2 = 11,
767 768 cp_0or2or3 = 12,
768 769 cp_0or1 = 13,
769 770 cp_0or1or3 = 14,
770 771 cp_0or1or2 = 15,
771 772
772 773
773 774 // for integers
774 775
775 776 never = 0,
776 777 equal = 1,
777 778 zero = 1,
778 779 lessEqual = 2,
779 780 less = 3,
780 781 lessEqualUnsigned = 4,
781 782 lessUnsigned = 5,
782 783 carrySet = 5,
783 784 negative = 6,
784 785 overflowSet = 7,
785 786 always = 8,
786 787 notEqual = 9,
787 788 notZero = 9,
788 789 greater = 10,
789 790 greaterEqual = 11,
790 791 greaterUnsigned = 12,
791 792 greaterEqualUnsigned = 13,
792 793 carryClear = 13,
793 794 positive = 14,
794 795 overflowClear = 15
795 796 };
796 797
797 798 enum CC {
798 799 icc = 0, xcc = 2,
799 800 // ptr_cc is the correct condition code for a pointer or intptr_t:
800 801 ptr_cc = NOT_LP64(icc) LP64_ONLY(xcc),
801 802 fcc0 = 0, fcc1 = 1, fcc2 = 2, fcc3 = 3
802 803 };
803 804
804 805 enum PrefetchFcn {
805 806 severalReads = 0, oneRead = 1, severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
806 807 };
807 808
808 809 public:
809 810 // Helper functions for groups of instructions
810 811
811 812 enum Predict { pt = 1, pn = 0 }; // pt = predict taken
812 813
813 814 enum Membar_mask_bits { // page 184, v9
814 815 StoreStore = 1 << 3,
815 816 LoadStore = 1 << 2,
816 817 StoreLoad = 1 << 1,
817 818 LoadLoad = 1 << 0,
818 819
819 820 Sync = 1 << 6,
820 821 MemIssue = 1 << 5,
821 822 Lookaside = 1 << 4
822 823 };
823 824
824 825 // test if x is within signed immediate range for nbits
825 826 static bool is_simm(int x, int nbits) { return -( 1 << nbits-1 ) <= x && x < ( 1 << nbits-1 ); }
826 827
827 828 // test if -4096 <= x <= 4095
828 829 static bool is_simm13(int x) { return is_simm(x, 13); }
829 830
  // Address space identifiers, used by the alternate-space load/store
  // instructions (imm_asi field).
  enum ASIs { // page 72, v9
    ASI_PRIMARY        = 0x80,
    ASI_PRIMARY_LITTLE = 0x88
    // add more from book as needed
  };
835 836
836 837 protected:
837 838 // helpers
838 839
839 840 // x is supposed to fit in a field "nbits" wide
840 841 // and be sign-extended. Check the range.
841 842
  // Assert that x fits, sign-extended, in an nbits-wide field.
  // nbits == 32 is accepted unconditionally: any 32-bit value fits, and
  // evaluating 1 << 31 in the comparison below would overflow.
  static void assert_signed_range(intptr_t x, int nbits) {
    assert( nbits == 32
        ||  -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
            "value out of range");
  }
847 848
  // A word displacement must be 4-byte aligned; the encoding drops the low
  // two bits, so the byte value may span nbits + 2 signed bits.
  static void assert_signed_word_disp_range(intptr_t x, int nbits) {
    assert( (x & 3) == 0, "not word aligned");
    assert_signed_range(x, nbits + 2);
  }
852 853
  // Assert that x fits in an unsigned nbits-wide field: 0 <= x < 2^nbits.
  static void assert_unsigned_const(int x, int nbits) {
    assert( juint(x)  <  juint(1 << nbits), "unsigned constant out of range");
  }
856 857
857 858 // fields: note bits numbered from LSB = 0,
858 859 // fields known by inclusive bit range
859 860
860 861 static int fmask(juint hi_bit, juint lo_bit) {
861 862 assert( hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits");
862 863 return (1 << ( hi_bit-lo_bit + 1 )) - 1;
863 864 }
864 865
865 866 // inverse of u_field
866 867
  // Extract the unsigned value of the field [lo_bit, hi_bit] from
  // instruction word x (inverse of u_field).
  static int inv_u_field(int x, int hi_bit, int lo_bit) {
    juint r = juint(x) >> lo_bit;   // shift field down to bit 0
    r &= fmask( hi_bit, lo_bit);    // strip bits above the field
    return int(r);
  }
872 873
873 874
874 875 // signed version: extract from field and sign-extend
875 876
  // Extract the field [lo_bit, hi_bit] and sign-extend it: shifting left so
  // the field's top bit becomes bit 31, then arithmetic-shifting back,
  // replicates the field's sign bit into the high bits.
  // NOTE(review): inv_u_field then masks the result with the field-width
  // mask, which truncates that sign extension again — for in-range inputs
  // this appears to return the same value as inv_u_field. Verify whether
  // any caller relies on a signed result.
  static int inv_s_field(int x, int hi_bit, int lo_bit) {
    int sign_shift = 31 - hi_bit;
    return inv_u_field( ((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
  }
880 881
881 882 // given a field that ranges from hi_bit to lo_bit (inclusive,
882 883 // LSB = 0), and an unsigned value for the field,
883 884 // shift it into the field
884 885
#ifdef ASSERT
  // Place unsigned value x into the field [lo_bit, hi_bit] (LSB = 0);
  // asserts x fits in the field and that inv_u_field recovers it.
  static int u_field(int x, int hi_bit, int lo_bit) {
    assert( ( x & ~fmask(hi_bit, lo_bit))  == 0,
            "value out of range");
    int r = x << lo_bit;
    assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
    return r;
  }
#else
  // make sure this is inlined as it will reduce code size significantly
  #define u_field(x, hi_bit, lo_bit)   ((x) << (lo_bit))
#endif
897 898
  // Inverse accessors: pull the standard fields back out of an
  // instruction word (bit positions mirror the encoders below).
  static int inv_op(  int x ) { return inv_u_field(x, 31, 30); }
  static int inv_op2( int x ) { return inv_u_field(x, 24, 22); }
  static int inv_op3( int x ) { return inv_u_field(x, 24, 19); }
  static int inv_cond( int x ){ return inv_u_field(x, 28, 25); }

  // true iff the i (immediate) bit is set in instruction word x
  static bool inv_immed( int x ) { return (x & Assembler::immed(true)) != 0; }

  static Register inv_rd(  int x ) { return as_Register(inv_u_field(x, 29, 25)); }
  static Register inv_rs1( int x ) { return as_Register(inv_u_field(x, 18, 14)); }
  static Register inv_rs2( int x ) { return as_Register(inv_u_field(x,  4,  0)); }
908 909
  // Field encoders: shift an operand value into its bit position within
  // the instruction word (bit ranges per the V9 instruction formats).
  static int op(       int         x)  { return  u_field(x,             31, 30); }
  static int rd(       Register    r)  { return  u_field(r->encoding(), 29, 25); }
  static int fcn(      int         x)  { return  u_field(x,             29, 25); }
  static int op3(      int         x)  { return  u_field(x,             24, 19); }
  static int rs1(      Register    r)  { return  u_field(r->encoding(), 18, 14); }
  static int rs2(      Register    r)  { return  u_field(r->encoding(),  4,  0); }
  static int annul(    bool        a)  { return  u_field(a ? 1 : 0,     29, 29); }
  static int cond(     int         x)  { return  u_field(x,             28, 25); }
  static int cond_mov( int         x)  { return  u_field(x,             17, 14); }
  static int rcond(    RCondition  x)  { return  u_field(x,             12, 10); }
  static int op2(      int         x)  { return  u_field(x,             24, 22); }
  static int predict(  bool        p)  { return  u_field(p ? 1 : 0,     19, 19); }
  static int branchcc( CC       fcca)  { return  u_field(fcca,          21, 20); }
  static int cmpcc(    CC       fcca)  { return  u_field(fcca,          26, 25); }
  static int imm_asi(  int         x)  { return  u_field(x,             12,  5); }
  static int immed(    bool        i)  { return  u_field(i ? 1 : 0,     13, 13); }
  static int opf_low6( int         w)  { return  u_field(w,             10,  5); }
  static int opf_low5( int         w)  { return  u_field(w,              9,  5); }
  static int trapcc(   CC         cc)  { return  u_field(cc,            12, 11); }
  static int sx(       int         i)  { return  u_field(i,             12, 12); } // shift x=1 means 64-bit
  static int opf(      int         x)  { return  u_field(x,             13,  5); }

  // condition-code selector encodings for fpop/mov-cc forms
  static int opf_cc( CC c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
  static int mov_cc( CC c, bool useFloat ) { return u_field(useFloat ? 0 : 1, 18, 18) | u_field(c, 12, 11); }

  // float-register fields; encoding depends on the operand width
  static int fd( FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
  static int fs1(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
  static int fs2(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa),  4,  0); };
937 938
  // some float instructions use this encoding on the op3 field:
  // the base op is adjusted by operand width (S: +0, D: +3, Q: +2).
  static int alt_op3(int op, FloatRegisterImpl::Width w) {
    int r;
    switch(w) {
     case FloatRegisterImpl::S: r = op + 0;  break;
     case FloatRegisterImpl::D: r = op + 3;  break;
     case FloatRegisterImpl::Q: r = op + 2;  break;
     default: ShouldNotReachHere();  break;
    }
    return op3(r);
  }
949 950
950 951
951 952 // compute inverse of simm
952 953 static int inv_simm(int x, int nbits) {
953 954 return (int)(x << (32 - nbits)) >> (32 - nbits);
954 955 }
955 956
956 957 static int inv_simm13( int x ) { return inv_simm(x, 13); }
957 958
958 959 // signed immediate, in low bits, nbits long
  // signed immediate, in low bits, nbits long: range-check x, then
  // truncate it to the field width (two's-complement representation).
  static int simm(int x, int nbits) {
    assert_signed_range(x, nbits);
    return x  &  (( 1 << nbits ) - 1);
  }
963 964
964 965 // compute inverse of wdisp16
965 966 static intptr_t inv_wdisp16(int x, intptr_t pos) {
966 967 int lo = x & (( 1 << 14 ) - 1);
967 968 int hi = (x >> 20) & 3;
968 969 if (hi >= 2) hi |= ~1;
969 970 return (((hi << 14) | lo) << 2) + pos;
970 971 }
971 972
972 973 // word offset, 14 bits at LSend, 2 bits at B21, B20
  // word offset, 14 bits at LSend, 2 bits at B21, B20:
  // encode the word displacement from off to x into the split wdisp16
  // field, then self-check by decoding it again with inv_wdisp16.
  static int wdisp16(intptr_t x, intptr_t off) {
    intptr_t xx = x - off;                       // byte displacement
    assert_signed_word_disp_range(xx, 16);
    int r =  (xx >> 2) & ((1 << 14) - 1)         // low 14 bits of word disp
           |  (  ( (xx>>(2+14)) & 3 )  <<  20 ); // top 2 bits at B21..B20
    assert( inv_wdisp16(r, off) == x, "inverse is not inverse");
    return r;
  }
981 982
982 983
983 984 // word displacement in low-order nbits bits
984 985
985 986 static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) {
986 987 int pre_sign_extend = x & (( 1 << nbits ) - 1);
987 988 int r = pre_sign_extend >= ( 1 << (nbits-1) )
988 989 ? pre_sign_extend | ~(( 1 << nbits ) - 1)
989 990 : pre_sign_extend;
990 991 return (r << 2) + pos;
991 992 }
992 993
  // Encode the word displacement from off to x into the low-order nbits
  // bits, then self-check by decoding it again with inv_wdisp.
  static int wdisp( intptr_t x, intptr_t off, int nbits ) {
    intptr_t xx = x - off;                   // byte displacement
    assert_signed_word_disp_range(xx, nbits);
    int r =  (xx >> 2) & (( 1 << nbits ) - 1);
    assert( inv_wdisp( r, off, nbits )  ==  x, "inverse not inverse");
    return r;
  }
1000 1001
1001 1002
1002 1003 // Extract the top 32 bits in a 64 bit word
1003 1004 static int32_t hi32( int64_t x ) {
1004 1005 int32_t r = int32_t( (uint64_t)x >> 32 );
1005 1006 return r;
1006 1007 }
1007 1008
  // given a sethi instruction, extract the constant, left-justified:
  // shifting left by 10 pushes the op/rd/op2 fields (bits 31..22) off the
  // top and places imm22 in bits 31..10; the low 10 bits are zero.
  static int inv_hi22( int x ) {
    return x << 10;
  }
1012 1013
  // create an imm22 field, given a 32-bit left-justified constant:
  // the top 22 bits of x become the right-justified field value.
  static int hi22( int x ) {
    int r = int( juint(x) >> 10 );
    assert( (r & ~((1 << 22) - 1))  ==  0, "just checkin'");
    return r;
  }
1019 1020
1020 1021 // create a low10 __value__ (not a field) for a given a 32-bit constant
1021 1022 static int low10( int x ) {
1022 1023 return x & ((1 << 10) - 1);
1023 1024 }
1024 1025
  // Architecture-version gates: assert that the instruction being emitted
  // is legal on the target CPU generation (V8 vs V9).

  // instruction only in v9
  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }

  // instruction only in v8
  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }

  // instruction deprecated in v9
  static void v9_dep()  { } // do nothing for now

  // some float instructions only exist for single prec. on v8
  static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S)  v9_only(); }

  // v8 has no CC field; any non-zero cc selector requires v9
  static void v8_no_cc(CC cc)  { if (cc)  v9_only(); }
1039 1040
1040 1041 protected:
1041 1042 // Simple delay-slot scheme:
  // In order to check the programmer, the assembler keeps track of delay slots.
1043 1044 // It forbids CTIs in delay slots (conservative, but should be OK).
1044 1045 // Also, when putting an instruction into a delay slot, you must say
1045 1046 // asm->delayed()->add(...), in order to check that you don't omit
1046 1047 // delay-slot instructions.
1047 1048 // To implement this, we use a simple FSA
1048 1049
#ifdef ASSERT
  #define CHECK_DELAY
#endif
#ifdef CHECK_DELAY
  // FSA state for delay-slot checking: outside any delay slot, just after
  // a CTI (whose delay slot must be filled next), or currently filling it.
  enum Delay_state { no_delay, at_delay_slot, filling_delay_slot } delay_state;
#endif
1055 1056
1056 1057 public:
1057 1058 // Tells assembler next instruction must NOT be in delay slot.
1058 1059 // Use at start of multinstruction macros.
  // Assert (under CHECK_DELAY) that we are not sitting in a delay slot;
  // a no-op in product builds.
  void assert_not_delayed() {
    // This is a separate overloading to avoid creation of string constants
    // in non-asserted code--with some compilers this pollutes the object code.
#ifdef CHECK_DELAY
    assert_not_delayed("next instruction should not be a delay slot");
#endif
  }
  // Same check, with a caller-supplied failure message.
  void assert_not_delayed(const char* msg) {
#ifdef CHECK_DELAY
    assert_msg ( delay_state == no_delay, msg);
#endif
  }
1071 1072
1072 1073 protected:
1073 1074 // Delay slot helpers
1074 1075 // cti is called when emitting control-transfer instruction,
1075 1076 // BEFORE doing the emitting.
1076 1077 // Only effective when assertion-checking is enabled.
  // Called when emitting a control-transfer instruction, BEFORE the
  // emission: verifies a CTI is not being placed in a delay slot.
  void cti() {
#ifdef CHECK_DELAY
    assert_not_delayed("cti should not be in delay slot");
#endif
  }

  // called when emitting cti with a delay slot, AFTER emitting;
  // advances the FSA so the next instruction is expected via delayed().
  void has_delay_slot() {
#ifdef CHECK_DELAY
    assert_not_delayed("just checking");
    delay_state = at_delay_slot;
#endif
  }
1090 1091
1091 1092 public:
  // Tells assembler you know that next instruction is delayed;
  // returns this so the slot filler reads asm->delayed()->add(...).
  Assembler* delayed() {
#ifdef CHECK_DELAY
    assert ( delay_state == at_delay_slot, "delayed instruction is not in delay slot");
    delay_state = filling_delay_slot;
#endif
    return this;
  }
1100 1101
  // End code emission: asserts we are not leaving a delay slot unfilled,
  // then delegates to the base class.
  void flush() {
#ifdef CHECK_DELAY
    assert ( delay_state == no_delay, "ending code with a delay slot");
#endif
    AbstractAssembler::flush();
  }
1107 1108
  // Emission primitives. emit_long shadows AbstractAssembler::emit_long;
  // check_delay is their shared helper (presumably advancing the
  // delay-slot FSA — see the .inline.hpp definitions to confirm).
  inline void emit_long(int);  // shadows AbstractAssembler::emit_long
  inline void emit_data(int x) { emit_long(x); }
  inline void emit_data(int, RelocationHolder const&);
  inline void emit_data(int, relocInfo::relocType rtype);
  // helper for above fcns
  inline void check_delay();
1114 1115
1115 1116
1116 1117 public:
1117 1118 // instructions, refer to page numbers in the SPARC Architecture Manual, V9
1118 1119
1119 1120 // pp 135 (addc was addx in v8)
1120 1121
  inline void add(Register s1, Register s2, Register d );
  inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
  inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
  inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
  // Address form: base register plus (displacement + offset), relocated.
  inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }

  // addcc: add and set condition codes; addc: add with carry; addccc: both.
  void addcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3             ) | rs1(s1) | rs2(s2) ); }
  void addc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1133 1134
1134 1135 // pp 136
1135 1136
  // bpr: branch on register contents, with annul bit and prediction (v9)
  inline void bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
  inline void bpr( RCondition c, bool a, Predict p, Register s1, Label& L);

 protected: // use MacroAssembler::br instead

  // pp 138

  // fb: branch on floating-point condition (v8 form, no prediction)
  inline void fb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void fb( Condition c, bool a, Label& L );

  // pp 141

  // fbp: branch on floating-point condition with cc selector and prediction
  inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1150 1151
1151 1152 public:
1152 1153
  // pp 144

  // br: branch on integer condition codes (v8 form, no prediction)
  inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void br( Condition c, bool a, Label& L );

  // pp 146

  // bp: branch on integer condition codes with cc selector and prediction
  inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );

  // pp 121 (V8)

  // cb: coprocessor branch (V8 only, per the page reference above)
  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void cb( Condition c, bool a, Label& L );

  // pp 149

  // call: pc-relative call; default relocation marks it a runtime call
  inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
  inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
1172 1173
1173 1174 // pp 150
1174 1175
  // These instructions compare the contents of s2 with the contents of
  // memory at address in s1. If the values are equal, the contents of memory
  // at address s1 is swapped with the data in d. If the values are not equal,
  // the contents of memory at s1 is loaded into d, without the swap.

  // ia == -1 selects the implicit ASI (immediate form); otherwise the
  // given address space identifier is encoded.
  void casa(  Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1  ? immed(true) : imm_asi(ia)) | rs2(s2)); }
  void casxa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1  ? immed(true) : imm_asi(ia)) | rs2(s2)); }

  // pp 152

  // unsigned/signed divide, with and without setting condition codes
  void udiv(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3             ) | rs1(s1) | rs2(s2)); }
  void udiv(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdiv(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3             ) | rs1(s1) | rs2(s2)); }
  void sdiv(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void udivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void udivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void sdivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1193 1194
1194 1195 // pp 155
1195 1196
  // done/retry: return from a trap handler (v9; both are CTIs)
  void done()  { v9_only();  cti();  emit_long( op(arith_op) | fcn(0) | op3(done_op3) ); }
  void retry() { v9_only();  cti();  emit_long( op(arith_op) | fcn(1) | op3(retry_op3) ); }

  // pp 156

  // floating-point add/subtract; opf selects the width variant
  void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
  void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }

  // pp 157

  // floating-point compares into cc register; non-zero cc requires v9.
  // fcmpe uses the 0x54-based opf variants (see pp 157 for the semantics).
  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }

  // pp 159

  // convert float to 64-bit (v9) / 32-bit integer
  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }

  // pp 160

  // convert between float widths (source width sw to destination width dw)
  void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }

  // pp 161

  // convert 64-bit (v9) / 32-bit integer to float
  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }

  // pp 162

  // register-to-register float move; non-single widths require v9
  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }

  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
  // on v8 to do negation of single, double and quad precision floats.

  // in-place negate: uses the width variant on v9, fnegs (opf 0x05) on v8
  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }

  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
  // on v8 to do abs operation on single/double/quad precision floats.

  // in-place abs: uses the width variant on v9, fabss (opf 0x09) on v8
  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }

  // pp 163

  // multiply (the two-width form widens sw operands to a dw result) and divide
  void fmul( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x48 + w)           | fs2(s2, w)); }
  void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4)   | fs2(s2, sw)); }
  void fdiv( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x4c + w)           | fs2(s2, w)); }

  // pp 164

  void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
1250 1251
1251 1252 // pp 165
1252 1253
  // flush: instruction-cache flush at address s1 + s2 / s1 + simm13a
  inline void flush( Register s1, Register s2 );
  inline void flush( Register s1, int simm13a);

  // pp 167

  void flushw() { v9_only(); emit_long( op(arith_op) | op3(flushw_op3) ); }

  // pp 168

  // illtrap: illegal-instruction trap; only const22a == 0 is legal on v8
  void illtrap( int const22a) { if (const22a != 0) v9_only(); emit_long( op(branch_op) | u_field(const22a, 21, 0) ); }
  // v8 unimp == illtrap(0)

  // pp 169

  // implementation-dependent instructions (v9)
  void impdep1( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
  void impdep2( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }

  // pp 149 (v8)

  // coprocessor operate instructions (v8 only; share op3 with impdep1/2)
  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }

  // pp 170

  // jmpl: jump to s1 + s2 / s1 + simm13a, saving return address in d
  void jmpl( Register s1, Register s2, Register d );
  void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );

  // 171

  // ldf: load floating-point register of width w from memory
  inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
  inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
  inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());

  inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);


  // load the floating-point state register (ldxfsr is the 64-bit v9 form)
  inline void ldfsr(  Register s1, Register s2 );
  inline void ldfsr(  Register s1, int simm13a);
  inline void ldxfsr( Register s1, Register s2 );
  inline void ldxfsr( Register s1, int simm13a);

  // pp 94 (v8)

  // coprocessor loads (v8 only)
  inline void ldc(   Register s1, Register s2, int crd );
  inline void ldc(   Register s1, int simm13a, int crd);
  inline void lddc(  Register s1, Register s2, int crd );
  inline void lddc(  Register s1, int simm13a, int crd);
  inline void ldcsr( Register s1, Register s2, int crd );
  inline void ldcsr( Register s1, int simm13a, int crd);
1302 1303
1303 1304
1304 1305 // 173
1305 1306
  // ldfa: float load from an alternate address space
  void ldfa(  FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldfa(  FloatRegisterImpl::Width w, Register s1, int simm13a,         FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 175, lduw is ld on v8

  // integer loads: signed/unsigned byte, half, word; x = 64-bit, d = doubleword
  inline void ldsb(  Register s1, Register s2, Register d );
  inline void ldsb(  Register s1, int simm13a, Register d);
  inline void ldsh(  Register s1, Register s2, Register d );
  inline void ldsh(  Register s1, int simm13a, Register d);
  inline void ldsw(  Register s1, Register s2, Register d );
  inline void ldsw(  Register s1, int simm13a, Register d);
  inline void ldub(  Register s1, Register s2, Register d );
  inline void ldub(  Register s1, int simm13a, Register d);
  inline void lduh(  Register s1, Register s2, Register d );
  inline void lduh(  Register s1, int simm13a, Register d);
  inline void lduw(  Register s1, Register s2, Register d );
  inline void lduw(  Register s1, int simm13a, Register d);
  inline void ldx(   Register s1, Register s2, Register d );
  inline void ldx(   Register s1, int simm13a, Register d);
  inline void ld(    Register s1, Register s2, Register d );
  inline void ld(    Register s1, int simm13a, Register d);
  inline void ldd(   Register s1, Register s2, Register d );
  inline void ldd(   Register s1, int simm13a, Register d);

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  inline void ld(    Register s1, ByteSize simm13a, Register d);
#endif

  // Address forms, with an extra byte offset folded into the displacement
  inline void ldsb(const Address& a, Register d, int offset = 0);
  inline void ldsh(const Address& a, Register d, int offset = 0);
  inline void ldsw(const Address& a, Register d, int offset = 0);
  inline void ldub(const Address& a, Register d, int offset = 0);
  inline void lduh(const Address& a, Register d, int offset = 0);
  inline void lduw(const Address& a, Register d, int offset = 0);
  inline void ldx( const Address& a, Register d, int offset = 0);
  inline void ld(  const Address& a, Register d, int offset = 0);
  inline void ldd( const Address& a, Register d, int offset = 0);

  // forms taking either a register or a constant as the second operand
  inline void ldub(  Register s1, RegisterOrConstant s2, Register d );
  inline void ldsb(  Register s1, RegisterOrConstant s2, Register d );
  inline void lduh(  Register s1, RegisterOrConstant s2, Register d );
  inline void ldsh(  Register s1, RegisterOrConstant s2, Register d );
  inline void lduw(  Register s1, RegisterOrConstant s2, Register d );
  inline void ldsw(  Register s1, RegisterOrConstant s2, Register d );
  inline void ldx(   Register s1, RegisterOrConstant s2, Register d );
  inline void ld(    Register s1, RegisterOrConstant s2, Register d );
  inline void ldd(   Register s1, RegisterOrConstant s2, Register d );
1354 1355
1355 1356 // pp 177
1356 1357
  // Alternate-address-space loads (ia is the address space identifier;
  // the immediate forms use the implicit ASI via the i bit).
  void ldsba(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsba(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldsha(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsha(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldswa(  Register s1, Register s2, int ia, Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldswa(  Register s1, int simm13a,         Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduba(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduba(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduha(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduha(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduwa(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduwa(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldxa(   Register s1, Register s2, int ia, Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldxa(   Register s1, int simm13a,         Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldda(   Register s1, Register s2, int ia, Register d ) { v9_dep();   emit_long( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldda(   Register s1, int simm13a,         Register d ) { v9_dep();   emit_long( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1373 1374
1374 1375 // pp 179
1375 1376
  // ldstub: atomic load-store of an unsigned byte (see pp 179)
  inline void ldstub(  Register s1, Register s2, Register d );
  inline void ldstub(  Register s1, int simm13a, Register d);

  // pp 180

  // ldstuba: ldstub in an alternate address space
  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldstuba( Register s1, int simm13a,         Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1383 1384
1384 1385 // pp 181
1385 1386
// Bitwise-logical arithmetic instructions. Each comes in a register/register
// and a register/simm13 form; the "cc" variants additionally set the integer
// condition codes (cc_bit_op3 folded into the op3 field). and3/or3/xor3 are
// named with a trailing '3' to avoid colliding with C++ alternative operator
// keywords (and/or/xor).
1386 1387 void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
1387 1388 void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1388 1389 void andcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1389 1390 void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1390 1391 void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
1391 1392 void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1392 1393 void andn( Register s1, RegisterOrConstant s2, Register d, int offset = 0);
1393 1394 void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1394 1395 void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1395 1396 void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
1396 1397 void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1397 1398 void orcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1398 1399 void orcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1399 1400 void orn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
1400 1401 void orn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1401 1402 void orncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1402 1403 void orncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1403 1404 void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
1404 1405 void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1405 1406 void xorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1406 1407 void xorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1407 1408 void xnor( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
1408 1409 void xnor( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1409 1410 void xnorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1410 1411 void xnorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1411 1412
1412 1413 // pp 183
1413 1414
// membar (V9 only): the 7-bit mask of ordering constraints is packed into the
// low immediate bits (u_field(..., 6, 0)); rs1 is fixed at O7 (r15) per the
// membar encoding.
1414 1415 void membar( Membar_mask_bits const7a ) { v9_only(); emit_long( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
1415 1416
1416 1417 // pp 185
1417 1418
// Conditional FP move on condition codes (FMOVcc). floatCC selects whether
// cca names a floating-point or integer condition-code register.
1418 1419 void fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }
1419 1420
1420 1421 // pp 189
1421 1422
// Conditional FP move on integer register contents (FMOVr); the opf low-5
// field is 4 + width, per the encoding table.
1422 1423 void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }
1423 1424
1424 1425 // pp 191
1425 1426
// Conditional integer move on condition codes (MOVcc); note the immediate
// form takes an 11-bit signed immediate, not the usual 13.
1426 1427 void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
1427 1428 void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }
1428 1429
1429 1430 // pp 195
1430 1431
// Conditional move on integer register contents (MOVr); the immediate form
// takes a 10-bit signed immediate.
1431 1432 void movr( RCondition c, Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
1432 1433 void movr( RCondition c, Register s1, int simm10a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
1433 1434
1434 1435 // pp 196
1435 1436
// 64-bit multiply/divide (V9 only).
1436 1437 void mulx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
1437 1438 void mulx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1438 1439 void sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
1439 1440 void sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1440 1441 void udivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
1441 1442 void udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1442 1443
1443 1444 // pp 197
1444 1445
// 32-bit multiplies (umul/smul); the "cc" variants also set condition codes.
1445 1446 void umul( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | rs2(s2) ); }
1446 1447 void umul( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1447 1448 void smul( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | rs2(s2) ); }
1448 1449 void smul( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1449 1450 void umulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1450 1451 void umulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1451 1452 void smulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1452 1453 void smulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1453 1454
1454 1455 // pp 199
1455 1456
// mulscc: multiply-step instruction, deprecated in V9 (v9_dep).
1456 1457 void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
1457 1458 void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1458 1459
1459 1460 // pp 201
1460 1461
// nop is encoded as "sethi 0, %g0" (branch_op with sethi_op2 and all other
// fields zero).
1461 1462 void nop() { emit_long( op(branch_op) | op2(sethi_op2) ); }
1462 1463
1463 1464
1464 1465 // pp 202
1465 1466
// popc (V9 only): population count of s (or of the sign-extended immediate).
1466 1467 void popc( Register s, Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
1467 1468 void popc( int simm13a, Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }
1468 1469
1469 1470 // pp 203
1470 1471
// prefetch/prefetcha: the prefetch function code f occupies the rd field
// (fcn(f)); prefetcha is the ASI-qualified V9-only form.
1471 1472 void prefetch( Register s1, Register s2, PrefetchFcn f);
1472 1473 void prefetch( Register s1, int simm13a, PrefetchFcn f);
1473 1474 void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1474 1475 void prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1475 1476
1476 1477 inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
1477 1478
1478 1479 // pp 208
1479 1480
1480 1481 // not implementing read privileged register
1481 1482
// Reads of ancillary state registers; the register number is encoded in the
// rs1 field (bits 18:14): 0=%y, 2=%ccr, 3=%asi, 4=%tick, 5=%pc, 6=%fprs.
// rdy is deprecated in V9; the rest are V9-only.
1482 1483 inline void rdy( Register d) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
1483 1484 inline void rdccr( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
1484 1485 inline void rdasi( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
1485 1486 inline void rdtick( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
1486 1487 inline void rdpc( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
1487 1488 inline void rdfprs( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
1488 1489
1489 1490 // pp 213
1490 1491
1491 1492 inline void rett( Register s1, Register s2);
1492 1493 inline void rett( Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);
1493 1494
1494 1495 // pp 214
1495 1496
// save/restore: register-window push/pop. The immediate form of save asserts
// that the (negative) frame displacement leaves room for the 16-word register
// save area at the base of the new frame.
1496 1497 void save( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
1497 1498 void save( Register s1, int simm13a, Register d ) {
1498 1499 // make sure frame is at least large enough for the register save area
1499 1500 assert(-simm13a >= 16 * wordSize, "frame too small");
1500 1501 emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
1501 1502 }
1502 1503
1503 1504 void restore( Register s1 = G0, Register s2 = G0, Register d = G0 ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
1504 1505 void restore( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1505 1506
1506 1507 // pp 216
1507 1508
// saved/restored (V9 only): distinguished only by the fcn field (0 vs 1)
// over the shared saved_op3 opcode.
1508 1509 void saved() { v9_only(); emit_long( op(arith_op) | fcn(0) | op3(saved_op3)); }
1509 1510 void restored() { v9_only(); emit_long( op(arith_op) | fcn(1) | op3(saved_op3)); }
1510 1511
1511 1512 // pp 217
1512 1513
// sethi: loads the 22-bit immediate into the high bits of d; optionally
// carries relocation info for patchable constants.
1513 1514 inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
1514 1515 // pp 218
1515 1516
// Shift instructions. The 32-bit forms set sx(0) and take a 5-bit shift
// count; the V9-only 64-bit "x" forms set sx(1) and take a 6-bit count.
1516 1517 void sll( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1517 1518 void sll( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1518 1519 void srl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1519 1520 void srl( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1520 1521 void sra( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1521 1522 void sra( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1522 1523
1523 1524 void sllx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1524 1525 void sllx( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
1525 1526 void srlx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1526 1527 void srlx( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
1527 1528 void srax( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1528 1529 void srax( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
1529 1530
1530 1531 // pp 220
1531 1532
// sir: software-initiated reset; fcn(15) in the rd field.
1532 1533 void sir( int simm13a ) { emit_long( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }
1533 1534
1534 1535 // pp 221
1535 1536
// stbar: store barrier, encoded over membar_op3 with rs1 field = 15.
1536 1537 void stbar() { emit_long( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
1537 1538
1538 1539 // pp 222
1539 1540
// Floating-point stores (definitions in the .inline.hpp); w selects
// single/double/quad width.
1540 1541 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
1541 1542 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
1542 1543 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
1543 1544 inline void stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
1544 1545
// Stores of the FP state register (%fsr), 32-bit and 64-bit forms.
1545 1546 inline void stfsr( Register s1, Register s2 );
1546 1547 inline void stfsr( Register s1, int simm13a);
1547 1548 inline void stxfsr( Register s1, Register s2 );
1548 1549 inline void stxfsr( Register s1, int simm13a);
1549 1550
1550 1551 // pp 224
1551 1552
// stfa (V9 only): FP store to alternate space; alt_op3 adjusts the opcode
// for the requested width.
1552 1553 void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1553 1554 void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1554 1555
1555 1556 // p 226
1556 1557
// Integer stores: byte/half/word/doubleword/extended, plus Address- and
// RegisterOrConstant-based convenience forms (definitions in the
// .inline.hpp). Note the d-first argument order: d is the value stored.
1557 1558 inline void stb( Register d, Register s1, Register s2 );
1558 1559 inline void stb( Register d, Register s1, int simm13a);
1559 1560 inline void sth( Register d, Register s1, Register s2 );
1560 1561 inline void sth( Register d, Register s1, int simm13a);
1561 1562 inline void stw( Register d, Register s1, Register s2 );
1562 1563 inline void stw( Register d, Register s1, int simm13a);
1563 1564 inline void st( Register d, Register s1, Register s2 );
1564 1565 inline void st( Register d, Register s1, int simm13a);
1565 1566 inline void stx( Register d, Register s1, Register s2 );
1566 1567 inline void stx( Register d, Register s1, int simm13a);
1567 1568 inline void std( Register d, Register s1, Register s2 );
1568 1569 inline void std( Register d, Register s1, int simm13a);
1569 1570
1570 1571 #ifdef ASSERT
1571 1572 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1572 1573 inline void st( Register d, Register s1, ByteSize simm13a);
1573 1574 #endif
1574 1575
1575 1576 inline void stb( Register d, const Address& a, int offset = 0 );
1576 1577 inline void sth( Register d, const Address& a, int offset = 0 );
1577 1578 inline void stw( Register d, const Address& a, int offset = 0 );
1578 1579 inline void stx( Register d, const Address& a, int offset = 0 );
1579 1580 inline void st( Register d, const Address& a, int offset = 0 );
1580 1581 inline void std( Register d, const Address& a, int offset = 0 );
1581 1582
1582 1583 inline void stb( Register d, Register s1, RegisterOrConstant s2 );
1583 1584 inline void sth( Register d, Register s1, RegisterOrConstant s2 );
1584 1585 inline void stw( Register d, Register s1, RegisterOrConstant s2 );
1585 1586 inline void stx( Register d, Register s1, RegisterOrConstant s2 );
1586 1587 inline void std( Register d, Register s1, RegisterOrConstant s2 );
1587 1588 inline void st( Register d, Register s1, RegisterOrConstant s2 );
1588 1589
1589 1590 // pp 177
1590 1591
// Alternate-space stores: alt_bit_op3 selects the ASI-qualified opcode;
// stxa is V9-only, the others are common to V8/V9.
1591 1592 void stba( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1592 1593 void stba( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1593 1594 void stha( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1594 1595 void stha( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1595 1596 void stwa( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1596 1597 void stwa( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1597 1598 void stxa( Register d, Register s1, Register s2, int ia ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1598 1599 void stxa( Register d, Register s1, int simm13a ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1599 1600 void stda( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1600 1601 void stda( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1601 1602
1602 1603 // pp 97 (v8)
1603 1604
// V8-era coprocessor stores (stc/stdc/stcsr/stdcq); crd is the raw
// coprocessor register number. Definitions in the .inline.hpp.
1604 1605 inline void stc( int crd, Register s1, Register s2 );
1605 1606 inline void stc( int crd, Register s1, int simm13a);
1606 1607 inline void stdc( int crd, Register s1, Register s2 );
1607 1608 inline void stdc( int crd, Register s1, int simm13a);
1608 1609 inline void stcsr( int crd, Register s1, Register s2 );
1609 1610 inline void stcsr( int crd, Register s1, int simm13a);
1610 1611 inline void stdcq( int crd, Register s1, Register s2 );
1611 1612 inline void stdcq( int crd, Register s1, int simm13a);
1612 1613
1613 1614 // pp 230
1614 1615
// Subtract family: subcc sets condition codes; subc subtracts with carry;
// subccc does both.
1615 1616 void sub( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
1616 1617 void sub( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1617 1618 void subcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
1618 1619 void subcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1619 1620 void subc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
1620 1621 void subc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1621 1622 void subccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1622 1623 void subccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1623 1624
1624 1625 // pp 231
1625 1626
// swap: atomic exchange of register and memory word (definitions in the
// .inline.hpp).
1626 1627 inline void swap( Register s1, Register s2, Register d );
1627 1628 inline void swap( Register s1, int simm13a, Register d);
1628 1629 inline void swap( Address& a, Register d, int offset = 0 );
1629 1630
1630 1631 // pp 232
1631 1632
// swapa: alternate-space form of swap, deprecated in V9 (v9_dep).
1632 1633 void swapa( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1633 1634 void swapa( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1634 1635
1635 1636 // pp 234, note op in book is wrong, see pp 268
1636 1637
// Tagged add: taddcc sets condition codes on tag overflow; taddcctv also
// traps on overflow (deprecated in V9, hence v9_dep).
1637 1638 void taddcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
1638 1639 void taddcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1639 1640 void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
1640 1641 void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1641 1642
1642 1643 // pp 235
1643 1644
// Tagged subtract, analogous to the tagged adds above.
1644 1645 void tsubcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
1645 1646 void tsubcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1646 1647 void tsubcctv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
1647 1648 void tsubcctv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1648 1649
1649 1650 // pp 237
1650 1651
// Conditional trap; v8_no_cc verifies no V9-only condition-code register is
// requested when generating V8 code. The immediate form packs the trap
// number into the low 7 bits.
1651 1652 void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
1652 1653 void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
1653 1654 // simple uncond. trap
1654 1655 void trap( int trapa ) { trap( always, icc, G0, trapa ); }
1655 1656
1656 1657 // pp 239 omit write priv register for now
1657 1658
// Writes of ancillary state registers; the target register number sits in
// bits 29:25 (u_field(..., 29, 25)): 0=%y, 2=%ccr, 3=%asi, 6=%fprs. The
// source register goes in the rs1 field. wry is deprecated in V9.
1658 1659 inline void wry( Register d) { v9_dep(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
1659 1660 inline void wrccr(Register s) { v9_only(); emit_long( op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
// wr %s, simm13, %ccr — immediate form; u_field(1, 13, 13) is the i-bit.
1660 1661 inline void wrccr(Register s, int simm13a) { v9_only(); emit_long( op(arith_op) |
1661 1662 rs1(s) |
1662 1663 op3(wrreg_op3) |
1663 1664 u_field(2, 29, 25) |
1664 1665 u_field(1, 13, 13) |
1665 1666 simm(simm13a, 13)); }
1666 1667 inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
1667 1668 inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
1668 1669
1669 1670 // For a given register condition, return the appropriate condition code
1670 1671 // Condition (the one you would use to get the same effect after "tst" on
1671 1672 // the target register.)
1672 1673 Assembler::Condition reg_cond_to_cc_cond(RCondition in);
1673 1674
1674 1675
1675 1676 // Creation
// Constructor: delegates to AbstractAssembler; when delay-slot checking is
// compiled in, starts with no pending delay slot.
1676 1677 Assembler(CodeBuffer* code) : AbstractAssembler(code) {
1677 1678 #ifdef CHECK_DELAY
1678 1679 delay_state = no_delay;
1679 1680 #endif
1680 1681 }
1681 1682
1682 1683 // Testing
// Self-test entry points, available only in non-product builds.
1683 1684 #ifndef PRODUCT
1684 1685 void test_v9();
1685 1686 void test_v8_onlys();
1686 1687 #endif
1687 1688 };
1688 1689
1689 1690
// Snapshot of the full SPARC register file (in/local/out/global integer
// registers plus the single- and double-precision FP registers), used by
// debugging stubs to dump and restore machine state around VM calls.
1690 1691 class RegistersForDebugging : public StackObj {
1691 1692 public:
// i/l/o/g: the four 8-register integer groups; f/d: FP registers viewed as
// 32 singles and 32 doubles (overlapping storage is NOT implied here — they
// are separate arrays in this snapshot).
1692 1693 intptr_t i[8], l[8], o[8], g[8];
1693 1694 float f[32];
1694 1695 double d[32];
1695 1696
1696 1697 void print(outputStream* s);
1697 1698
// Byte offsets of individual slots, for use by generated assembly that
// stores into / loads from an instance of this struct. Note d_offset
// halves j because double-register numbers are even (d0, d2, ...).
1698 1699 static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
1699 1700 static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
1700 1701 static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
1701 1702 static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
1702 1703 static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
1703 1704 static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
1704 1705
1705 1706 // gen asm code to save regs
1706 1707 static void save_registers(MacroAssembler* a);
1707 1708
1708 1709 // restore global registers in case C code disturbed them
1709 1710 static void restore_registers(MacroAssembler* a, Register r);
1710 1711
1711 1712
1712 1713 };
1713 1714
1714 1715
1715 1716 // MacroAssembler extends Assembler by a few frequently used macros.
1716 1717 //
1717 1718 // Most of the standard SPARC synthetic ops are defined here.
1718 1719 // Instructions for which a 'better' code sequence exists depending
1719 1720 // on arguments should also go in here.
1720 1721
// Convenience wrappers that forward the call site's file/line to the jump
// helpers (used for traceable jumps; see TraceJumps usage below).
1721 1722 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
1722 1723 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
1723 1724 #define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
1724 1725 #define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
1725 1726
1726 1727
1727 1728 class MacroAssembler: public Assembler {
1728 1729 protected:
1729 1730 // Support for VM calls
1730 1731 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
1731 1732 // may customize this version by overriding it for its purposes (e.g., to save/restore
1732 1733 // additional registers when doing a VM call).
1733 1734 #ifdef CC_INTERP
1734 1735 #define VIRTUAL
1735 1736 #else
1736 1737 #define VIRTUAL virtual
1737 1738 #endif
1738 1739
1739 1740 VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
1740 1741
1741 1742 //
1742 1743 // It is imperative that all calls into the VM are handled via the call_VM macros.
1743 1744 // They make sure that the stack linkage is setup correctly. call_VM's correspond
1744 1745 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
1745 1746 //
1746 1747 // This is the base routine called by the different versions of call_VM. The interpreter
1747 1748 // may customize this version by overriding it for its purposes (e.g., to save/restore
1748 1749 // additional registers when doing a VM call).
1749 1750 //
1750 1751 // A non-volatile java_thread_cache register should be specified so
1751 1752 // that the G2_thread value can be preserved across the call.
1752 1753 // (If java_thread_cache is noreg, then a slow get_thread call
1753 1754 // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
1754 1755 // thread.
1755 1756 //
1756 1757 // If no last_java_sp is specified (noreg) than SP will be used instead.
1757 1758
1758 1759 virtual void call_VM_base(
1759 1760 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
1760 1761 Register java_thread_cache, // the thread if computed before ; use noreg otherwise
1761 1762 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
1762 1763 address entry_point, // the entry point
1763 1764 int number_of_arguments, // the number of arguments (w/o thread) to pop after call
1764 1765 bool check_exception=true // flag which indicates if exception should be checked
1765 1766 );
1766 1767
1767 1768 // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
1768 1769 // The implementation is only non-empty for the InterpreterMacroAssembler,
1769 1770 // as only the interpreter handles and ForceEarlyReturn PopFrame requests.
1770 1771 virtual void check_and_handle_popframe(Register scratch_reg);
1771 1772 virtual void check_and_handle_earlyret(Register scratch_reg);
1772 1773
1773 1774 public:
1774 1775 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
1775 1776
1776 1777 // Support for NULL-checks
1777 1778 //
1778 1779 // Generates code that causes a NULL OS exception if the content of reg is NULL.
1779 1780 // If the accessed location is M[reg + offset] and the offset is known, provide the
1780 1781 // offset. No explicit code generation is needed if the offset is within a certain
1781 1782 // range (0 <= offset <= page_size).
1782 1783 //
1783 1784 // %%%%%% Currently not done for SPARC
1784 1785
1785 1786 void null_check(Register reg, int offset = -1);
1786 1787 static bool needs_explicit_null_check(intptr_t offset);
1787 1788
1788 1789 // support for delayed instructions
1789 1790 MacroAssembler* delayed() { Assembler::delayed(); return this; }
1790 1791
1791 1792 // branches that use right instruction for v8 vs. v9
1792 1793 inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1793 1794 inline void br( Condition c, bool a, Predict p, Label& L );
1794 1795 inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1795 1796 inline void fb( Condition c, bool a, Predict p, Label& L );
1796 1797
1797 1798 // compares register with zero and branches (V9 and V8 instructions)
1798 1799 void br_zero( Condition c, bool a, Predict p, Register s1, Label& L);
1799 1800 // Compares a pointer register with zero and branches on (not)null.
1800 1801 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
1801 1802 void br_null ( Register s1, bool a, Predict p, Label& L );
1802 1803 void br_notnull( Register s1, bool a, Predict p, Label& L );
1803 1804
1804 1805 // These versions will do the most efficient thing on v8 and v9. Perhaps
1805 1806 // this is what the routine above was meant to do, but it didn't (and
1806 1807 // didn't cover both target address kinds.)
1807 1808 void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
1808 1809 void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);
1809 1810
1810 1811 inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1811 1812 inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
1812 1813 
1813 1814 // Branch that tests xcc in LP64 and icc in !LP64
1814 1815 inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1815 1816 inline void brx( Condition c, bool a, Predict p, Label& L );
1816 1817 
1817 1818 // unconditional short branch
1818 1819 inline void ba( bool a, Label& L );
1819 1820 
1820 1821 // Branch that tests fp condition codes
1821 1822 inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1822 1823 inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1823 1824 
1824 1825 // get PC the best way
1825 1826 inline int get_pc( Register d );
1826 1827 
1827 1828 // Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
// cmp: compare by subtracting into G0 (the always-zero sink register),
// keeping only the condition-code side effect of subcc.
1828 1829 inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
1829 1830 inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
1830 1831 
1831 1832 inline void jmp( Register s1, Register s2 );
1832 1833 inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1833 1834 
1834 1835 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
1835 1836 inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
1836 1837 inline void callr( Register s1, Register s2 );
1837 1838 inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1838 1839 
1839 1840 // Emits nothing on V8
1840 1841 inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
1841 1842 inline void iprefetch( Label& L);
1842 1843 
// tst: sets condition codes from s (or with G0, result discarded into G0).
1843 1844 inline void tst( Register s ) { orcc( G0, s, G0 ); }
1844 1845 
// ret/retl: return from a callee (I7) or leaf routine (O7); the
// "+2 instructions" skips the call and its delay slot. With TraceJumps
// the JMP macro routes through the traceable-jump ring buffer instead.
1845 1846 #ifdef PRODUCT
1846 1847 inline void ret( bool trace = TraceJumps ) { if (trace) {
1847 1848 mov(I7, O7); // traceable register
1848 1849 JMP(O7, 2 * BytesPerInstWord);
1849 1850 } else {
1850 1851 jmpl( I7, 2 * BytesPerInstWord, G0 );
1851 1852 }
1852 1853 }
1853 1854 
1854 1855 inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
1855 1856 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
1856 1857 #else
1857 1858 void ret( bool trace = TraceJumps );
1858 1859 void retl( bool trace = TraceJumps );
1859 1860 #endif /* PRODUCT */
1860 1861 
1861 1862 // Required platform-specific helpers for Label::patch_instructions.
1862 1863 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
1863 1864 void pd_patch_instruction(address branch, address target);
1864 1865 #ifndef PRODUCT
1865 1866 static void pd_print_patched_instruction(address branch);
1866 1867 #endif
1867 1868 
1868 1869 // sethi Macro handles optimizations and relocations
// patchable_* variants force a fixed-size, relocatable instruction
// sequence so the target can be patched after emission.
1869 1870 private:
1870 1871 void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
1871 1872 public:
1872 1873 void sethi(const AddressLiteral& addrlit, Register d);
1873 1874 void patchable_sethi(const AddressLiteral& addrlit, Register d);
1874 1875 
1875 1876 // compute the size of a sethi/set
1876 1877 static int size_of_sethi( address a, bool worst_case = false );
1877 1878 static int worst_case_size_of_set();
1878 1879 
1879 1880 // set may be either setsw or setuw (high 32 bits may be zero or sign)
1880 1881 private:
1881 1882 void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
1882 1883 public:
1883 1884 void set(const AddressLiteral& addrlit, Register d);
1884 1885 void set(intptr_t value, Register d);
1885 1886 void set(address addr, Register d, RelocationHolder const& rspec);
1886 1887 void patchable_set(const AddressLiteral& addrlit, Register d);
1887 1888 void patchable_set(intptr_t value, Register d);
1888 1889 void set64(jlong value, Register d, Register tmp);
1889 1890 
1890 1891 // sign-extend 32 to 64
1891 1892 inline void signx( Register s, Register d ) { sra( s, G0, d); }
1892 1893 inline void signx( Register d ) { sra( d, G0, d); }
1893 1894 
// bitwise NOT via xnor with the zero register
1894 1895 inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
1895 1896 inline void not1( Register d ) { xnor( d, G0, d ); }
1896 1897 
// two's-complement negate: 0 - s
1897 1898 inline void neg( Register s, Register d ) { sub( G0, s, d ); }
1898 1899 inline void neg( Register d ) { sub( G0, d, d ); }
1899 1900 
// compare-and-swap against [s1] in the primary address space;
// d holds the new value on entry and the old memory value on exit.
1900 1901 inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
1901 1902 inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
1902 1903 // Functions for isolating 64 bit atomic swaps for LP64
1903 1904 // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
1904 1905 inline void cas_ptr( Register s1, Register s2, Register d) {
1905 1906 #ifdef _LP64
1906 1907 casx( s1, s2, d );
1907 1908 #else
1908 1909 cas( s1, s2, d );
1909 1910 #endif
1910 1911 }
1911 1912 
1912 1913 // Functions for isolating 64 bit shifts for LP64
1913 1914 inline void sll_ptr( Register s1, Register s2, Register d );
1914 1915 inline void sll_ptr( Register s1, int imm6a, Register d );
1915 1916 inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
1916 1917 inline void srl_ptr( Register s1, Register s2, Register d );
1917 1918 inline void srl_ptr( Register s1, int imm6a, Register d );
1918 1919 
1919 1920 // little-endian
1920 1921 inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
1921 1922 inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
1922 1923 
// in-place increment/decrement; the cc variants also set condition codes
1923 1924 inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
1924 1925 inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }
1925 1926 
1926 1927 inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
1927 1928 inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }
1928 1929 
// bit test: only condition codes are produced (result sunk into G0)
1929 1930 inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
1930 1931 inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }
1931 1932 
// NOTE: bset/bclr/btog write the result back into their last register
// operand (s2 resp. s), i.e. they modify that register in place.
1932 1933 inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
1933 1934 inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }
1934 1935 
1935 1936 inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
1936 1937 inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }
1937 1938 
1938 1939 inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
1939 1940 inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
1940 1941 
1941 1942 inline void clr( Register d ) { or3( G0, G0, d ); }
1942 1943 
// clear memory at [s1 + s2] / [s1 + simm13a], by byte/half/word/extended width
1943 1944 inline void clrb( Register s1, Register s2);
1944 1945 inline void clrh( Register s1, Register s2);
1945 1946 inline void clr( Register s1, Register s2);
1946 1947 inline void clrx( Register s1, Register s2);
1947 1948 
1948 1949 inline void clrb( Register s1, int simm13a);
1949 1950 inline void clrh( Register s1, int simm13a);
1950 1951 inline void clr( Register s1, int simm13a);
1951 1952 inline void clrx( Register s1, int simm13a);
1952 1953 
1953 1954 // copy & clear upper word
1954 1955 inline void clruw( Register s, Register d ) { srl( s, G0, d); }
1955 1956 // clear upper word
1956 1957 inline void clruwu( Register d ) { srl( d, G0, d); }
1957 1958 
1958 1959 // membar pseudo instruction. takes into account target memory model.
1959 1960 inline void membar( Assembler::Membar_mask_bits const7a );
1960 1961 
1961 1962 // returns if membar generates anything.
1962 1963 inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
1963 1964 
1964 1965 // mov pseudo instructions
// A self-move would be a nop, which is only legal outside a delay slot;
// the assert documents that the caller wasted a potential delay-slot fill.
1965 1966 inline void mov( Register s, Register d) {
1966 1967 if ( s != d ) or3( G0, s, d);
1967 1968 else assert_not_delayed(); // Put something useful in the delay slot!
1968 1969 }
1969 1970 
// Like mov, but a self-move emits an explicit nop (safe in a delay slot).
1970 1971 inline void mov_or_nop( Register s, Register d) {
1971 1972 if ( s != d ) or3( G0, s, d);
1972 1973 else nop();
1973 1974 }
1974 1975 
1975 1976 inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
1976 1977 
1977 1978 // address pseudos: make these names unlike instruction names to avoid confusion
1978 1979 inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
1979 1980 inline void load_contents(AddressLiteral& addrlit, Register d, int offset = 0);
1980 1981 inline void load_ptr_contents(AddressLiteral& addrlit, Register d, int offset = 0);
1981 1982 inline void store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
1982 1983 inline void store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
1983 1984 inline void jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
1984 1985 inline void jump_to(AddressLiteral& addrlit, Register temp, int offset = 0);
1985 1986 inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
1986 1987 
1987 1988 // ring buffer traceable jumps
1988 1989 
1989 1990 void jmp2( Register r1, Register r2, const char* file, int line );
1990 1991 void jmp ( Register r1, int offset, const char* file, int line );
1991 1992 
1992 1993 void jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
1993 1994 void jump (AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
1994 1995 
1995 1996 
1996 1997 // argument pseudos:
1997 1998 
1998 1999 inline void load_argument( Argument& a, Register d );
1999 2000 inline void store_argument( Register s, Argument& a );
2000 2001 inline void store_ptr_argument( Register s, Argument& a );
2001 2002 inline void store_float_argument( FloatRegister s, Argument& a );
2002 2003 inline void store_double_argument( FloatRegister s, Argument& a );
2003 2004 inline void store_long_argument( Register s, Argument& a );
2004 2005 
2005 2006 // handy macros:
2006 2007 
// Rounds r up to a multiple of modulus via add/mask; exact only when
// modulus is a power of two (-modulus is then a contiguous high mask).
2007 2008 inline void round_to( Register r, int modulus ) {
2008 2009 assert_not_delayed();
2009 2010 inc( r, modulus - 1 );
2010 2011 and3( r, -modulus, r );
2011 2012 }
2013 2014 // --------------------------------------------------
2014 2015
2015 2016 // Functions for isolating 64 bit loads for LP64
2016 2017 // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
2017 2018 // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
2018 2019 inline void ld_ptr(Register s1, Register s2, Register d);
2019 2020 inline void ld_ptr(Register s1, int simm13a, Register d);
2020 2021 inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
2021 2022 inline void ld_ptr(const Address& a, Register d, int offset = 0);
2022 2023 inline void st_ptr(Register d, Register s1, Register s2);
2023 2024 inline void st_ptr(Register d, Register s1, int simm13a);
2024 2025 inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
2025 2026 inline void st_ptr(Register d, const Address& a, int offset = 0);
2026 2027
2027 2028 #ifdef ASSERT
2028 2029 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
2029 2030 inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
2030 2031 inline void st_ptr(Register d, Register s1, ByteSize simm13a);
2031 2032 #endif
2032 2033
2033 2034 // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
2034 2035 // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
2035 2036 inline void ld_long(Register s1, Register s2, Register d);
2036 2037 inline void ld_long(Register s1, int simm13a, Register d);
2037 2038 inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
2038 2039 inline void ld_long(const Address& a, Register d, int offset = 0);
2039 2040 inline void st_long(Register d, Register s1, Register s2);
2040 2041 inline void st_long(Register d, Register s1, int simm13a);
2041 2042 inline void st_long(Register d, Register s1, RegisterOrConstant s2);
2042 2043 inline void st_long(Register d, const Address& a, int offset = 0);
2043 2044
2044 2045 // Helpers for address formation.
2045 2046 // They emit no code at all if src is a constant zero.
2046 2047 // If dest is a constant and src is a register, the temp argument
2047 2048 // is required, and becomes the result.
2048 2049 // If dest is a register and src is a non-simm13 constant,
2049 2050 // the temp argument is required, and is used to materialize the constant.
2050 2051 RegisterOrConstant regcon_andn_ptr(RegisterOrConstant dest, RegisterOrConstant src, Register temp = noreg);
2051 2052 RegisterOrConstant regcon_inc_ptr( RegisterOrConstant dest, RegisterOrConstant src, Register temp = noreg);
2052 2053 RegisterOrConstant regcon_sll_ptr( RegisterOrConstant dest, RegisterOrConstant src, Register temp = noreg);
2053 2054
// Return an operand directly usable in a simm13 field: roc itself when it
// is a register or a constant fitting in 13 signed bits; otherwise the
// wide constant is materialized into Rtemp, which becomes the result.
2054 2055 RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
2055 2056 if (is_simm13(roc.constant_or_zero()))
2056 2057 return roc; // register or short constant: no temp register needed
// Only the materialization path requires a temp, so the guarantee is
// checked here; the old unconditional placement rejected Rtemp == noreg
// even for operands that never touch the temp.
2057 2058 guarantee(Rtemp != noreg, "constant offset overflow");
2058 2059 set(roc.as_constant(), Rtemp);
2059 2060 return RegisterOrConstant(Rtemp);
2060 2061 }
2061 2062
2062 2063 // --------------------------------------------------
2063 2064 
2064 2065 public:
2065 2066 // traps as per trap.h (SPARC ABI?)
2066 2067 
2067 2068 void breakpoint_trap();
2068 2069 void breakpoint_trap(Condition c, CC cc = icc);
2069 2070 void flush_windows_trap();
2070 2071 void clean_windows_trap();
2071 2072 void get_psr_trap();
2072 2073 void set_psr_trap();
2073 2074 
2074 2075 // V8/V9 flush_windows
2075 2076 void flush_windows();
2076 2077 
2077 2078 // Support for serializing memory accesses between threads
2078 2079 void serialize_memory(Register thread, Register tmp1, Register tmp2);
2079 2080 
2080 2081 // Stack frame creation/removal
2081 2082 void enter();
2082 2083 void leave();
2083 2084 
2084 2085 // V8/V9 integer multiply
2085 2086 void mult(Register s1, Register s2, Register d);
2086 2087 void mult(Register s1, int simm13a, Register d);
2087 2088 
2088 2089 // V8/V9 read and write of condition codes.
2089 2090 void read_ccr(Register d);
2090 2091 void write_ccr(Register s);
2091 2092 
2092 2093 // Manipulation of C++ bools
2093 2094 // These are idioms to flag the need for care with accessing bools but on
2094 2095 // this platform we assume byte size
2095 2096 
2096 2097 inline void stbool(Register d, const Address& a) { stb(d, a); }
2097 2098 inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
2098 2099 inline void tstbool( Register s ) { tst(s); }
2099 2100 inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
2100 2101 
2101 2102 // klass oop manipulations if compressed
2102 2103 void load_klass(Register src_oop, Register klass);
2103 2104 void store_klass(Register klass, Register dst_oop);
2104 2105 void store_klass_gap(Register s, Register dst_oop);
2105 2106 
2106 2107 // oop manipulations
2107 2108 void load_heap_oop(const Address& s, Register d);
2108 2109 void load_heap_oop(Register s1, Register s2, Register d);
2109 2110 void load_heap_oop(Register s1, int simm13a, Register d);
2110 2111 void store_heap_oop(Register d, Register s1, Register s2);
2111 2112 void store_heap_oop(Register d, Register s1, int simm13a);
2112 2113 void store_heap_oop(Register d, const Address& a, int offset = 0);
2113 2114 
// in-place (src == dst) convenience forms of encode/decode
2114 2115 void encode_heap_oop(Register src, Register dst);
2115 2116 void encode_heap_oop(Register r) {
2116 2117 encode_heap_oop(r, r);
2117 2118 }
2118 2119 void decode_heap_oop(Register src, Register dst);
2119 2120 void decode_heap_oop(Register r) {
2120 2121 decode_heap_oop(r, r);
2121 2122 }
2122 2123 void encode_heap_oop_not_null(Register r);
2123 2124 void decode_heap_oop_not_null(Register r);
2124 2125 void encode_heap_oop_not_null(Register src, Register dst);
2125 2126 void decode_heap_oop_not_null(Register src, Register dst);
2127 2128 // Support for managing the JavaThread pointer (i.e.; the reference to
2128 2129 // thread-local information).
2129 2130 void get_thread(); // load G2_thread
2130 2131 void verify_thread(); // verify G2_thread contents
// Parameter renamed from the typo "threache" to match restore_thread;
// this is a declaration-only name, so no caller or definition breaks.
2131 2132 void save_thread (const Register thread_cache); // save G2_thread to cache
2132 2133 void restore_thread(const Register thread_cache); // restore G2_thread from cache
2133 2134 
2134 2135 // Support for last Java frame (but use call_VM instead where possible)
2135 2136 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
2136 2137 void reset_last_Java_frame(void);
2137 2138
2138 2139 // Call into the VM.
2139 2140 // Passes the thread pointer (in O0) as a prepended argument.
2140 2141 // Makes sure oop return values are visible to the GC.
2141 2142 void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2142 2143 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
2143 2144 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2144 2145 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2145 2146 
2146 2147 // these overloadings are not presently used on SPARC:
2147 2148 void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2148 2149 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
2149 2150 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2150 2151 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2151 2152 
2152 2153 void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
2153 2154 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
2154 2155 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
2155 2156 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
2156 2157 
2157 2158 void get_vm_result (Register oop_result);
2158 2159 void get_vm_result_2(Register oop_result);
2159 2160 
2160 2161 // vm result is currently getting hijacked for oop preservation
2161 2162 void set_vm_result(Register oop_result);
2162 2163 
2163 2164 // if call_VM_base was called with check_exceptions=false, then call
2164 2165 // check_and_forward_exception to handle exceptions when it is safe
2165 2166 void check_and_forward_exception(Register scratch_reg);
2166 2167 
2167 2168 private:
2168 2169 // For V8
2169 2170 void read_ccr_trap(Register ccr_save);
2170 2171 void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
2171 2172 
2172 2173 #ifdef ASSERT
2173 2174 // For V8 debugging. Uses V8 instruction sequence and checks
2174 2175 // result with V9 instructions rdccr and wrccr.
2175 2176 // Uses Gscatch and Gscatch2
2176 2177 void read_ccr_v8_assert(Register ccr_save);
2177 2178 void write_ccr_v8_assert(Register ccr_save);
2178 2179 #endif // ASSERT
2179 2180 
2180 2181 public:
2181 2182 
2182 2183 // Write to card table for - register is destroyed afterwards.
2183 2184 void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
2184 2185 
2185 2186 void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2186 2187 
2187 2188 #ifndef SERIALGC
2188 2189 // Array store and offset
2189 2190 void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
2190 2191 
2191 2192 void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2192 2193 
2193 2194 // May do filtering, depending on the boolean arguments.
2194 2195 void g1_card_table_write(jbyte* byte_map_base,
2195 2196 Register tmp, Register obj, Register new_val,
2196 2197 bool region_filter, bool null_filter);
2197 2198 #endif // SERIALGC
2198 2199 
2199 2200 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
2200 2201 void push_fTOS();
2201 2202 
2202 2203 // pops double TOS element from CPU stack and pushes on FPU stack
2203 2204 void pop_fTOS();
2204 2205 
2205 2206 void empty_FPU_stack();
2206 2207 
2207 2208 void push_IU_state();
2208 2209 void pop_IU_state();
2209 2210 
2210 2211 void push_FPU_state();
2211 2212 void pop_FPU_state();
2212 2213 
2213 2214 void push_CPU_state();
2214 2215 void pop_CPU_state();
2215 2216 
2216 2217 // if heap base register is used - reinit it with the correct value
2217 2218 void reinit_heapbase();
2218 2219 
2219 2220 // Debugging
2220 2221 void _verify_oop(Register reg, const char * msg, const char * file, int line);
2221 2222 void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
2222 2223 
2223 2224 #define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
2224 2225 #define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
2225 2226 
2226 2227 // only if +VerifyFPU
2227 2228 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
2228 2229 // stop/warn are unconditional (not gated by a Verify* flag)
2229 2230 void stop(const char* msg); // prints msg, dumps registers and stops execution
2230 2231 void warn(const char* msg); // prints msg, but don't stop
2231 2232 void untested(const char* what = "");
// Print "unimplemented: <what>" and halt via stop(). snprintf bounds the
// write so an oversized `what` cannot overflow the fixed 1024-byte
// buffer (the old sprintf could). The buffer is never freed on purpose:
// stop() does not return. snprintf comes from the same stdio header the
// previous sprintf already required.
2232 2233 void unimplemented(const char* what = "") { char* b = new char[1024]; snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
2233 2234 void should_not_reach_here() { stop("should not reach here"); }
2234 2235 void print_CPU_state();
2235 2236 
2236 2237 // oops in code
2237 2238 AddressLiteral allocate_oop_address(jobject obj); // allocate_index
2238 2239 AddressLiteral constant_oop_address(jobject obj); // find_index
2239 2240 inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
2240 2241 inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
2241 2242 inline void set_oop (AddressLiteral& obj_addr, Register d); // same as load_address
2242 2243 
2243 2244 void set_narrow_oop( jobject obj, Register d );
2244 2245 
2245 2246 // nop padding
2246 2247 void align(int modulus);
2247 2248 
2248 2249 // declare a safepoint
2249 2250 void safepoint();
2250 2251 
2251 2252 // factor out part of stop into subroutine to save space
2252 2253 void stop_subroutine();
2253 2254 // factor out part of verify_oop into subroutine to save space
2254 2255 void verify_oop_subroutine();
2255 2256 
2256 2257 // side-door communication with signalHandler in os_solaris.cpp
2257 2258 static address _verify_oop_implicit_branch[3];
2258 2259 
2259 2260 #ifndef PRODUCT
2260 2261 static void test();
2261 2262 #endif
2262 2263 
2263 2264 // convert an incoming arglist to varargs format; put the pointer in d
2264 2265 void set_varargs( Argument a, Register d );
2265 2266 
2266 2267 int total_frame_size_in_bytes(int extraWords);
2267 2268 
2268 2269 // used when extraWords known statically
2269 2270 void save_frame(int extraWords);
2270 2271 void save_frame_c1(int size_in_bytes);
2271 2272 // make a frame, and simultaneously pass up one or two register values
2272 2273 // into the new register window
2273 2274 void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
2274 2275 
2275 2276 // given the number of (outgoing) params, calculate the number of words
2276 2277 // needed on the frame
2276 2277 void calc_mem_param_words(Register Rparam_words, Register Rresult);
2277 2278 
2278 2279 // used to calculate frame size dynamically
2279 2280 // result is in bytes and must be negated for save inst
2280 2281 void calc_frame_size(Register extraWords, Register resultReg);
2281 2282 
2282 2283 // calc and also save
2283 2284 void calc_frame_size_and_save(Register extraWords, Register resultReg);
2284 2285 
2285 2286 static void debug(char* msg, RegistersForDebugging* outWindow);
2286 2287 
2287 2288 // implementations of bytecodes used by both interpreter and compiler
2288 2289 
// 64-bit long ops on a 32-bit VM take hi/low register pairs
2289 2290 void lcmp( Register Ra_hi, Register Ra_low,
2290 2291 Register Rb_hi, Register Rb_low,
2291 2292 Register Rresult);
2292 2293 
2293 2294 void lneg( Register Rhi, Register Rlow );
2294 2295 
2295 2296 void lshl( Register Rin_high, Register Rin_low, Register Rcount,
2296 2297 Register Rout_high, Register Rout_low, Register Rtemp );
2297 2298 
2298 2299 void lshr( Register Rin_high, Register Rin_low, Register Rcount,
2299 2300 Register Rout_high, Register Rout_low, Register Rtemp );
2300 2301 
2301 2302 void lushr( Register Rin_high, Register Rin_low, Register Rcount,
2302 2303 Register Rout_high, Register Rout_low, Register Rtemp );
2303 2304 
2304 2305 #ifdef _LP64
// single-register variant: longs fit in one register on a 64-bit VM
2305 2306 void lcmp( Register Ra, Register Rb, Register Rresult);
2306 2307 #endif
2307 2308 
2308 2309 // Loading values by size and signed-ness
2309 2310 void load_sized_value(Address src, Register dst, int size_in_bytes, bool is_signed);
2310 2311 
2311 2312 void float_cmp( bool is_float, int unordered_result,
2312 2313 FloatRegister Fa, FloatRegister Fb,
2313 2314 Register Rresult);
2314 2315 
2315 2316 void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2316 2317 void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
2317 2318 void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2318 2319 void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2319 2320 
2320 2321 void save_all_globals_into_locals();
2321 2322 void restore_globals_from_locals();
2322 2323 
2323 2324 void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2324 2325 address lock_addr=0, bool use_call_vm=false);
2325 2326 void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2326 2327 address lock_addr=0, bool use_call_vm=false);
2327 2328 void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
2328 2329 
2329 2330 // These set the icc condition code to equal if the lock succeeded
2330 2331 // and notEqual if it failed and requires a slow case
2331 2332 void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
2332 2333 Register Rscratch,
2333 2334 BiasedLockingCounters* counters = NULL,
2334 2335 bool try_bias = UseBiasedLocking);
2335 2336 void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
2336 2337 Register Rscratch,
2337 2338 bool try_bias = UseBiasedLocking);
2338 2339 
2339 2340 // Biased locking support
2340 2341 // Upon entry, lock_reg must point to the lock record on the stack,
2341 2342 // obj_reg must contain the target object, and mark_reg must contain
2342 2343 // the target object's header.
2343 2344 // Destroys mark_reg if an attempt is made to bias an anonymously
2344 2345 // biased lock. In this case a failure will go either to the slow
2345 2346 // case or fall through with the notEqual condition code set with
2346 2347 // the expectation that the slow case in the runtime will be called.
2347 2348 // In the fall-through case where the CAS-based lock is done,
2348 2349 // mark_reg is not destroyed.
2349 2350 void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
2350 2351 Label& done, Label* slow_case = NULL,
2351 2352 BiasedLockingCounters* counters = NULL);
2352 2353 // Upon entry, the base register of mark_addr must contain the oop.
2353 2354 // Destroys temp_reg.
2354 2355 
2355 2356 // If allow_delay_slot_filling is set to true, the next instruction
2356 2357 // emitted after this one will go in an annulled delay slot if the
2357 2358 // biased locking exit case failed.
2358 2359 void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
2360 2361 // allocation
2361 2362 void eden_allocate(
2362 2363 Register obj, // result: pointer to object after successful allocation
2363 2364 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
2364 2365 int con_size_in_bytes, // object size in bytes if known at compile time
2365 2366 Register t1, // temp register
2366 2367 Register t2, // temp register
2367 2368 Label& slow_case // continuation point if fast allocation fails
2368 2369 );
2369 2370 void tlab_allocate(
2370 2371 Register obj, // result: pointer to object after successful allocation
2371 2372 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
2372 2373 int con_size_in_bytes, // object size in bytes if known at compile time
2373 2374 Register t1, // temp register
2374 2375 Label& slow_case // continuation point if fast allocation fails
2375 2376 );
2376 2377 void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
2377 2378
2378 2379 // interface method calling
2379 2380 void lookup_interface_method(Register recv_klass,
2380 2381 Register intf_klass,
2381 2382 RegisterOrConstant itable_index,
2382 2383 Register method_result,
2383 2384 Register temp_reg, Register temp2_reg,
2384 2385 Label& no_such_interface);
2385 2386
2386 2387 // Test sub_klass against super_klass, with fast and slow paths.
2387 2388
2388 2389 // The fast path produces a tri-state answer: yes / no / maybe-slow.
2389 2390 // One of the three labels can be NULL, meaning take the fall-through.
2390 2391 // If super_check_offset is -1, the value is loaded up from super_klass.
2391 2392 // No registers are killed, except temp_reg and temp2_reg.
2392 2393 // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
2393 2394 void check_klass_subtype_fast_path(Register sub_klass,
2394 2395 Register super_klass,
2395 2396 Register temp_reg,
2396 2397 Register temp2_reg,
2397 2398 Label* L_success,
2398 2399 Label* L_failure,
2399 2400 Label* L_slow_path,
2400 2401 RegisterOrConstant super_check_offset = RegisterOrConstant(-1),
2401 2402 Register instanceof_hack = noreg);
2402 2403
2403 2404 // The rest of the type check; must be wired to a corresponding fast path.
2404 2405 // It does not repeat the fast path logic, so don't use it standalone.
2405 2406 // The temp_reg can be noreg, if no temps are available.
2406 2407 // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
2407 2408 // Updates the sub's secondary super cache as necessary.
2408 2409 void check_klass_subtype_slow_path(Register sub_klass,
2409 2410 Register super_klass,
2410 2411 Register temp_reg,
2411 2412 Register temp2_reg,
2412 2413 Register temp3_reg,
2413 2414 Register temp4_reg,
2414 2415 Label* L_success,
2415 2416 Label* L_failure);
2416 2417
2417 2418 // Simplified, combined version, good for typical uses.
2418 2419 // Falls through on failure.
2419 2420 void check_klass_subtype(Register sub_klass,
2420 2421 Register super_klass,
2421 2422 Register temp_reg,
2422 2423 Register temp2_reg,
2423 2424 Label& L_success);
2424 2425
2425 2426 // method handles (JSR 292)
2426 2427 void check_method_handle_type(Register mtype_reg, Register mh_reg,
2427 2428 Register temp_reg,
2428 2429 Label& wrong_method_type);
2429 2430 void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
2430 2431 Register temp_reg);
2431 2432 void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
2432 2433 // offset relative to Gargs of argument at tos[arg_slot].
2433 2434 // (arg_slot == 0 means the last argument, not the first).
2434 2435 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
2435 2436 int extra_slot_offset = 0);
2436 2437 // Address of Gargs and argument_offset.
2437 2438 Address argument_address(RegisterOrConstant arg_slot,
2438 2439 int extra_slot_offset = 0);
2439 2440 
2440 2441 // Stack overflow checking
2441 2442 
2442 2443 // Note: this clobbers G3_scratch
// The caller passes the positive magnitude; the probe store actually
// lands at the *negative* offset -offset (plus STACK_BIAS) below SP,
// which is what the assert message refers to.
2443 2444 void bang_stack_with_offset(int offset) {
2444 2445 // stack grows down, caller passes positive offset
2445 2446 assert(offset > 0, "must bang with negative offset");
2446 2447 set((-offset)+STACK_BIAS, G3_scratch);
2447 2448 st(G0, SP, G3_scratch);
2448 2449 }
2449 2450 
2450 2451 // Writes to stack successive pages until offset reached to check for
2451 2452 // stack overflow + shadow pages. Clobbers tsp and scratch registers.
2452 2453 void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
2453 2454 
2454 2455 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
2455 2456 
2456 2457 void verify_tlab();
2457 2458 
2458 2459 Condition negate_condition(Condition cond);
2459 2460 
2460 2461 // Helper functions for statistics gathering.
2461 2462 // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
2462 2463 void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
2463 2464 // Unconditional increment.
2464 2465 void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
2465 2466 void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
2466 2467 
2467 2468 // Compare char[] arrays aligned to 4 bytes.
2468 2469 void char_arrays_equals(Register ary1, Register ary2,
2469 2470 Register limit, Register result,
2470 2471 Register chr1, Register chr2, Label& Ldone);
2471 2472 
2472 2473 #undef VIRTUAL
2473 2474 
2474 2475 };
2475 2476
2476 2477 /**
2477 2478  * class SkipIfEqual:
2478 2479  *
2479 2480  * Instantiating this class will result in assembly code being output that will
2480 2481  * jump around any code emitted between the creation of the instance and its
2481 2482  * automatic destruction at the end of a scope block, depending on the value of
2482 2483  * the flag passed to the constructor, which will be checked at run-time.
2483 2484  */
2484 2485 class SkipIfEqual : public StackObj {
2485 2486 private:
2486 2487 MacroAssembler* _masm;
// branch target bound by the destructor, ending the skipped region
2487 2488 Label _label;
2488 2489 
2489 2490 public:
2490 2491 // 'temp' is a temp register that this object can use (and trash)
2491 2492 SkipIfEqual(MacroAssembler*, Register temp,
2492 2493 const bool* flag_addr, Assembler::Condition condition);
2493 2494 ~SkipIfEqual();
2494 2495 };
2495 2496
2496 2497 #ifdef ASSERT
2497 2498 // On RISC, there's no benefit to verifying instruction boundaries.
// Fixed-width SPARC encodings make boundary checks redundant, so the
// shared AbstractAssembler check is disabled for this platform.
2498 2499 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
2499 2500 #endif
↓ open down ↓ |
2400 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX