/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"

// <sys/trap.h> promises that the system will not use traps 16-31
#define ST_RESERVED_FOR_USER_0 0x10

class BiasedLockingCounters;


// Register aliases for parts of the system:

// 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
// across context switches in V8+ ABI.  Of course, there are no 64 bit regs
// in V8 ABI.  All 64 bits are preserved in V9 ABI for all registers.

// g2-g4 are scratch registers called "application globals".  Their
// meaning is reserved to the "compilation system"--which means us!
// They are not supposed to be touched by ordinary C code, although
// highly-optimized C code might steal them for temps.  They are safe
// across thread switches, and the ABI requires that they be safe
// across function calls.
//
// g1 and g3 are touched by more modules.  V8 allows g1 to be clobbered
// across func calls, and V8+ also allows g5 to be clobbered across
// func calls.  Also, g1 and g5 can get touched while doing shared
// library loading.
//
// We must not touch g7 (it is the thread-self register) and g6 is
// reserved for certain tools.  g0, of course, is always zero.
//
// (Sources:  SunSoft Compilers Group, thread library engineers.)

// %%%% The interpreter should be revisited to reduce global scratch regs.

// This global always holds the current JavaThread pointer:

REGISTER_DECLARATION(Register, G2_thread , G2);
REGISTER_DECLARATION(Register, G6_heapbase , G6);

// The following globals are part of the Java calling convention:

REGISTER_DECLARATION(Register, G5_method             , G5);
REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
REGISTER_DECLARATION(Register, G5_inline_cache_reg   , G5_method);

// The following globals are used for the new C1 & interpreter calling convention:
REGISTER_DECLARATION(Register, Gargs        , G4); // pointing to the last argument

// This local is used to preserve G2_thread in the interpreter and in stubs:
REGISTER_DECLARATION(Register, L7_thread_cache , L7);

// These globals are used as scratch registers in the interpreter:

REGISTER_DECLARATION(Register, Gframe_size   , G1); // SAME REG as G1_scratch
REGISTER_DECLARATION(Register, G1_scratch    , G1); // also SAME
REGISTER_DECLARATION(Register, G3_scratch    , G3);
REGISTER_DECLARATION(Register, G4_scratch    , G4);

// These globals are used as short-lived scratch registers in the compiler:

REGISTER_DECLARATION(Register, Gtemp  , G5);

// JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, G5_method_type        , G5);
REGISTER_DECLARATION(Register, G3_method_handle      , G3);
REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);

// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
// because a single patchable "set" instruction (NativeMovConstReg,
// or NativeMovConstPatching for compiler1) instruction
// serves to set up either quantity, depending on whether the compiled
// call site is an inline cache or is megamorphic.  See the function
// CompiledIC::set_to_megamorphic.
//
// If an inline cache targets an interpreted method, then the
// G5 register will be used twice during the call.  First,
// the call site will be patched to load a compiledICHolder
// into G5. (This is an ordered pair of ic_klass, method.)
// The c2i adapter will first check the ic_klass, then load
// G5_method with the method part of the pair just before
// jumping into the interpreter.
//
// Note that G5_method is only the method-self for the interpreter,
// and is logically unrelated to G5_megamorphic_method.
//
// Invariants on G2_thread (the JavaThread pointer):
//  - it should not be used for any other purpose anywhere
//  - it must be re-initialized by StubRoutines::call_stub()
//  - it must be preserved around every use of call_VM

// We can consider using g2/g3/g4 to cache more values than the
// JavaThread, such as the card-marking base or perhaps pointers into
// Eden.  It's something of a waste to use them as scratch temporaries,
// since they are not supposed to be volatile.  (Of course, if we find
// that Java doesn't benefit from application globals, then we can just
// use them as ordinary temporaries.)
//
// Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
// it makes sense to use them routinely for procedure linkage,
// whenever the On registers are not applicable.  Examples:  G5_method,
// G5_inline_cache_klass, and a double handful of miscellaneous compiler
// stubs.  This means that compiler stubs, etc., should be kept to a
// maximum of two or three G-register arguments.


// stub frames

REGISTER_DECLARATION(Register, Lentry_args      , L0); // pointer to args passed to callee (interpreter) not stub itself

// Interpreter frames

#ifdef CC_INTERP
REGISTER_DECLARATION(Register, Lstate           , L0); // interpreter state object pointer
REGISTER_DECLARATION(Register, L1_scratch       , L1); // scratch
REGISTER_DECLARATION(Register, Lmirror          , L1); // mirror (for native methods only)
REGISTER_DECLARATION(Register, L2_scratch       , L2);
REGISTER_DECLARATION(Register, L3_scratch       , L3);
REGISTER_DECLARATION(Register, L4_scratch       , L4);
REGISTER_DECLARATION(Register, Lscratch         , L5); // C1 uses
REGISTER_DECLARATION(Register, Lscratch2        , L6); // C1 uses
REGISTER_DECLARATION(Register, L7_scratch       , L7); // constant pool cache
REGISTER_DECLARATION(Register, O5_savedSP       , O5);
REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
                                                       // a copy SP, so in 64-bit it's a biased value.  The bias
                                                       // is added and removed as needed in the frame code.
// Interface to signature handler
REGISTER_DECLARATION(Register, Llocals          , L7); // pointer to locals for signature handler
REGISTER_DECLARATION(Register, Lmethod          , L6); // Method* when calling signature handler

#else
REGISTER_DECLARATION(Register, Lesp             , L0); // expression stack pointer
REGISTER_DECLARATION(Register, Lbcp             , L1); // pointer to next bytecode
REGISTER_DECLARATION(Register, Lmethod          , L2);
REGISTER_DECLARATION(Register, Llocals          , L3);
REGISTER_DECLARATION(Register, Largs            , L3); // pointer to locals for signature handler
                                                       // must match Llocals in asm interpreter
REGISTER_DECLARATION(Register, Lmonitors        , L4);
REGISTER_DECLARATION(Register, Lbyte_code       , L5);
// When calling out from the interpreter we record SP so that we can remove any extra stack
// space allocated during adapter transitions. This register is only live from the point
// of the call until we return.
REGISTER_DECLARATION(Register, Llast_SP         , L5);
REGISTER_DECLARATION(Register, Lscratch         , L5);
REGISTER_DECLARATION(Register, Lscratch2        , L6);
REGISTER_DECLARATION(Register, LcpoolCache      , L6); // constant pool cache

REGISTER_DECLARATION(Register, O5_savedSP       , O5);
REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
                                                       // a copy SP, so in 64-bit it's a biased value.  The bias
                                                       // is added and removed as needed in the frame code.
REGISTER_DECLARATION(Register, IdispatchTables  , I4); // Base address of the bytecode dispatch tables
REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
REGISTER_DECLARATION(Register, ImethodDataPtr   , I2); // Pointer to the current method data
#endif /* CC_INTERP */

// NOTE: Lscratch2 and LcpoolCache point to the same registers in
// the interpreter code.  If Lscratch2 needs to be used for some
// purpose, then LcpoolCache should be restored afterwards for
// the interpreter to work right.
// (These assignments must be compatible with L7_thread_cache; see above.)

// Lbcp points into the middle of the method object.

// Exception processing
// These registers are passed into exception handlers.
// All exception handlers require the exception object being thrown.
// In addition, an nmethod's exception handler must be passed
// the address of the call site within the nmethod, to allow
// proper selection of the applicable catch block.
// (Interpreter frames use their own bcp() for this purpose.)
//
// The Oissuing_pc value is not always needed.  When jumping to a
// handler that is known to be interpreted, the Oissuing_pc value can be
// omitted.  An actual catch block in compiled code receives (from its
// nmethod's exception handler) the thrown exception in the Oexception,
// but it doesn't need the Oissuing_pc.
//
// If an exception handler (either interpreted or compiled)
// discovers there is no applicable catch block, it updates
// the Oissuing_pc to the continuation PC of its own caller,
// pops back to that caller's stack frame, and executes that
// caller's exception handler.  Obviously, this process will
// iterate until the control stack is popped back to a method
// containing an applicable catch block.  A key invariant is
// that the Oissuing_pc value is always a value local to
// the method whose exception handler is currently executing.
//
// Note:  The issuing PC value is __not__ a raw return address (I7 value).
// It is a "return pc", the address __following__ the call.
// Raw return addresses are converted to issuing PCs by frame::pc(),
// or by stubs.  Issuing PCs can be used directly with PC range tables.
//
REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from


// These must occur after the declarations above
// NOTE(review): several of these define names (Gthread, Gmethod, Lthread_cache,
// Ostate, ...) do not match any REGISTER_DECLARATION above (which declare
// G2_thread, G5_method, L7_thread_cache, ...) — presumably stale names from an
// earlier revision; confirm against register_definitions before relying on them.
#ifndef DONT_USE_REGISTER_DEFINES

#define Gthread             AS_REGISTER(Register, Gthread)
#define Gmethod             AS_REGISTER(Register, Gmethod)
#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
#define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
#define Gargs               AS_REGISTER(Register, Gargs)
#define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
#define Gframe_size         AS_REGISTER(Register, Gframe_size)
#define Gtemp               AS_REGISTER(Register, Gtemp)

#ifdef CC_INTERP
#define Lstate              AS_REGISTER(Register, Lstate)
#define Lesp                AS_REGISTER(Register, Lesp)
#define L1_scratch          AS_REGISTER(Register, L1_scratch)
#define Lmirror             AS_REGISTER(Register, Lmirror)
#define L2_scratch          AS_REGISTER(Register, L2_scratch)
#define L3_scratch          AS_REGISTER(Register, L3_scratch)
#define L4_scratch          AS_REGISTER(Register, L4_scratch)
#define Lscratch            AS_REGISTER(Register, Lscratch)
#define Lscratch2           AS_REGISTER(Register, Lscratch2)
#define L7_scratch          AS_REGISTER(Register, L7_scratch)
#define Ostate              AS_REGISTER(Register, Ostate)
#else
#define Lesp                AS_REGISTER(Register, Lesp)
#define Lbcp                AS_REGISTER(Register, Lbcp)
#define Lmethod             AS_REGISTER(Register, Lmethod)
#define Llocals             AS_REGISTER(Register, Llocals)
#define Lmonitors           AS_REGISTER(Register, Lmonitors)
#define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
#define Lscratch            AS_REGISTER(Register, Lscratch)
#define Lscratch2           AS_REGISTER(Register, Lscratch2)
#define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
#endif /* !
CC_INTERP */

#define Lentry_args         AS_REGISTER(Register, Lentry_args)
#define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
#define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
#define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
#define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
#define IdispatchTables     AS_REGISTER(Register, IdispatchTables)

#define Oexception          AS_REGISTER(Register, Oexception)
#define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)

#endif


// Address is an abstraction used to represent a memory location.
//
// Note: A register location is represented via a Register, not
//       via an address for efficiency & simplicity reasons.

class Address VALUE_OBJ_CLASS_SPEC {
 private:
  Register           _base;           // Base register.
  RegisterOrConstant _index_or_disp;  // Index register or constant displacement.
  RelocationHolder   _rspec;          // Relocation info, if any (default: none).

 public:
  Address() : _base(noreg), _index_or_disp(noreg) {}

  Address(Register base, RegisterOrConstant index_or_disp)
    : _base(base),
      _index_or_disp(index_or_disp) {
  }

  Address(Register base, Register index)
    : _base(base),
      _index_or_disp(index) {
  }

  Address(Register base, int disp)
    : _base(base),
      _index_or_disp(disp) {
  }

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  Address(Register base, ByteSize disp)
    : _base(base),
      _index_or_disp(in_bytes(disp)) {
  }
#endif

  // accessors
  Register base()             const { return _base; }
  Register index()            const { return _index_or_disp.as_register(); }  // only valid if has_index()
  int      disp()             const { return _index_or_disp.as_constant(); }  // only valid if has_disp()

  bool     has_index()        const { return _index_or_disp.is_register(); }
  bool     has_disp()         const { return _index_or_disp.is_constant(); }

  // True if this address reads reg as its base or (register) index.
  bool     uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }

  const relocInfo::relocType rtype() { return _rspec.type(); }
  const RelocationHolder&    rspec() { return _rspec; }

  // Relocation spec shifted by offset (e.g. for a split sethi/add sequence).
  RelocationHolder rspec(int offset) const {
    return offset == 0 ? _rspec : _rspec.plus(offset);
  }

  inline bool is_simm13(int offset = 0);  // check disp+offset for overflow

  Address plus_disp(int plusdisp) const {  // bump disp by a small amount
    assert(_index_or_disp.is_constant(), "must have a displacement");
    Address a(base(), disp() + plusdisp);
    return a;
  }
  bool is_same_address(Address a) const {
    // disregard _rspec
    return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
  }

  // Same address but with the base register renamed across a register-window
  // save (O-regs become I-regs) — see Register::after_save().
  Address after_save() const {
    Address a = (*this);
    a._base = a._base->after_save();
    return a;
  }

  // Inverse of after_save(): rename the base across a window restore.
  Address after_restore() const {
    Address a = (*this);
    a._base = a._base->after_restore();
    return a;
  }

  // Convert the raw encoding form into the form expected by the
  // constructor for Address.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);

  friend class Assembler;
};


// AddressLiteral represents an absolute address (code, data, or runtime
// value) together with the relocation info needed to patch it when the
// containing code moves.

class AddressLiteral VALUE_OBJ_CLASS_SPEC {
 private:
  address          _address;
  RelocationHolder _rspec;

  // Build the relocation spec matching the given reloc type for addr.
  RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
    switch (rtype) {
    case relocInfo::external_word_type:
      return external_word_Relocation::spec(addr);
    case relocInfo::internal_word_type:
      return internal_word_Relocation::spec(addr);
#ifdef _LP64
    case relocInfo::opt_virtual_call_type:
      return opt_virtual_call_Relocation::spec();
    case relocInfo::static_call_type:
      return static_call_Relocation::spec();
    case relocInfo::runtime_call_type:
      return runtime_call_Relocation::spec();
#endif
    case relocInfo::none:
      return RelocationHolder();
    default:
      ShouldNotReachHere();
      return RelocationHolder();
    }
  }

 protected:
  // creation
  AddressLiteral() : _address(NULL), _rspec(NULL) {}

 public:
  AddressLiteral(address addr, RelocationHolder const& rspec)
    : _address(addr),
      _rspec(rspec) {}

  // Some constructors to avoid casting at the call site.
  AddressLiteral(jobject obj, RelocationHolder const& rspec)
    : _address((address) obj),
      _rspec(rspec) {}

  AddressLiteral(intptr_t value, RelocationHolder const& rspec)
    : _address((address) value),
      _rspec(rspec) {}

  AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  // Some constructors to avoid casting at the call site.
  AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

#ifdef _LP64
  // 32-bit complains about a multiple declaration for int*.
  AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
#endif

  AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
    : _address((address) addr),
      _rspec(rspec_from_rtype(rtype, (address) addr)) {}

  intptr_t value() const { return (intptr_t) _address; }
  int      low10() const;  // low 10 bits of the address (sethi/or split)

  const relocInfo::relocType rtype() const { return _rspec.type(); }
  const RelocationHolder&    rspec() const { return _rspec; }

  // Relocation spec shifted by offset within the instruction sequence.
  RelocationHolder rspec(int offset) const {
    return offset == 0 ? _rspec : _rspec.plus(offset);
  }
};

// Convenience classes
class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ?
            relocInfo::external_word_type : relocInfo::none;
  }

 public:
  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
  ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
};

// Stack slot (in the current frame's register-save area) where this
// register is saved across a window overflow; includes STACK_BIAS.
inline Address RegisterImpl::address_in_saved_window() const {
   return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
}



// Argument is an abstraction used to represent an outgoing
// actual argument or an incoming formal parameter, whether
// it resides in memory or in a register, in a manner consistent
// with the SPARC Application Binary Interface, or ABI.  This is
// often referred to as the native or C calling convention.

class Argument VALUE_OBJ_CLASS_SPEC {
 private:
  int _number;  // zero-based argument position
  bool _is_in;  // true: incoming formal (I-regs); false: outgoing actual (O-regs)

 public:
#ifdef _LP64
  enum {
    n_register_parameters = 6,          // only 6 registers may contain integer parameters
    n_float_register_parameters = 16    // Can have up to 16 floating registers
  };
#else
  enum {
    n_register_parameters = 6           // only 6 registers may contain integer parameters
  };
#endif

  // creation
  Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}

  int  number() const { return _number;  }
  bool is_in()  const { return _is_in;   }
  bool is_out() const { return !is_in(); }

  Argument successor() const { return Argument(number() + 1, is_in()); }
  Argument as_in()     const { return Argument(number(), true ); }
  Argument as_out()    const { return Argument(number(), false); }

  // locating register-based arguments:
  bool is_register() const { return _number < n_register_parameters; }

#ifdef _LP64
  // locating Floating Point register-based arguments:
  bool is_float_register() const { return _number < n_float_register_parameters; }

  // Single-precision args live in the odd half of each double pair.
  FloatRegister as_float_register() const {
    assert(is_float_register(), "must be a register argument");
    return as_FloatRegister(( number() *2 ) + 1);
  }
  FloatRegister as_double_register() const {
    assert(is_float_register(), "must be a register argument");
    return as_FloatRegister(( number() *2 ));
  }
#endif

  Register as_register() const {
    assert(is_register(), "must be a register argument");
    return is_in() ? as_iRegister(number()) : as_oRegister(number());
  }

  // locating memory-based arguments
  Address as_address() const {
    assert(!is_register(), "must be a memory argument");
    return address_in_frame();
  }

  // When applied to a register-based argument, give the corresponding address
  // into the 6-word area "into which callee may store register arguments"
  // (This is a different place than the corresponding register-save area location.)
  Address address_in_frame() const;

  // debugging
  const char* name() const;

  friend class Assembler;
};


// Snapshot of the full SPARC register file, captured/restored around calls
// into C code for debugging (see save_registers/restore_registers).
class RegistersForDebugging : public StackObj {
 public:
  intptr_t i[8], l[8], o[8], g[8];
  float    f[32];
  double   d[32];

  void print(outputStream* s);

  static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
  static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
  static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
  static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
  static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
  // j is a float-register number; doubles occupy even/odd pairs, hence j/2.
  static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }

  // gen asm code to save regs
  static void save_registers(MacroAssembler* a);

  // restore global registers in case C code disturbed them
  static void restore_registers(MacroAssembler* a, Register r);
};


// MacroAssembler extends Assembler by a few frequently used macros.
587 // 588 // Most of the standard SPARC synthetic ops are defined here. 589 // Instructions for which a 'better' code sequence exists depending 590 // on arguments should also go in here. 591 592 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__) 593 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__) 594 #define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__) 595 #define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__) 596 597 598 class MacroAssembler : public Assembler { 599 // code patchers need various routines like inv_wdisp() 600 friend class NativeInstruction; 601 friend class NativeGeneralJump; 602 friend class Relocation; 603 friend class Label; 604 605 protected: 606 static int patched_branch(int dest_pos, int inst, int inst_pos); 607 static int branch_destination(int inst, int pos); 608 609 // Support for VM calls 610 // This is the base routine called by the different versions of call_VM_leaf. The interpreter 611 // may customize this version by overriding it for its purposes (e.g., to save/restore 612 // additional registers when doing a VM call). 613 #ifdef CC_INTERP 614 #define VIRTUAL 615 #else 616 #define VIRTUAL virtual 617 #endif 618 619 VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments); 620 621 // 622 // It is imperative that all calls into the VM are handled via the call_VM macros. 623 // They make sure that the stack linkage is setup correctly. call_VM's correspond 624 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 625 // 626 // This is the base routine called by the different versions of call_VM. The interpreter 627 // may customize this version by overriding it for its purposes (e.g., to save/restore 628 // additional registers when doing a VM call). 629 // 630 // A non-volatile java_thread_cache register should be specified so 631 // that the G2_thread value can be preserved across the call. 
632 // (If java_thread_cache is noreg, then a slow get_thread call 633 // will re-initialize the G2_thread.) call_VM_base returns the register that contains the 634 // thread. 635 // 636 // If no last_java_sp is specified (noreg) than SP will be used instead. 637 638 virtual void call_VM_base( 639 Register oop_result, // where an oop-result ends up if any; use noreg otherwise 640 Register java_thread_cache, // the thread if computed before ; use noreg otherwise 641 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise 642 address entry_point, // the entry point 643 int number_of_arguments, // the number of arguments (w/o thread) to pop after call 644 bool check_exception=true // flag which indicates if exception should be checked 645 ); 646 647 // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code. 648 // The implementation is only non-empty for the InterpreterMacroAssembler, 649 // as only the interpreter handles and ForceEarlyReturn PopFrame requests. 650 virtual void check_and_handle_popframe(Register scratch_reg); 651 virtual void check_and_handle_earlyret(Register scratch_reg); 652 653 public: 654 MacroAssembler(CodeBuffer* code) : Assembler(code) {} 655 656 // Support for NULL-checks 657 // 658 // Generates code that causes a NULL OS exception if the content of reg is NULL. 659 // If the accessed location is M[reg + offset] and the offset is known, provide the 660 // offset. No explicit code generation is needed if the offset is within a certain 661 // range (0 <= offset <= page_size). 662 // 663 // %%%%%% Currently not done for SPARC 664 665 void null_check(Register reg, int offset = -1); 666 static bool needs_explicit_null_check(intptr_t offset); 667 668 // support for delayed instructions 669 MacroAssembler* delayed() { Assembler::delayed(); return this; } 670 671 // branches that use right instruction for v8 vs. 
v9 672 inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); 673 inline void br( Condition c, bool a, Predict p, Label& L ); 674 675 inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); 676 inline void fb( Condition c, bool a, Predict p, Label& L ); 677 678 // compares register with zero (32 bit) and branches (V9 and V8 instructions) 679 void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn ); 680 // Compares a pointer register with zero and branches on (not)null. 681 // Does a test & branch on 32-bit systems and a register-branch on 64-bit. 682 void br_null ( Register s1, bool a, Predict p, Label& L ); 683 void br_notnull( Register s1, bool a, Predict p, Label& L ); 684 685 // 686 // Compare registers and branch with nop in delay slot or cbcond without delay slot. 687 // 688 // ATTENTION: use these instructions with caution because cbcond instruction 689 // has very short distance: 512 instructions (2Kbyte). 690 691 // Compare integer (32 bit) values (icc only). 692 void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L); 693 void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L); 694 // Platform depending version for pointer compare (icc on !LP64 and xcc on LP64). 695 void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L); 696 void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L); 697 698 // Short branch version for compares a pointer pwith zero. 
  // Short-form null / not-null tests on a register.
  void br_null_short   ( Register s1, Predict p, Label& L );
  void br_notnull_short( Register s1, Predict p, Label& L );

  // unconditional short branch
  void ba_short(Label& L);

  inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );

  // Branch that tests xcc in LP64 and icc in !LP64
  inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void brx( Condition c, bool a, Predict p, Label& L );

  // unconditional branch
  inline void ba( Label& L );

  // Branch that tests fp condition codes
  inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );

  // get PC the best way
  inline int get_pc( Register d );

  // Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
  inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
  inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }

  inline void jmp( Register s1, Register s2 );
  inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );

  // Check if the call target is out of wdisp30 range (relative to the code cache)
  static inline bool is_far_target(address d);
  inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
  inline void call( Label& L,  relocInfo::relocType rt = relocInfo::runtime_call_type );
  inline void callr( Register s1, Register s2 );
  inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );

  // Emits nothing on V8
  inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
  inline void iprefetch( Label& L);

  // Test a register against zero, setting condition codes only.
  inline void tst( Register s ) { orcc( G0, s, G0 ); }

#ifdef PRODUCT
  inline void ret( bool trace = TraceJumps ) {
    if (trace) {
      mov(I7, O7); // traceable register
      JMP(O7, 2 * BytesPerInstWord);
    } else {
      jmpl( I7, 2 * BytesPerInstWord, G0 );
    }
  }

  inline void retl( bool trace = TraceJumps ) {
    if (trace) JMP(O7, 2 * BytesPerInstWord);
    else jmpl( O7, 2 * BytesPerInstWord, G0 );
  }
#else
  // Non-product builds use out-of-line versions (with extra checking).
  void ret(  bool trace = TraceJumps );
  void retl( bool trace = TraceJumps );
#endif /* PRODUCT */

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target);

  // sethi Macro handles optimizations and relocations
private:
  void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
public:
  void sethi(const AddressLiteral& addrlit, Register d);
  void patchable_sethi(const AddressLiteral& addrlit, Register d);

  // compute the number of instructions for a sethi/set
  static int insts_for_sethi( address a, bool worst_case = false );
  static int worst_case_insts_for_set();

  // set may be either setsw or setuw (high 32 bits may be zero or sign)
private:
  void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
  static int insts_for_internal_set(intptr_t value);
public:
  void set(const AddressLiteral& addrlit, Register d);
  void set(intptr_t value, Register d);
  void set(address addr, Register d, RelocationHolder const& rspec);
  static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }

  void patchable_set(const AddressLiteral& addrlit, Register d);
  void patchable_set(intptr_t value, Register d);
  void set64(jlong value, Register d, Register tmp);
  static int insts_for_set64(jlong value);

  // sign-extend 32 to 64
  inline void signx( Register s, Register d ) { sra( s, G0, d); }
  inline void signx( Register d )             { sra( d, G0, d); }

  // bitwise complement
  inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
  inline void not1( Register d )             { xnor( d, G0, d ); }

  // two's-complement negate
  inline void neg( Register s, Register d ) { sub( G0, s, d ); }
  inline void neg( Register d )             { sub( G0, d, d ); }

  inline void cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
  inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
  // Functions for isolating 64 bit atomic swaps for LP64
  // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
  inline void cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
    casx( s1, s2, d );
#else
    cas( s1, s2, d );
#endif
  }

  // Functions for isolating 64 bit shifts for LP64
  inline void sll_ptr( Register s1, Register s2, Register d );
  inline void sll_ptr( Register s1, int imm6a,   Register d );
  inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
  inline void srl_ptr( Register s1, Register s2, Register d );
  inline void srl_ptr( Register s1, int imm6a,   Register d );

  // little-endian
  inline void casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
  inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }

  inline void inc(   Register d, int const13 = 1 ) { add(   d, const13, d); }
  inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }

  inline void dec(   Register d, int const13 = 1 ) { sub(   d, const13, d); }
  inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }

  using Assembler::add;
  inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype);
  inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
  inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
  inline void add(const Address& a, Register d, int offset = 0);

  using Assembler::andn;
  inline void andn( Register s1, RegisterOrConstant s2, Register d);

  // Bit test (sets condition codes), set, clear, toggle idioms.
  inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
  inline void btst( int simm13a, Register s )  { andcc( s,  simm13a, G0 ); }

  inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
  inline void bset( int simm13a, Register s )  { or3( s,  simm13a, s ); }

  inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
  inline void bclr( int simm13a, Register s )  { andn( s,  simm13a, s ); }

  inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
  inline void btog( int simm13a, Register s )  { xor3( s,  simm13a, s ); }

  inline void clr( Register d ) { or3( G0, G0, d ); }

  // clr to memory: byte / half / word / extended-word at [s1 + s2].
  inline void clrb( Register s1, Register s2);
  inline void clrh( Register s1, Register s2);
  inline void clr(  Register s1, Register s2);
  inline void clrx( Register s1, Register s2);

  inline void clrb( Register s1, int simm13a);
  inline void clrh( Register s1, int simm13a);
  inline void clr(  Register s1, int simm13a);
  inline void clrx( Register s1, int simm13a);

  // copy & clear upper word
  inline void clruw( Register s, Register d ) { srl( s, G0, d); }
  // clear upper word
  inline void clruwu( Register d ) { srl( d, G0, d); }

  using Assembler::ldsb;
  using Assembler::ldsh;
  using Assembler::ldsw;
  using Assembler::ldub;
  using Assembler::lduh;
  using Assembler::lduw;
  using Assembler::ldx;
  using Assembler::ldd;

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  inline void ld(Register s1, ByteSize simm13a, Register d);
#endif

  inline void ld(Register s1, Register s2, Register d);
  inline void ld(Register s1, int simm13a, Register d);

  inline void ldsb(const Address& a, Register d, int offset = 0);
  inline void ldsh(const Address& a, Register d, int offset = 0);
  inline void ldsw(const Address& a, Register d, int offset = 0);
  inline void ldub(const Address& a, Register d, int offset = 0);
  inline void lduh(const Address& a, Register d, int offset = 0);
  inline void lduw(const Address& a, Register d, int offset = 0);
  inline void ldx( const Address& a, Register d, int offset = 0);
  inline void ld(  const Address& a, Register d, int offset = 0);
  inline void ldd( const Address& a, Register d, int offset = 0);

  inline void ldub(Register s1, RegisterOrConstant s2, Register d );
  inline void ldsb(Register s1, RegisterOrConstant s2, Register d );
  inline void lduh(Register s1, RegisterOrConstant s2, Register d );
  inline void ldsh(Register s1, RegisterOrConstant s2, Register d );
  inline void lduw(Register s1, RegisterOrConstant s2, Register d );
  inline void ldsw(Register s1, RegisterOrConstant s2, Register d );
  inline void ldx( Register s1, RegisterOrConstant s2, Register d );
  inline void ld(  Register s1, RegisterOrConstant s2, Register d );
  inline void ldd( Register s1, RegisterOrConstant s2, Register d );

  using Assembler::ldf;
  inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
  inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);

  // little-endian
  inline void ldxl(Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
  inline void ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }

  // membar pseudo instruction.  takes into account target memory model.
  inline void membar( Assembler::Membar_mask_bits const7a );

  // returns if membar generates anything.
  inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );

  // mov pseudo instructions
  inline void mov( Register s, Register d) {
    if ( s != d ) or3( G0, s, d);
    else          assert_not_delayed();  // Put something useful in the delay slot!
  }

  inline void mov_or_nop( Register s, Register d) {
    if ( s != d ) or3( G0, s, d);
    else          nop();
  }

  inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }

  using Assembler::prefetch;
  inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);

  using Assembler::stb;
  using Assembler::sth;
  using Assembler::stw;
  using Assembler::stx;
  using Assembler::std;

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  inline void st(Register d, Register s1, ByteSize simm13a);
#endif

  inline void st(Register d, Register s1, Register s2);
  inline void st(Register d, Register s1, int simm13a);

  inline void stb(Register d, const Address& a, int offset = 0 );
  inline void sth(Register d, const Address& a, int offset = 0 );
  inline void stw(Register d, const Address& a, int offset = 0 );
  inline void stx(Register d, const Address& a, int offset = 0 );
  inline void st( Register d, const Address& a, int offset = 0 );
  inline void std(Register d, const Address& a, int offset = 0 );

  inline void stb(Register d, Register s1, RegisterOrConstant s2 );
  inline void sth(Register d, Register s1, RegisterOrConstant s2 );
  inline void stw(Register d, Register s1, RegisterOrConstant s2 );
  inline void stx(Register d, Register s1, RegisterOrConstant s2 );
  inline void std(Register d, Register s1, RegisterOrConstant s2 );
  inline void st( Register d, Register s1, RegisterOrConstant s2 );

  using Assembler::stf;
  inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
  inline void stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);

  // Note: offset is added to s2.
  using Assembler::sub;
  inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);

  using Assembler::swap;
  inline void swap(const Address& a, Register d, int offset = 0);

  // address pseudos: make these names unlike instruction names to avoid confusion
  inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
  inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
  inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
  inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
  inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
  inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
  inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
  inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
  inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);

  // ring buffer traceable jumps

  void jmp2( Register r1, Register r2, const char* file, int line );
  void jmp ( Register r1, int offset,  const char* file, int line );

  void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
  void jump (const AddressLiteral& addrlit, Register temp,             int offset, const char* file, int line);


  // argument pseudos:

  inline void load_argument( Argument& a, Register d );
  inline void store_argument( Register s, Argument& a );
  inline void store_ptr_argument( Register s, Argument& a );
  inline void store_float_argument( FloatRegister s, Argument& a );
  inline void store_double_argument( FloatRegister s, Argument& a );
  inline void store_long_argument( Register s, Argument& a );

  // handy macros:

  // Round r up to the next multiple of modulus (modulus must be a power of 2).
  inline void round_to( Register r, int modulus ) {
    assert_not_delayed();
    inc( r, modulus - 1 );
    and3( r, -modulus, r );
  }

  // --------------------------------------------------

  // Functions for isolating 64 bit loads for LP64
  // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
  // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
  inline void ld_ptr(Register s1, Register s2, Register d);
  inline void ld_ptr(Register s1, int simm13a, Register d);
  inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
  inline void ld_ptr(const Address& a, Register d, int offset = 0);
  inline void st_ptr(Register d, Register s1, Register s2);
  inline void st_ptr(Register d, Register s1, int simm13a);
  inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
  inline void st_ptr(Register d, const Address& a, int offset = 0);

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
  inline void st_ptr(Register d, Register s1, ByteSize simm13a);
#endif

  // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
  // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
  inline void ld_long(Register s1, Register s2, Register d);
  inline void ld_long(Register s1, int simm13a, Register d);
  inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
  inline void ld_long(const Address& a, Register d, int offset = 0);
  inline void st_long(Register d, Register s1, Register s2);
  inline void st_long(Register d, Register s1, int simm13a);
  inline void st_long(Register d, Register s1, RegisterOrConstant s2);
  inline void st_long(Register d, const Address& a, int offset = 0);

  // Helpers for address formation.
  // - They emit only a move if s2 is a constant zero.
  // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
  // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
1043 RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg); 1044 RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg); 1045 RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg); 1046 1047 RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) { 1048 if (is_simm13(src.constant_or_zero())) 1049 return src; // register or short constant 1050 guarantee(temp != noreg, "constant offset overflow"); 1051 set(src.as_constant(), temp); 1052 return temp; 1053 } 1054 1055 // -------------------------------------------------- 1056 1057 public: 1058 // traps as per trap.h (SPARC ABI?) 1059 1060 void breakpoint_trap(); 1061 void breakpoint_trap(Condition c, CC cc); 1062 1063 // Support for serializing memory accesses between threads 1064 void serialize_memory(Register thread, Register tmp1, Register tmp2); 1065 1066 // Stack frame creation/removal 1067 void enter(); 1068 void leave(); 1069 1070 // Manipulation of C++ bools 1071 // These are idioms to flag the need for care with accessing bools but on 1072 // this platform we assume byte size 1073 1074 inline void stbool(Register d, const Address& a) { stb(d, a); } 1075 inline void ldbool(const Address& a, Register d) { ldub(a, d); } 1076 inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); } 1077 1078 // klass oop manipulations if compressed 1079 void load_klass(Register src_oop, Register klass); 1080 void store_klass(Register klass, Register dst_oop); 1081 void store_klass_gap(Register s, Register dst_oop); 1082 1083 // oop manipulations 1084 void load_heap_oop(const Address& s, Register d); 1085 void load_heap_oop(Register s1, Register s2, Register d); 1086 void load_heap_oop(Register s1, int simm13a, Register d); 1087 void load_heap_oop(Register s1, 
RegisterOrConstant s2, Register d); 1088 void store_heap_oop(Register d, Register s1, Register s2); 1089 void store_heap_oop(Register d, Register s1, int simm13a); 1090 void store_heap_oop(Register d, const Address& a, int offset = 0); 1091 1092 void encode_heap_oop(Register src, Register dst); 1093 void encode_heap_oop(Register r) { 1094 encode_heap_oop(r, r); 1095 } 1096 void decode_heap_oop(Register src, Register dst); 1097 void decode_heap_oop(Register r) { 1098 decode_heap_oop(r, r); 1099 } 1100 void encode_heap_oop_not_null(Register r); 1101 void decode_heap_oop_not_null(Register r); 1102 void encode_heap_oop_not_null(Register src, Register dst); 1103 void decode_heap_oop_not_null(Register src, Register dst); 1104 1105 void encode_klass_not_null(Register r); 1106 void decode_klass_not_null(Register r); 1107 void encode_klass_not_null(Register src, Register dst); 1108 void decode_klass_not_null(Register src, Register dst); 1109 1110 // Support for managing the JavaThread pointer (i.e.; the reference to 1111 // thread-local information). 1112 void get_thread(); // load G2_thread 1113 void verify_thread(); // verify G2_thread contents 1114 void save_thread (const Register threache); // save to cache 1115 void restore_thread(const Register thread_cache); // restore from cache 1116 1117 // Support for last Java frame (but use call_VM instead where possible) 1118 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc); 1119 void reset_last_Java_frame(void); 1120 1121 // Call into the VM. 1122 // Passes the thread pointer (in O0) as a prepended argument. 1123 // Makes sure oop return values are visible to the GC. 
  void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // these overloadings are not presently used on SPARC:
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);

  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  // vm result is currently getting hijacked for oop preservation
  void set_vm_result(Register oop_result);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, bool emit_delay = true);

  // if call_VM_base was called with check_exceptions=false, then call
  // check_and_forward_exception to handle exceptions when it is safe
  void check_and_forward_exception(Register scratch_reg);

  // Write to card table for - register is destroyed afterwards.
  void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);

  void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);

  // General G1 post-barrier generator
  void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
#endif // INCLUDE_ALL_GCS

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Returns the byte size of the instructions generated by decode_klass_not_null().
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  // Debugging
  void _verify_oop(Register reg, const char * msg, const char * file, int line);
  void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);

  // TODO: verify_method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyOops
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
  // only if +VerifyFPU
  void stop(const char* msg);              // prints msg, dumps registers and stops execution
  void warn(const char* msg);              // prints msg, but don't stop
  void untested(const char* what = "");
  // NOTE(review): the 1K buffer allocated here is never freed — acceptable
  // only because stop() halts execution.
  void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
  void should_not_reach_here() { stop("should not reach here"); }
  void print_CPU_state();

  // oops in code
  AddressLiteral allocate_oop_address(jobject obj); // allocate_index
  AddressLiteral constant_oop_address(jobject obj); // find_index
  inline void set_oop          (jobject obj, Register d); // uses allocate_oop_address
  inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
  inline void set_oop          (const AddressLiteral& obj_addr, Register d); // same as load_address

  // metadata in code that we have to keep track of
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  inline void set_metadata          (Metadata* obj, Register d); // uses allocate_metadata_address
  inline void set_metadata_constant (Metadata* obj, Register d); // uses constant_metadata_address
  inline void set_metadata          (const AddressLiteral& obj_addr, Register d); // same as load_address

  void set_narrow_oop( jobject obj, Register d );
  void set_narrow_klass( Klass* k, Register d );

  // nop padding
  void align(int modulus);

  // declare a safepoint
  void safepoint();

  // factor out part of stop into subroutine to save space
  void stop_subroutine();
  // factor out part of verify_oop into subroutine to save space
  void verify_oop_subroutine();

  // side-door communication with signalHandler in os_solaris.cpp
  static address _verify_oop_implicit_branch[3];

  int total_frame_size_in_bytes(int extraWords);

  // used when extraWords known statically
  void save_frame(int extraWords = 0);
  void save_frame_c1(int size_in_bytes);
  // make a frame, and simultaneously pass up one or two register value
  // into the new register window
  void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());

  // give no. (outgoing) params, calc # of words will need on frame
  void calc_mem_param_words(Register Rparam_words, Register Rresult);

  // used to calculate frame size dynamically
  // result is in bytes and must be negated for save inst
  void calc_frame_size(Register extraWords, Register resultReg);

  // calc and also save
  void calc_frame_size_and_save(Register extraWords, Register resultReg);

  static void debug(char* msg, RegistersForDebugging* outWindow);

  // implementations of bytecodes used by both interpreter and compiler

  void lcmp( Register Ra_hi, Register Ra_low,
             Register Rb_hi, Register Rb_low,
             Register Rresult);

  void lneg( Register Rhi, Register Rlow );

  void lshl(  Register Rin_high, Register Rin_low, Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

  void lshr(  Register Rin_high, Register Rin_low, Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

  void lushr( Register Rin_high, Register Rin_low, Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

#ifdef _LP64
  void lcmp( Register Ra, Register Rb, Register Rresult);
#endif

  // Load and store values by size and signed-ness
  void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);

  void float_cmp( bool is_float, int unordered_result,
                  FloatRegister Fa, FloatRegister Fb,
                  Register Rresult);

  void save_all_globals_into_locals();
  void restore_globals_from_locals();

  // These set the icc condition code to equal if the lock succeeded
  // and notEqual if it failed and requires a slow case
  void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
                            Register Rscratch,
                            BiasedLockingCounters* counters = NULL,
                            bool try_bias = UseBiasedLocking);
  void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
                              Register Rscratch,
                              bool try_bias = UseBiasedLocking);

  // Biased locking support
  // Upon entry, lock_reg must point to the lock record on the stack,
  // obj_reg must contain the target object, and mark_reg must contain
  // the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.

  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);

  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                            Register t1, Register t2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp2_reg,
                           Label& L_success);

  // method handles (JSR 292)
  // offset relative to Gargs of argument at tos[arg_slot].
  // (arg_slot == 0 means the last argument, not the first).
  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
                                     Register temp_reg,
                                     int extra_slot_offset = 0);
  // Address of Gargs and argument_offset.
  Address            argument_address(RegisterOrConstant arg_slot,
                                      Register temp_reg = noreg,
                                      int extra_slot_offset = 0);

  // Stack overflow checking

  // Note: this clobbers G3_scratch
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    set((-offset)+STACK_BIAS, G3_scratch);
    st(G0, SP, G3_scratch);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Clobbers tsp and scratch registers.
  void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

  void verify_tlab();

  Condition negate_condition(Condition cond);

  // Helper functions for statistics gathering.
  // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
  void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
  // Unconditional increment.
  void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
  void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);

#ifdef COMPILER2
  // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
  void string_compress_16(Register src, Register dst, Register cnt, Register result,
                          Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                          FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone);

  // Compress char[] to byte[]. Return 0 on failure.
  void string_compress(Register src, Register dst, Register cnt, Register tmp, Register result, Label& Ldone);

  // Inflate byte[] to char[] by inflating 16 bytes at once.
  void string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                         FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone);

  // Inflate byte[] to char[].
  void string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone);

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register result, int ae);

  void array_equals(bool is_array_equ, Register ary1, Register ary2,
                    Register limit, Register tmp, Register result, bool is_byte);
#endif

  // Use BIS for zeroing
  void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);

  // Update CRC-32[C] with a byte value according to constants in table
  void update_byte_crc32(Register crc, Register val, Register table);

  // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
  void reverse_bytes_32(Register src, Register dst, Register tmp);
  void movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2);
  void movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2);

#undef VIRTUAL
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual : public StackObj {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  // 'temp' is a temp register that this object can use (and trash)
  SkipIfEqual(MacroAssembler*, Register temp,
              const bool* flag_addr, Assembler::Condition condition);
  ~SkipIfEqual();
};

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP