/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_is_oop) {
    rspec = Relocation::spec_simple(relocInfo::oop_type);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp);  // In argument.
  else
    return Address(SP, disp);  // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}
void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:         s = "????"; break;
  case call_op:    s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
    case fb_op2:     s = "fb";   break;
    case fbp_op2:    s = "fbp";  break;
    case br_op2:     s = "br";   break;
    case bp_op2:     s = "bp";   break;
    case cb_op2:     s = "cb";   break;
    case bpr_op2: {
      if (is_cbcond(inst)) {
        s = is_cxb(inst) ? "cxb" : "cwb";
      } else {
        s = "bpr";
      }
      break;
    }
    default:         s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}


// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
    case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
    case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
    case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    case bpr_op2: {
      if (is_cbcond(inst)) {
        m = wdisp10(word_aligned_ones, 0);
        v = wdisp10(dest_pos, inst_pos);
      } else {
        m = wdisp16(word_aligned_ones, 0);
        v = wdisp16(dest_pos, inst_pos);
      }
      break;
    }
    default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
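// (This is the inverse of patched_branch: it decodes the displacement
// field selected by the opcode and turns it back into a byte offset.)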
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
    case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
    case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
    case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
    case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
    case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
    case bpr_op2: {
      if (is_cbcond(inst)) {
        r = inv_wdisp10(inst, pos);
      } else {
        r = inv_wdisp16(inst, pos);
      }
      break;
    }
    default: ShouldNotReachHere();
    }
  }
  return r;
}

int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
  switch (in) {
  case rc_z:   return equal;
  case rc_lez: return lessEqual;
  case rc_lz:  return less;
  case rc_nz:  return notEqual;
  case rc_gz:  return greater;
  case rc_gez: return greaterEqual;
  default:
    ShouldNotReachHere();
  }
  return equal;
}

// Generate a bunch 'o stuff (including v9's)
#ifndef PRODUCT
void Assembler::test_v9() {
  add( G0, G1, G2 );
  add( G3, 0, G4 );

  addcc( G5, G6, G7 );
  addcc( I0, 1, I1 );
  addc( I2, I3, I4 );
  addc( I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never, true, pc() + 4, relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();

  fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal, true, pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never, false, lbl1 );
  delayed()->nop();

  bp( less, true, icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();


  casa( L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv( O4, O5, O7 );
  udiv( G0, (1 << 12) - 1, G1 );
  sdiv( G1, G2, G3 );
  sdiv( G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0, F1, F2 );
  fsub( FloatRegisterImpl::D, F34, F0, F62 );

  fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6, F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q, F24, F28, F32 );
  fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
  fdiv( FloatRegisterImpl::S, F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2( 3, 0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();


  ldf( FloatRegisterImpl::S, O0, O1, F15 );
  ldf( FloatRegisterImpl::D, O2, -1, F14 );


  ldfsr( O3, O4 );
  ldfsr( O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa( FloatRegisterImpl::Q, I3, -1, F36 );

  ldsb( I4, I5, I6 );
  ldsb( I7, -1, G0 );
  ldsh( G1, G3, G4 );
  ldsh( G5, -1, G6 );
  ldsw( G7, L0, L1 );
  ldsw( L2, -1, L3 );
  ldub( L4, L5, L6 );
  ldub( L7, -1, O0 );
  lduh( O1, O2, O3 );
  lduh( O4, -1, O5 );
  lduw( O6, O7, G0 );
  lduw( G1, -1, G2 );
  ldx( G3, G4, G5 );
  ldx( G6, -1, G7 );
  ldd( I0, I1, I2 );
  ldd( I3, -1, I4 );

  ldsba( I5, I6, 2, I7 );
  ldsba( L0, -1, L1 );
  ldsha( L2, L3, 3, L4 );
  ldsha( L5, -1, L6 );
  ldswa( L7, O0, (1 << 8) - 1, O1 );
  ldswa( O2, -1, O3 );
  lduba( O4, O5, 0, O6 );
  lduba( O7, -1, I0 );
  lduha( I1, I2, 1, I3 );
  lduha( I4, -1, I5 );
  lduwa( I6, I7, 2, L0 );
  lduwa( L1, -1, L2 );
  ldxa( L3, L4, 3, L5 );
  ldxa( L6, -1, L7 );
  ldda( G0, G1, 4, G2 );
  ldda( G3, -1, G4 );

  ldstub( G5, G6, G7 );
  ldstub( O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3( I0, L0, O0 );
  and3( G7, -1, O7 );
  andcc( L2, I2, G2 );
  andcc( L4, -1, G4 );
  andn( I5, I6, I7 );
  andn( I6, -1, I7 );
  andncc( I5, I6, I7 );
  andncc( I7, -1, I6 );
  or3( I5, I6, I7 );
  or3( I7, -1, I6 );
  orcc( I5, I6, I7 );
  orcc( I7, -1, I6 );
  orn( I5, I6, I7 );
  orn( I7, -1, I6 );
  orncc( I5, I6, I7 );
  orncc( I7, -1, I6 );
  xor3( I5, I6, I7 );
  xor3( I7, -1, I6 );
  xorcc( I5, I6, I7 );
  xorcc( I7, -1, I6 );
  xnor( I5, I6, I7 );
  xnor( I7, -1, I6 );
  xnorcc( I5, I6, I7 );
  xnorcc( I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear, false, icc, I6, L4 );
  movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1, L2 );

  mulx( I5, I6, I7 );
  mulx( I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul( I5, I6, I7 );
  umul( I7, -1, I6 );
  smul( I5, I6, I7 );
  smul( I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc( I5, I6, I7 );
  mulscc( I7, -1, I6 );

  nop();


  popc( G0, G1);
  popc( -1, G2);

  prefetch( L1, L2, severalReads );
  prefetch( L3, -1, oneRead );
  prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha( G2, -1, oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save( I5, I6, I7 );
  save( I7, -1, I6 );

  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll( I5, I6, I7 );
  sll( I7, 31, I6 );
  srl( I5, I6, I7 );
  srl( I7, 0, I6 );
  sra( I5, I6, I7 );
  sra( I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7, 0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();

  stf( FloatRegisterImpl::Q, F40, G0, I7 );
  stf( FloatRegisterImpl::S, F18, I3, -1 );

  stfsr( L1, L2 );
  stfsr( I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa( FloatRegisterImpl::Q, F44, G0, -1 );

  stb( L5, O2, I7 );
  stb( I7, I6, -1 );
  sth( L5, O2, I7 );
  sth( I7, I6, -1 );
  stw( L5, O2, I7 );
  stw( I7, I6, -1 );
  stx( L5, O2, I7 );
  stx( I7, I6, -1 );
  std( L5, O2, I7 );
  std( I7, I6, -1 );

  stba( L5, O2, I7, 8 );
  stba( I7, I6, -1 );
  stha( L5, O2, I7, 9 );
  stha( I7, I6, -1 );
  stwa( L5, O2, I7, 0 );
  stwa( I7, I6, -1 );
  stxa( L5, O2, I7, 11 );
  stxa( I7, I6, -1 );
  stda( L5, O2, I7, 12 );
  stda( I7, I6, -1 );

  sub( I5, I6, I7 );
  sub( I7, -1, I6 );
  subcc( I5, I6, I7 );
  subcc( I7, -1, I6 );
  subc( I5, I6, I7 );
  subc( I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa( G0, G1, 13, G2 );
  swapa( I7, -1, I6 );

  taddcc( I5, I6, I7 );
  taddcc( I7, -1, I6 );
  taddcctv( I5, I6, I7 );
  taddcctv( I7, -1, I6 );

  tsubcc( I5, I6, I7 );
  tsubcc( I7, -1, I6 );
  tsubcctv( I5, I6, I7 );
  tsubcctv( I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual, icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}

// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never, true, lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc( I0, I1, 31);
  ldc( I2, -1, 0);

  lddc( I4, I4, 30);
  lddc( I6, 0, 1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc( 31, L4, L5);
  stc( 30, L6, -(1 << 12) );

  stdc( 0, L7, G0);
  stdc( 1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif

// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

void MacroAssembler::retl( bool trace ) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl( O7, 2 * BytesPerInstWord, G0 );
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
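    // (After this add, a.base() holds the complete target address, which
    // the delayed store below records in the jump ring buffer.)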
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  Address a = inArg.address_in_frame();
  add(a.base(), a.disp(), d);
}

// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work())  flushw();
  else                                     flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
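// (The VM thread later write-protects this page; any thread still
// executing the store below then faults, which serves as the remote
// membar.)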
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, s2, d);
  } else {
    smul (s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, simm13a, d);
  } else {
    smul (s1, simm13a, d);
  }
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}


// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    srl(G1, 0,G1);              // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    srl(G4, 0,G4);              // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
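// (A SPARC 'save' rotates the register window, so what the caller used
// as SP is visible in the stub as FP.)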


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
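// (get_vm_result clears vm_result after reading it, so a non-NULL value
// found here would be an oop about to be silently overwritten; the
// ASSERT code below traps on exactly that.)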
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
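  // (A patched-in value may need the worst-case form, so a relocatable
  // sethi must always occupy the full seven instruction words.)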
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
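  // (address_in_saved_window() below names exactly that reserved slot.)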
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
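  // (The 0x3fffff and 0x3ff operands are placeholders; the real narrow-oop
  // bits are written into both instructions when the sethi is patched.)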
  add(d, 0x3ff, d);

}


void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}


void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}


void RegistersForDebugging::print(outputStream* s) {
  int j;
  for (j = 0; j < 8; ++j)
    if (j != 6) s->print_cr("i%d = 0x%.16lx", j, i[j]);
    else        s->print_cr("fp = 0x%.16lx",     i[j]);
  s->cr();

  for (j = 0; j < 8; ++j)
    s->print_cr("l%d = 0x%.16lx", j, l[j]);
  s->cr();

  for (j = 0; j < 8; ++j)
    if (j != 6) s->print_cr("o%d = 0x%.16lx", j, o[j]);
    else        s->print_cr("sp = 0x%.16lx",     o[j]);
  s->cr();

  for (j = 0; j < 8; ++j)
    s->print_cr("g%d = 0x%.16lx", j, g[j]);
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last) s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);  // bit pattern, as for the doubles below
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last) s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flush_windows();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  int len = strlen(file) + strlen(msg) + 1 + 4;
  sprintf(buffer, "%d", line);
  len += strlen(buffer);
  sprintf(buffer, " at offset %d ", offset());
  len += strlen(buffer);
  char* real_msg = new char[len];
  sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize + STACK_BIAS + 0*8);
  stx(O1, SP, frame::register_save_words*wordSize + STACK_BIAS + 1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize + STACK_BIAS + 7*8);

  set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  char buffer[64];
  sprintf(buffer, "%d", line);
  int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
  sprintf(buffer, " at SP+%d ", addr.disp());
  len += strlen(buffer);
  char* real_msg = new char[len];
  sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
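  // (Same preamble as _verify_oop above, except the oop is loaded from
  // 'addr' instead of being moved from a register.)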
  stx(O0, SP, frame::register_save_words*wordSize + STACK_BIAS + 0*8);
  stx(O1, SP, frame::register_save_words*wordSize + STACK_BIAS + 1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize + STACK_BIAS + 7*8);

  set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );

  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize + STACK_BIAS + 2*8);
  stx(O3, SP, frame::register_save_words*wordSize + STACK_BIAS + 3*8);
  stx(O4, SP, frame::register_save_words*wordSize + STACK_BIAS + 4*8);
  stx(O5, SP, frame::register_save_words*wordSize + STACK_BIAS + 5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr( O5_save_flags );

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the klassOop of this object for being in the right area of memory.
1901   // Cannot do the load in the delay slot above in case O0 is null
1902   load_klass(O0_obj, O0_obj);
1903   // assert((klass & klass_mask) == klass_bits);
1904   if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
1905     set(Universe::verify_klass_mask(), O2_mask);
1906   if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
1907     set(Universe::verify_klass_bits(), O3_bits);
1908   and3(O0_obj, O2_mask, O4_temp);
1909   cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, fail);
1910   // Check the klass's klass
1911   load_klass(O0_obj, O0_obj);
1912   and3(O0_obj, O2_mask, O4_temp);
1913   cmp(O4_temp, O3_bits);
1914   brx(notEqual, false, pn, fail);
1915   delayed()->wrccr( O5_save_flags ); // Restore CCR's
1916
1917   // mark upper end of faulting range
1918   _verify_oop_implicit_branch[1] = pc();
1919
1920   //-----------------------
1921   // all tests pass
1922   bind(succeed);
1923
1924   // Restore prior 64-bit registers
1925   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
1926   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
1927   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
1928   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
1929   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
1930   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
1931
1932   retl();                       // Leaf return; restore prior O7 in delay slot
1933   delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
1934
1935   //-----------------------
1936   bind(null_or_fail);           // nulls are less common but OK
1937   br_null(O0_obj, false, pt, succeed, false);
1938   delayed()->wrccr( O5_save_flags ); // Restore CCR's
1939
1940   //-----------------------
1941   // report failure:
1942   bind(fail);
1943   _verify_oop_implicit_branch[2] = pc();
1944
1945   wrccr( O5_save_flags ); // Restore CCR's
1946
1947   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1948
1949   // stop_subroutine expects message pointer in I1.
1950   mov(I1, O1);
1951
1952   // Restore prior 64-bit registers
1953   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
1954   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
1955   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
1956   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
1957   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
1958   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
1959
1960   // factor long stop-sequence into subroutine to save space
1961   assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1962
1963   // call indirectly to solve generation ordering problem
1964   AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
1965   load_ptr_contents(al, O5);
1966   jmpl(O5, 0, O7);
1967   delayed()->nop();
1968 }
1969
1970
1971 void MacroAssembler::stop(const char* msg) {
1972   // save frame first to get O7 for return address
1973   // add one word to size in case the struct is an odd number of words long
1974   // It must be doubleword-aligned for storing doubles into it.
1975
1976   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1977
1978   // stop_subroutine expects message pointer in I1.
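  // (O1 set below becomes I1 once stop_subroutine performs its own
  // save_frame, which is how the message pointer arrives in I1.)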
1979 set((intptr_t)msg, O1); 1980 1981 // factor long stop-sequence into subroutine to save space 1982 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); 1983 1984 // call indirectly to solve generation ordering problem 1985 AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address()); 1986 load_ptr_contents(a, O5); 1987 jmpl(O5, 0, O7); 1988 delayed()->nop(); 1989 1990 breakpoint_trap(); // make stop actually stop rather than writing 1991 // unnoticeable results in the output files. 1992 1993 // restore(); done in callee to save space! 1994 } 1995 1996 1997 void MacroAssembler::warn(const char* msg) { 1998 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); 1999 RegistersForDebugging::save_registers(this); 2000 mov(O0, L0); 2001 set((intptr_t)msg, O0); 2002 call( CAST_FROM_FN_PTR(address, warning) ); 2003 delayed()->nop(); 2004 // ret(); 2005 // delayed()->restore(); 2006 RegistersForDebugging::restore_registers(this, L0); 2007 restore(); 2008 } 2009 2010 2011 void MacroAssembler::untested(const char* what) { 2012 // We must be able to turn interactive prompting off 2013 // in order to run automated test scripts on the VM 2014 // Use the flag ShowMessageBoxOnError 2015 2016 char* b = new char[1024]; 2017 sprintf(b, "untested: %s", what); 2018 2019 if ( ShowMessageBoxOnError ) stop(b); 2020 else warn(b); 2021 } 2022 2023 2024 void MacroAssembler::stop_subroutine() { 2025 RegistersForDebugging::save_registers(this); 2026 2027 // for the sake of the debugger, stick a PC on the current frame 2028 // (this assumes that the caller has performed an extra "save") 2029 mov(I7, L7); 2030 add(O7, -7 * BytesPerInt, I7); 2031 2032 save_frame(); // one more save to free up another O7 register 2033 mov(I0, O1); // addr of reg save area 2034 2035 // We expect pointer to message in I1. 
// Caller must set it up in O1.
2036   mov(I1, O0);             // get msg
2037   call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
2038   delayed()->nop();
2039
2040   restore();
2041
2042   RegistersForDebugging::restore_registers(this, O0);
2043
2044   save_frame(0);
2045   call(CAST_FROM_FN_PTR(address,breakpoint));
2046   delayed()->nop();
2047   restore();
2048
2049   mov(L7, I7);
2050   retl();
2051   delayed()->restore(); // see stop above
2052 }
2053
2054
2055 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
2056   if ( ShowMessageBoxOnError ) {
2057     JavaThreadState saved_state = JavaThread::current()->thread_state();
2058     JavaThread::current()->set_thread_state(_thread_in_vm);
2059     {
2060       // In order to get locks to work, we need to fake an in_VM state
2061       ttyLocker ttyl;
2062       ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
2063       if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2064         ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
2065       }
2066       if (os::message_box(msg, "Execution stopped, print registers?"))
2067         regs->print(::tty);
2068     }
2069     ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
2070   }
2071   else
2072     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
2073   assert(false, err_msg("DEBUG MESSAGE: %s", msg));
2074 }
2075
2076
2077 #ifndef PRODUCT
2078 void MacroAssembler::test() {
2079   ResourceMark rm;
2080
2081   CodeBuffer cb("test", 10000, 10000);
2082   MacroAssembler* a = new MacroAssembler(&cb);
2083   VM_Version::allow_all();
2084   a->test_v9();
2085   a->test_v8_onlys();
2086   VM_Version::revert();
2087
2088   StubRoutines::Sparc::test_stop_entry()();
2089 }
2090 #endif
2091
2092
2093 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
2094   subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
2095   Label no_extras;
2096   br( negative, true, pt, no_extras ); // if neg, clear reg
2097   delayed()->set(0, Rresult);          // annulled, so only if taken
2098   bind( no_extras );
2099 }
2100
2101
2102 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
2103 #ifdef _LP64
2104   add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
2105 #else
2106   add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
2107 #endif
2108   bclr(1, Rresult);
2109   sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
2110 }
2111
2112
2113 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
2114   calc_frame_size(Rextra_words, Rresult);
2115   neg(Rresult);
2116   save(SP, Rresult, SP);
2117 }
2118
2119
2120 // ---------------------------------------------------------
2121 Assembler::RCondition cond2rcond(Assembler::Condition c) {
2122   switch (c) {
2123     /*case zero: */
2124     case Assembler::equal:        return Assembler::rc_z;
2125     case Assembler::lessEqual:    return Assembler::rc_lez;
2126     case Assembler::less:         return Assembler::rc_lz;
2127     /*case notZero:*/
2128     case Assembler::notEqual:     return Assembler::rc_nz;
2129     case Assembler::greater:      return Assembler::rc_gz;
2130     case Assembler::greaterEqual: return Assembler::rc_gez;
2131   }
2132   ShouldNotReachHere();
2133   return Assembler::rc_z;
2134 }
2135
2136 // Compares a (32-bit) register with zero and branches.
// NOT FOR USE WITH 64-bit POINTERS.
2137 void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
2138   tst(s1);
2139   br (c, a, p, L);
2140 }
2141
2142 // Compares a pointer register with zero and branches on null.
2143 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
2144 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L, bool emit_delayed_nop ) {
2145   assert_not_delayed();
2146   if (emit_delayed_nop && use_cbcond(L)) {
2147     Assembler::cbcond(zero, ptr_cc, s1, 0, L);
2148     return;
2149   }
2150 #ifdef _LP64
2151   bpr( rc_z, a, p, s1, L );
2152 #else
2153   tst(s1);
2154   br ( zero, a, p, L );
2155 #endif
2156   // Some callers can fill the delay slot.
2157   if (emit_delayed_nop) {
2158     delayed()->nop();
2159   }
2160 }
2161
2162 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L, bool emit_delayed_nop ) {
2163   assert_not_delayed();
2164   if (emit_delayed_nop && use_cbcond(L)) {
2165     Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
2166     return;
2167   }
2168 #ifdef _LP64
2169   bpr( rc_nz, a, p, s1, L );
2170 #else
2171   tst(s1);
2172   br ( notZero, a, p, L );
2173 #endif
2174   // Some callers can fill the delay slot.
2175   if (emit_delayed_nop) {
2176     delayed()->nop();
2177   }
2178 }
2179
2180 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2181                                      Register s1, address d,
2182                                      relocInfo::relocType rt ) {
2183   assert_not_delayed();
2184   if (VM_Version::v9_instructions_work()) {
2185     bpr(rc, a, p, s1, d, rt);
2186   } else {
2187     tst(s1);
2188     br(reg_cond_to_cc_cond(rc), a, p, d, rt);
2189   }
2190 }
2191
2192 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2193                                      Register s1, Label& L, bool emit_delayed_nop ) {
2194   assert_not_delayed();
2195   if (emit_delayed_nop && use_cbcond(L)) {
2196     // Use xcc to have the same result as bpr (it tests all 64 bits).
2197     Assembler::cbcond(reg_cond_to_cc_cond(rc), xcc, s1, 0, L);
2198     return;
2199   }
2200   if (VM_Version::v9_instructions_work()) {
2201     bpr(rc, a, p, s1, L);
2202   } else {
2203     tst(s1);
2204     br(reg_cond_to_cc_cond(rc), a, p, L);
2205   }
2206   // Some callers can fill the delay slot.
2207   if (emit_delayed_nop) {
2208     delayed()->nop();
2209   }
2210 }
2211
2212 // Compare registers and branch with nop in delay slot or cbcond without delay slot.
2213 void MacroAssembler::cmp_and_br(Register s1, Register s2, Condition c,
2214                                 bool a, Predict p, Label& L) {
2215   assert_not_delayed();
2216   if (use_cbcond(L)) {
2217     Assembler::cbcond(c, icc, s1, s2, L);
2218   } else {
2219     cmp(s1, s2);
2220     br(c, a, p, L);
2221     delayed()->nop();
2222   }
2223 }
2224
2225 // Compare integer (32 bit) values (icc only).
2226 void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
2227                                       Predict p, Label& L) {
2228   assert_not_delayed();
2229   if (use_cbcond(L)) {
2230     Assembler::cbcond(c, icc, s1, s2, L);
2231   } else {
2232     cmp(s1, s2);
2233     br(c, false, p, L);
2234     delayed()->nop();
2235   }
2236 }
2237
2238 // Compare integer (32 bit) values (icc only).
2239 void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
2240                                       Predict p, Label& L) {
2241   assert_not_delayed();
2242   if (is_simm(simm13a,5) && use_cbcond(L)) {
2243     Assembler::cbcond(c, icc, s1, simm13a, L);
2244   } else {
2245     cmp(s1, simm13a);
2246     br(c, false, p, L);
2247     delayed()->nop();
2248   }
2249 }
2250
2251 // Branch that tests xcc in LP64 and icc in !LP64
2252 void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
2253                                        Predict p, Label& L) {
2254   assert_not_delayed();
2255   if (use_cbcond(L)) {
2256     Assembler::cbcond(c, ptr_cc, s1, s2, L);
2257   } else {
2258     cmp(s1, s2);
2259     brx(c, false, p, L);
2260     delayed()->nop();
2261   }
2262 }
2263
2264 // Branch that tests xcc in LP64 and icc in !LP64
2265 void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
2266                                        Predict p, Label& L) {
2267   assert_not_delayed();
2268   if (is_simm(simm13a,5) && use_cbcond(L)) {
2269     Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
2270   } else {
2271     cmp(s1, simm13a);
2272     brx(c, false, p, L);
2273     delayed()->nop();
2274   }
2275 }
2276
2277 // Short branch versions for comparing a pointer with zero.
2278
2279 void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
2280   assert_not_delayed();
2281   if (use_cbcond(L)) {
2282     Assembler::cbcond(zero, ptr_cc, s1, 0, L);
2283     return;
2284   }
2285   br_null(s1, false, p, L);
2286   delayed()->nop();
2287 }
2288
2289 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
2290   assert_not_delayed();
2291   if (use_cbcond(L)) {
2292     Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
2293     return;
2294   }
2295   br_notnull(s1, false, p, L);
2296   delayed()->nop();
2297 }
2298
2299 // Unconditional short branch
2300 void MacroAssembler::ba_short(Label& L) {
2301   if (use_cbcond(L)) {
2302     Assembler::cbcond(equal, icc, G0, G0, L);
2303     return;
2304   }
2305   br(always, false, pt, L);
2306   delayed()->nop();
2307 }
2308
2309 // instruction sequences factored across compiler & interpreter
2310
2311
2312 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
2313                            Register Rb_hi, Register Rb_low,
2314                            Register Rresult) {
2315
2316   Label check_low_parts, done;
2317
2318   cmp(Ra_hi, Rb_hi );        // compare hi parts
2319   br(equal, true, pt, check_low_parts);
2320   delayed()->cmp(Ra_low, Rb_low); // test low parts
2321
2322   // And, with an unsigned comparison, it does not matter if the numbers
2323   // are negative or not.
2324   // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
2325   // The second one is bigger (unsignedly).
2326
2327   // Other notes: The first move in each triplet can be unconditional
2328   // (and therefore probably prefetchable).
2329   // And the equals case for the high part does not need testing,
2330   // since that triplet is reached only after finding the high halves differ.
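  // A second illustrative example of why the low-part compare must be
  // unsigned: for a = 5L and b = 0x80000000L the high halves are both 0, so
  // the low halves decide.  A signed 32-bit compare would see
  // 5 > (int)0x80000000 and answer a > b, while the unsigned
  // 5 <u 0x80000000 gives the correct a < b.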
2331 2332 if (VM_Version::v9_instructions_work()) { 2333 mov(-1, Rresult); 2334 ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult); 2335 } else { 2336 br(less, true, pt, done); delayed()-> set(-1, Rresult); 2337 br(greater, true, pt, done); delayed()-> set( 1, Rresult); 2338 } 2339 2340 bind( check_low_parts ); 2341 2342 if (VM_Version::v9_instructions_work()) { 2343 mov( -1, Rresult); 2344 movcc(equal, false, icc, 0, Rresult); 2345 movcc(greaterUnsigned, false, icc, 1, Rresult); 2346 } else { 2347 set(-1, Rresult); 2348 br(equal, true, pt, done); delayed()->set( 0, Rresult); 2349 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult); 2350 } 2351 bind( done ); 2352 } 2353 2354 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { 2355 subcc( G0, Rlow, Rlow ); 2356 subc( G0, Rhi, Rhi ); 2357 } 2358 2359 void MacroAssembler::lshl( Register Rin_high, Register Rin_low, 2360 Register Rcount, 2361 Register Rout_high, Register Rout_low, 2362 Register Rtemp ) { 2363 2364 2365 Register Ralt_count = Rtemp; 2366 Register Rxfer_bits = Rtemp; 2367 2368 assert( Ralt_count != Rin_high 2369 && Ralt_count != Rin_low 2370 && Ralt_count != Rcount 2371 && Rxfer_bits != Rin_low 2372 && Rxfer_bits != Rin_high 2373 && Rxfer_bits != Rcount 2374 && Rxfer_bits != Rout_low 2375 && Rout_low != Rin_high, 2376 "register alias checks"); 2377 2378 Label big_shift, done; 2379 2380 // This code can be optimized to use the 64 bit shifts in V9. 2381 // Here we use the 32 bit shifts. 2382 2383 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 2384 subcc(Rcount, 31, Ralt_count); 2385 br(greater, true, pn, big_shift); 2386 delayed()->dec(Ralt_count); 2387 2388 // shift < 32 bits, Ralt_count = Rcount-31 2389 2390 // We get the transfer bits by shifting right by 32-count the low 2391 // register. This is done by shifting right by 31-count and then by one 2392 // more to take care of the special (rare) case where count is zero 2393 // (shifting by 32 would not work). 2394 2395 neg(Ralt_count); 2396 2397 // The order of the next two instructions is critical in the case where 2398 // Rin and Rout are the same and should not be reversed. 2399 2400 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count 2401 if (Rcount != Rout_low) { 2402 sll(Rin_low, Rcount, Rout_low); // low half 2403 } 2404 sll(Rin_high, Rcount, Rout_high); 2405 if (Rcount == Rout_low) { 2406 sll(Rin_low, Rcount, Rout_low); // low half 2407 } 2408 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more 2409 ba(done); 2410 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low 2411 2412 // shift >= 32 bits, Ralt_count = Rcount-32 2413 bind(big_shift); 2414 sll(Rin_low, Ralt_count, Rout_high ); 2415 clr(Rout_low); 2416 2417 bind(done); 2418 } 2419 2420 2421 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, 2422 Register Rcount, 2423 Register Rout_high, Register Rout_low, 2424 Register Rtemp ) { 2425 2426 Register Ralt_count = Rtemp; 2427 Register Rxfer_bits = Rtemp; 2428 2429 assert( Ralt_count != Rin_high 2430 && Ralt_count != Rin_low 2431 && Ralt_count != Rcount 2432 && Rxfer_bits != Rin_low 2433 && Rxfer_bits != Rin_high 2434 && Rxfer_bits != Rcount 2435 && Rxfer_bits != Rout_high 2436 && Rout_high != Rin_low, 2437 "register alias checks"); 2438 2439 Label big_shift, done; 2440 2441 // This code can be optimized to use the 64 bit shifts in V9. 2442 // Here we use the 32 bit shifts. 
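  // Worked example for the small-shift path (illustrative, count = 4):
  // for hi:lo = 0x12345678:0x9abcdef0 the result is hi>>4 = 0x01234567 in
  // the high word and (lo>>4 | hi<<28) = 0x89abcdef in the low word; the
  // hi<<28 term is exactly the "xfer bits" computed below.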
2443 2444 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 2445 subcc(Rcount, 31, Ralt_count); 2446 br(greater, true, pn, big_shift); 2447 delayed()->dec(Ralt_count); 2448 2449 // shift < 32 bits, Ralt_count = Rcount-31 2450 2451 // We get the transfer bits by shifting left by 32-count the high 2452 // register. This is done by shifting left by 31-count and then by one 2453 // more to take care of the special (rare) case where count is zero 2454 // (shifting by 32 would not work). 2455 2456 neg(Ralt_count); 2457 if (Rcount != Rout_low) { 2458 srl(Rin_low, Rcount, Rout_low); 2459 } 2460 2461 // The order of the next two instructions is critical in the case where 2462 // Rin and Rout are the same and should not be reversed. 2463 2464 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 2465 sra(Rin_high, Rcount, Rout_high ); // high half 2466 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 2467 if (Rcount == Rout_low) { 2468 srl(Rin_low, Rcount, Rout_low); 2469 } 2470 ba(done); 2471 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 2472 2473 // shift >= 32 bits, Ralt_count = Rcount-32 2474 bind(big_shift); 2475 2476 sra(Rin_high, Ralt_count, Rout_low); 2477 sra(Rin_high, 31, Rout_high); // sign into hi 2478 2479 bind( done ); 2480 } 2481 2482 2483 2484 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 2485 Register Rcount, 2486 Register Rout_high, Register Rout_low, 2487 Register Rtemp ) { 2488 2489 Register Ralt_count = Rtemp; 2490 Register Rxfer_bits = Rtemp; 2491 2492 assert( Ralt_count != Rin_high 2493 && Ralt_count != Rin_low 2494 && Ralt_count != Rcount 2495 && Rxfer_bits != Rin_low 2496 && Rxfer_bits != Rin_high 2497 && Rxfer_bits != Rcount 2498 && Rxfer_bits != Rout_high 2499 && Rout_high != Rin_low, 2500 "register alias checks"); 2501 2502 Label big_shift, done; 2503 2504 // This code can be optimized to use the 64 bit shifts in V9. 2505 // Here we use the 32 bit shifts. 2506 2507 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 2508 subcc(Rcount, 31, Ralt_count); 2509 br(greater, true, pn, big_shift); 2510 delayed()->dec(Ralt_count); 2511 2512 // shift < 32 bits, Ralt_count = Rcount-31 2513 2514 // We get the transfer bits by shifting left by 32-count the high 2515 // register. This is done by shifting left by 31-count and then by one 2516 // more to take care of the special (rare) case where count is zero 2517 // (shifting by 32 would not work). 2518 2519 neg(Ralt_count); 2520 if (Rcount != Rout_low) { 2521 srl(Rin_low, Rcount, Rout_low); 2522 } 2523 2524 // The order of the next two instructions is critical in the case where 2525 // Rin and Rout are the same and should not be reversed. 
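  // (Rationale, as a note: if Rin_high aliases Rout_high, the srl below
  // would overwrite the input before its low-order bits had been captured
  // in Rxfer_bits, so the sll extracting the transfer bits must come first.)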
2526 2527 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 2528 srl(Rin_high, Rcount, Rout_high ); // high half 2529 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 2530 if (Rcount == Rout_low) { 2531 srl(Rin_low, Rcount, Rout_low); 2532 } 2533 ba(done); 2534 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 2535 2536 // shift >= 32 bits, Ralt_count = Rcount-32 2537 bind(big_shift); 2538 2539 srl(Rin_high, Ralt_count, Rout_low); 2540 clr(Rout_high); 2541 2542 bind( done ); 2543 } 2544 2545 #ifdef _LP64 2546 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 2547 cmp(Ra, Rb); 2548 mov(-1, Rresult); 2549 movcc(equal, false, xcc, 0, Rresult); 2550 movcc(greater, false, xcc, 1, Rresult); 2551 } 2552 #endif 2553 2554 2555 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 2556 switch (size_in_bytes) { 2557 case 8: ld_long(src, dst); break; 2558 case 4: ld( src, dst); break; 2559 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 2560 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 2561 default: ShouldNotReachHere(); 2562 } 2563 } 2564 2565 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 2566 switch (size_in_bytes) { 2567 case 8: st_long(src, dst); break; 2568 case 4: st( src, dst); break; 2569 case 2: sth( src, dst); break; 2570 case 1: stb( src, dst); break; 2571 default: ShouldNotReachHere(); 2572 } 2573 } 2574 2575 2576 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 2577 FloatRegister Fa, FloatRegister Fb, 2578 Register Rresult) { 2579 2580 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); 2581 2582 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less; 2583 Condition eq = f_equal; 2584 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater; 2585 2586 if (VM_Version::v9_instructions_work()) { 2587 2588 mov(-1, Rresult); 2589 movcc(eq, true, fcc0, 0, Rresult); 2590 movcc(gt, true, fcc0, 1, Rresult); 2591 2592 } else { 2593 Label done; 2594 2595 set( -1, Rresult ); 2596 //fb(lt, true, pn, done); delayed()->set( -1, Rresult ); 2597 fb( eq, true, pn, done); delayed()->set( 0, Rresult ); 2598 fb( gt, true, pn, done); delayed()->set( 1, Rresult ); 2599 2600 bind (done); 2601 } 2602 } 2603 2604 2605 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2606 { 2607 if (VM_Version::v9_instructions_work()) { 2608 Assembler::fneg(w, s, d); 2609 } else { 2610 if (w == FloatRegisterImpl::S) { 2611 Assembler::fneg(w, s, d); 2612 } else if (w == FloatRegisterImpl::D) { 2613 // number() does a sanity check on the alignment. 2614 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2615 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2616 2617 Assembler::fneg(FloatRegisterImpl::S, s, d); 2618 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2619 } else { 2620 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2621 2622 // number() does a sanity check on the alignment. 
2623 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 2624 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 2625 2626 Assembler::fneg(FloatRegisterImpl::S, s, d); 2627 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2628 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 2629 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 2630 } 2631 } 2632 } 2633 2634 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2635 { 2636 if (VM_Version::v9_instructions_work()) { 2637 Assembler::fmov(w, s, d); 2638 } else { 2639 if (w == FloatRegisterImpl::S) { 2640 Assembler::fmov(w, s, d); 2641 } else if (w == FloatRegisterImpl::D) { 2642 // number() does a sanity check on the alignment. 2643 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2644 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2645 2646 Assembler::fmov(FloatRegisterImpl::S, s, d); 2647 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2648 } else { 2649 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2650 2651 // number() does a sanity check on the alignment. 2652 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 2653 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 2654 2655 Assembler::fmov(FloatRegisterImpl::S, s, d); 2656 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2657 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 2658 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 2659 } 2660 } 2661 } 2662 2663 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2664 { 2665 if (VM_Version::v9_instructions_work()) { 2666 Assembler::fabs(w, s, d); 2667 } else { 2668 if (w == FloatRegisterImpl::S) { 2669 Assembler::fabs(w, s, d); 2670 } else if (w == FloatRegisterImpl::D) { 2671 // number() does a sanity check on the alignment. 2672 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2673 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2674 2675 Assembler::fabs(FloatRegisterImpl::S, s, d); 2676 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2677 } else { 2678 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2679 2680 // number() does a sanity check on the alignment. 
2681     assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2682            ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2683
2684     Assembler::fabs(FloatRegisterImpl::S, s, d);
2685     Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2686     Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2687     Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2688     }
2689   }
2690 }
2691
2692 void MacroAssembler::save_all_globals_into_locals() {
2693   mov(G1,L1);
2694   mov(G2,L2);
2695   mov(G3,L3);
2696   mov(G4,L4);
2697   mov(G5,L5);
2698   mov(G6,L6);
2699   mov(G7,L7);
2700 }
2701
2702 void MacroAssembler::restore_globals_from_locals() {
2703   mov(L1,G1);
2704   mov(L2,G2);
2705   mov(L3,G3);
2706   mov(L4,G4);
2707   mov(L5,G5);
2708   mov(L6,G6);
2709   mov(L7,G7);
2710 }
2711
2712 // Use for a 64-bit operation.
2713 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2714 {
2715   // store ptr_reg as the new top value
2716 #ifdef _LP64
2717   casx(top_ptr_reg, top_reg, ptr_reg);
2718 #else
2719   cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2720 #endif // _LP64
2721 }
2722
2723 // [RGV] This routine does not handle 64 bit operations.
2724 //       use casx_under_lock() or casx directly!!!
2725 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2726 {
2727   // store ptr_reg as the new top value
2728   if (VM_Version::v9_instructions_work()) {
2729     cas(top_ptr_reg, top_reg, ptr_reg);
2730   } else {
2731
2732     // If the register is neither an out nor a global, it is not visible
2733     // after the save.  Allocate a register for it, save its
2734     // value in the register save area (the save may not flush
2735     // registers to the save area).
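    // Example (illustrative): an out register survives the save as the
    // corresponding in register -- O0 is addressable as I0 via after_save()
    // -- and globals are untouched, but a register such as L3 belongs to
    // the old window and is unreachable, hence the explicit spill slots
    // handled below.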
2736
2737     Register top_ptr_reg_after_save;
2738     Register top_reg_after_save;
2739     Register ptr_reg_after_save;
2740
2741     if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2742       top_ptr_reg_after_save = top_ptr_reg->after_save();
2743     } else {
2744       Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2745       top_ptr_reg_after_save = L0;
2746       st(top_ptr_reg, reg_save_addr);
2747     }
2748
2749     if (top_reg->is_out() || top_reg->is_global()) {
2750       top_reg_after_save = top_reg->after_save();
2751     } else {
2752       Address reg_save_addr = top_reg->address_in_saved_window();
2753       top_reg_after_save = L1;
2754       st(top_reg, reg_save_addr);
2755     }
2756
2757     if (ptr_reg->is_out() || ptr_reg->is_global()) {
2758       ptr_reg_after_save = ptr_reg->after_save();
2759     } else {
2760       Address reg_save_addr = ptr_reg->address_in_saved_window();
2761       ptr_reg_after_save = L2;
2762       st(ptr_reg, reg_save_addr);
2763     }
2764
2765     const Register& lock_reg = L3;
2766     const Register& lock_ptr_reg = L4;
2767     const Register& value_reg = L5;
2768     const Register& yield_reg = L6;
2769     const Register& yieldall_reg = L7;
2770
2771     save_frame();
2772
2773     if (top_ptr_reg_after_save == L0) {
2774       ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2775     }
2776
2777     if (top_reg_after_save == L1) {
2778       ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2779     }
2780
2781     if (ptr_reg_after_save == L2) {
2782       ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2783     }
2784
2785     Label retry_get_lock;
2786     Label not_same;
2787     Label dont_yield;
2788
2789     assert(lock_addr, "lock_address should be non null for v8");
2790     set((intptr_t)lock_addr, lock_ptr_reg);
2791     // Initialize yield counter
2792     mov(G0,yield_reg);
2793     mov(G0, yieldall_reg);
2794     set(StubRoutines::Sparc::locked, lock_reg);
2795
2796     bind(retry_get_lock);
2797     cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
2798
2799     if (use_call_vm) {
2800       Untested("Need to verify global reg consistency");
2801       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2802     } else {
2803       // Save the regs and make space for a C call
2804       save(SP, -96, SP);
2805       save_all_globals_into_locals();
2806       call(CAST_FROM_FN_PTR(address,os::yield_all));
2807       delayed()->mov(yieldall_reg, O0);
2808       restore_globals_from_locals();
2809       restore();
2810     }
2811
2812     // reset the counter
2813     mov(G0,yield_reg);
2814     add(yieldall_reg, 1, yieldall_reg);
2815
2816     bind(dont_yield);
2817     // try to get lock
2818     swap(lock_ptr_reg, 0, lock_reg);
2819
2820     // did we get the lock?
2821     cmp(lock_reg, StubRoutines::Sparc::unlocked);
2822     br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2823     delayed()->add(yield_reg,1,yield_reg);
2824
2825     // yes, got lock.  do we have the same top?
2826     ld(top_ptr_reg_after_save, 0, value_reg);
2827     cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
2828
2829     // yes, same top.
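    // Publish the new top value; the StoreStore barrier below keeps this
    // store ahead of the store that releases the spin lock.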
2830 st(ptr_reg_after_save, top_ptr_reg_after_save, 0); 2831 membar(Assembler::StoreStore); 2832 2833 bind(not_same); 2834 mov(value_reg, ptr_reg_after_save); 2835 st(lock_reg, lock_ptr_reg, 0); // unlock 2836 2837 restore(); 2838 } 2839 } 2840 2841 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 2842 Register tmp, 2843 int offset) { 2844 intptr_t value = *delayed_value_addr; 2845 if (value != 0) 2846 return RegisterOrConstant(value + offset); 2847 2848 // load indirectly to solve generation ordering problem 2849 AddressLiteral a(delayed_value_addr); 2850 load_ptr_contents(a, tmp); 2851 2852 #ifdef ASSERT 2853 tst(tmp); 2854 breakpoint_trap(zero, xcc); 2855 #endif 2856 2857 if (offset != 0) 2858 add(tmp, offset, tmp); 2859 2860 return RegisterOrConstant(tmp); 2861 } 2862 2863 2864 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2865 assert(d.register_or_noreg() != G0, "lost side effect"); 2866 if ((s2.is_constant() && s2.as_constant() == 0) || 2867 (s2.is_register() && s2.as_register() == G0)) { 2868 // Do nothing, just move value. 2869 if (s1.is_register()) { 2870 if (d.is_constant()) d = temp; 2871 mov(s1.as_register(), d.as_register()); 2872 return d; 2873 } else { 2874 return s1; 2875 } 2876 } 2877 2878 if (s1.is_register()) { 2879 assert_different_registers(s1.as_register(), temp); 2880 if (d.is_constant()) d = temp; 2881 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2882 return d; 2883 } else { 2884 if (s2.is_register()) { 2885 assert_different_registers(s2.as_register(), temp); 2886 if (d.is_constant()) d = temp; 2887 set(s1.as_constant(), temp); 2888 andn(temp, s2.as_register(), d.as_register()); 2889 return d; 2890 } else { 2891 intptr_t res = s1.as_constant() & ~s2.as_constant(); 2892 return res; 2893 } 2894 } 2895 } 2896 2897 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2898 assert(d.register_or_noreg() != G0, "lost side effect"); 2899 if ((s2.is_constant() && s2.as_constant() == 0) || 2900 (s2.is_register() && s2.as_register() == G0)) { 2901 // Do nothing, just move value. 2902 if (s1.is_register()) { 2903 if (d.is_constant()) d = temp; 2904 mov(s1.as_register(), d.as_register()); 2905 return d; 2906 } else { 2907 return s1; 2908 } 2909 } 2910 2911 if (s1.is_register()) { 2912 assert_different_registers(s1.as_register(), temp); 2913 if (d.is_constant()) d = temp; 2914 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2915 return d; 2916 } else { 2917 if (s2.is_register()) { 2918 assert_different_registers(s2.as_register(), temp); 2919 if (d.is_constant()) d = temp; 2920 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2921 return d; 2922 } else { 2923 intptr_t res = s1.as_constant() + s2.as_constant(); 2924 return res; 2925 } 2926 } 2927 } 2928 2929 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2930 assert(d.register_or_noreg() != G0, "lost side effect"); 2931 if (!is_simm13(s2.constant_or_zero())) 2932 s2 = (s2.as_constant() & 0xFF); 2933 if ((s2.is_constant() && s2.as_constant() == 0) || 2934 (s2.is_register() && s2.as_register() == G0)) { 2935 // Do nothing, just move value. 
2936 if (s1.is_register()) { 2937 if (d.is_constant()) d = temp; 2938 mov(s1.as_register(), d.as_register()); 2939 return d; 2940 } else { 2941 return s1; 2942 } 2943 } 2944 2945 if (s1.is_register()) { 2946 assert_different_registers(s1.as_register(), temp); 2947 if (d.is_constant()) d = temp; 2948 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2949 return d; 2950 } else { 2951 if (s2.is_register()) { 2952 assert_different_registers(s2.as_register(), temp); 2953 if (d.is_constant()) d = temp; 2954 set(s1.as_constant(), temp); 2955 sll_ptr(temp, s2.as_register(), d.as_register()); 2956 return d; 2957 } else { 2958 intptr_t res = s1.as_constant() << s2.as_constant(); 2959 return res; 2960 } 2961 } 2962 } 2963 2964 2965 // Look up the method for a megamorphic invokeinterface call. 2966 // The target method is determined by <intf_klass, itable_index>. 2967 // The receiver klass is in recv_klass. 2968 // On success, the result will be in method_result, and execution falls through. 2969 // On failure, execution transfers to the given label. 2970 void MacroAssembler::lookup_interface_method(Register recv_klass, 2971 Register intf_klass, 2972 RegisterOrConstant itable_index, 2973 Register method_result, 2974 Register scan_temp, 2975 Register sethi_temp, 2976 Label& L_no_such_interface) { 2977 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2978 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2979 "caller must use same register for non-constant itable index as for method"); 2980 2981 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2982 int vtable_base = instanceKlass::vtable_start_offset() * wordSize; 2983 int scan_step = itableOffsetEntry::size() * wordSize; 2984 int vte_size = vtableEntry::size() * wordSize; 2985 2986 lduw(recv_klass, instanceKlass::vtable_length_offset() * wordSize, scan_temp); 2987 // %%% We should store the aligned, prescaled offset in the klassoop. 2988 // Then the next several instructions would fold away. 2989 2990 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0); 2991 int itb_offset = vtable_base; 2992 if (round_to_unit != 0) { 2993 // hoist first instruction of round_to(scan_temp, BytesPerLong): 2994 itb_offset += round_to_unit - wordSize; 2995 } 2996 int itb_scale = exact_log2(vtableEntry::size() * wordSize); 2997 sll(scan_temp, itb_scale, scan_temp); 2998 add(scan_temp, itb_offset, scan_temp); 2999 if (round_to_unit != 0) { 3000 // Round up to align_object_offset boundary 3001 // see code for instanceKlass::start_of_itable! 3002 // Was: round_to(scan_temp, BytesPerLong); 3003 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp); 3004 and3(scan_temp, -round_to_unit, scan_temp); 3005 } 3006 add(recv_klass, scan_temp, scan_temp); 3007 3008 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
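  // In effect (an illustrative C sketch, not the emitted code):
  //   recv_klass += itable_index * itableMethodEntry::size() * wordSize
  //               + itableMethodEntry::method_offset_in_bytes();
  // so that recv_klass + scan->offset() later points directly at the
  // method slot for this interface method.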
3009 RegisterOrConstant itable_offset = itable_index; 3010 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 3011 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 3012 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 3013 3014 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 3015 // if (scan->interface() == intf) { 3016 // result = (klass + scan->offset() + itable_index); 3017 // } 3018 // } 3019 Label search, found_method; 3020 3021 for (int peel = 1; peel >= 0; peel--) { 3022 // %%%% Could load both offset and interface in one ldx, if they were 3023 // in the opposite order. This would save a load. 3024 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 3025 3026 // Check that this entry is non-null. A null entry means that 3027 // the receiver class doesn't implement the interface, and wasn't the 3028 // same as when the caller was compiled. 3029 bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface); 3030 delayed()->cmp(method_result, intf_klass); 3031 3032 if (peel) { 3033 brx(Assembler::equal, false, Assembler::pt, found_method); 3034 } else { 3035 brx(Assembler::notEqual, false, Assembler::pn, search); 3036 // (invert the test to fall through to found_method...) 3037 } 3038 delayed()->add(scan_temp, scan_step, scan_temp); 3039 3040 if (!peel) break; 3041 3042 bind(search); 3043 } 3044 3045 bind(found_method); 3046 3047 // Got a hit. 3048 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 3049 // scan_temp[-scan_step] points to the vtable offset we need 3050 ito_offset -= scan_step; 3051 lduw(scan_temp, ito_offset, scan_temp); 3052 ld_ptr(recv_klass, scan_temp, method_result); 3053 } 3054 3055 3056 void MacroAssembler::check_klass_subtype(Register sub_klass, 3057 Register super_klass, 3058 Register temp_reg, 3059 Register temp2_reg, 3060 Label& L_success) { 3061 Label L_failure, L_pop_to_failure; 3062 check_klass_subtype_fast_path(sub_klass, super_klass, 3063 temp_reg, temp2_reg, 3064 &L_success, &L_failure, NULL); 3065 Register sub_2 = sub_klass; 3066 Register sup_2 = super_klass; 3067 if (!sub_2->is_global()) sub_2 = L0; 3068 if (!sup_2->is_global()) sup_2 = L1; 3069 3070 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 3071 check_klass_subtype_slow_path(sub_2, sup_2, 3072 L2, L3, L4, L5, 3073 NULL, &L_pop_to_failure); 3074 3075 // on success: 3076 restore(); 3077 ba_short(L_success); 3078 3079 // on failure: 3080 bind(L_pop_to_failure); 3081 restore(); 3082 bind(L_failure); 3083 } 3084 3085 3086 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 3087 Register super_klass, 3088 Register temp_reg, 3089 Register temp2_reg, 3090 Label* L_success, 3091 Label* L_failure, 3092 Label* L_slow_path, 3093 RegisterOrConstant super_check_offset) { 3094 int sc_offset = (klassOopDesc::header_size() * HeapWordSize + 3095 Klass::secondary_super_cache_offset_in_bytes()); 3096 int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 3097 Klass::super_check_offset_offset_in_bytes()); 3098 3099 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 3100 bool need_slow_path = (must_load_sco || 3101 super_check_offset.constant_or_zero() == sco_offset); 3102 3103 assert_different_registers(sub_klass, super_klass, temp_reg); 3104 if (super_check_offset.is_register()) { 3105 assert_different_registers(sub_klass, 
super_klass, temp_reg, 3106 super_check_offset.as_register()); 3107 } else if (must_load_sco) { 3108 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 3109 } 3110 3111 Label L_fallthrough; 3112 int label_nulls = 0; 3113 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 3114 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 3115 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 3116 assert(label_nulls <= 1 || 3117 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 3118 "at most one NULL in the batch, usually"); 3119 3120 // If the pointers are equal, we are done (e.g., String[] elements). 3121 // This self-check enables sharing of secondary supertype arrays among 3122 // non-primary types such as array-of-interface. Otherwise, each such 3123 // type would need its own customized SSA. 3124 // We move this check to the front of the fast path because many 3125 // type checks are in fact trivially successful in this manner, 3126 // so we get a nicely predicted branch right at the start of the check. 3127 cmp(super_klass, sub_klass); 3128 brx(Assembler::equal, false, Assembler::pn, *L_success); 3129 delayed()->nop(); 3130 3131 // Check the supertype display: 3132 if (must_load_sco) { 3133 // The super check offset is always positive... 3134 lduw(super_klass, sco_offset, temp2_reg); 3135 super_check_offset = RegisterOrConstant(temp2_reg); 3136 // super_check_offset is register. 3137 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 3138 } 3139 ld_ptr(sub_klass, super_check_offset, temp_reg); 3140 cmp(super_klass, temp_reg); 3141 3142 // This check has worked decisively for primary supers. 3143 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3144 // (Secondary supers are interfaces and very deeply nested subtypes.) 3145 // This works in the same check above because of a tricky aliasing 3146 // between the super_cache and the primary super display elements. 3147 // (The 'super_check_addr' can address either, as the case requires.) 3148 // Note that the cache is updated below if it does not help us find 3149 // what we need immediately. 3150 // So if it was a primary super, we can just fail immediately. 3151 // Otherwise, it's the slow path for us (no success at this point). 3152 3153 // Hacked ba(), which may only be used just before L_fallthrough. 3154 #define FINAL_JUMP(label) \ 3155 if (&(label) != &L_fallthrough) { \ 3156 ba(label); delayed()->nop(); \ 3157 } 3158 3159 if (super_check_offset.is_register()) { 3160 brx(Assembler::equal, false, Assembler::pn, *L_success); 3161 delayed()->cmp(super_check_offset.as_register(), sc_offset); 3162 3163 if (L_failure == &L_fallthrough) { 3164 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 3165 delayed()->nop(); 3166 } else { 3167 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 3168 delayed()->nop(); 3169 FINAL_JUMP(*L_slow_path); 3170 } 3171 } else if (super_check_offset.as_constant() == sc_offset) { 3172 // Need a slow path; fast failure is impossible. 3173 if (L_slow_path == &L_fallthrough) { 3174 brx(Assembler::equal, false, Assembler::pt, *L_success); 3175 delayed()->nop(); 3176 } else { 3177 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 3178 delayed()->nop(); 3179 FINAL_JUMP(*L_success); 3180 } 3181 } else { 3182 // No slow path; it's a fast decision. 
3183     if (L_failure == &L_fallthrough) {
3184       brx(Assembler::equal, false, Assembler::pt, *L_success);
3185       delayed()->nop();
3186     } else {
3187       brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
3188       delayed()->nop();
3189       FINAL_JUMP(*L_success);
3190     }
3191   }
3192
3193   bind(L_fallthrough);
3194
3195 #undef FINAL_JUMP
3196 }
3197
3198
3199 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
3200                                                    Register super_klass,
3201                                                    Register count_temp,
3202                                                    Register scan_temp,
3203                                                    Register scratch_reg,
3204                                                    Register coop_reg,
3205                                                    Label* L_success,
3206                                                    Label* L_failure) {
3207   assert_different_registers(sub_klass, super_klass,
3208                              count_temp, scan_temp, scratch_reg, coop_reg);
3209
3210   Label L_fallthrough, L_loop;
3211   int label_nulls = 0;
3212   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3213   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3214   assert(label_nulls <= 1, "at most one NULL in the batch");
3215
3216   // a couple of useful fields in sub_klass:
3217   int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
3218                    Klass::secondary_supers_offset_in_bytes());
3219   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
3220                    Klass::secondary_super_cache_offset_in_bytes());
3221
3222   // Do a linear scan of the secondary super-klass chain.
3223   // This code is rarely used, so simplicity is a virtue here.
3224
3225 #ifndef PRODUCT
3226   int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3227   inc_counter((address) pst_counter, count_temp, scan_temp);
3228 #endif
3229
3230   // We will consult the secondary-super array.
3231   ld_ptr(sub_klass, ss_offset, scan_temp);
3232
3233   // Compress superclass if necessary.
3234   Register search_key = super_klass;
3235   bool decode_super_klass = false;
3236   if (UseCompressedOops) {
3237     if (coop_reg != noreg) {
3238       encode_heap_oop_not_null(super_klass, coop_reg);
3239       search_key = coop_reg;
3240     } else {
3241       encode_heap_oop_not_null(super_klass);
3242       decode_super_klass = true;  // scarce temps!
3243     }
3244     // The superclass is never null; it would be a basic system error if a null
3245     // pointer were to sneak in here.  Note that we have already loaded the
3246     // Klass::super_check_offset from the super_klass in the fast path,
3247     // so if there is a null in that register, we are already in the afterlife.
3248   }
3249
3250   // Load the array length.  (The unsigned 32-bit load does the right thing on LP64.)
3251   lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
3252
3253   // Check for empty secondary super list
3254   tst(count_temp);
3255
3256   // Top of search loop
3257   bind(L_loop);
3258   br(Assembler::equal, false, Assembler::pn, *L_failure);
3259   delayed()->add(scan_temp, heapOopSize, scan_temp);
3260   assert(heapOopSize != 0, "heapOopSize should be initialized");
3261
3262   // Skip the array header in all array accesses.
3263   int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
3264   elem_offset -= heapOopSize;   // the scan pointer was pre-incremented also
3265
3266   // Load next super to check
3267   if (UseCompressedOops) {
3268     // Don't use load_heap_oop; we don't want to decode the element.
3269 lduw( scan_temp, elem_offset, scratch_reg ); 3270 } else { 3271 ld_ptr( scan_temp, elem_offset, scratch_reg ); 3272 } 3273 3274 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 3275 cmp(scratch_reg, search_key); 3276 3277 // A miss means we are NOT a subtype and need to keep looping 3278 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 3279 delayed()->deccc(count_temp); // decrement trip counter in delay slot 3280 3281 // Falling out the bottom means we found a hit; we ARE a subtype 3282 if (decode_super_klass) decode_heap_oop(super_klass); 3283 3284 // Success. Cache the super we found and proceed in triumph. 3285 st_ptr(super_klass, sub_klass, sc_offset); 3286 3287 if (L_success != &L_fallthrough) { 3288 ba(*L_success); 3289 delayed()->nop(); 3290 } 3291 3292 bind(L_fallthrough); 3293 } 3294 3295 3296 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, 3297 Register temp_reg, 3298 Label& wrong_method_type) { 3299 assert_different_registers(mtype_reg, mh_reg, temp_reg); 3300 // compare method type against that of the receiver 3301 RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg); 3302 load_heap_oop(mh_reg, mhtype_offset, temp_reg); 3303 cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type); 3304 } 3305 3306 3307 // A method handle has a "vmslots" field which gives the size of its 3308 // argument list in JVM stack slots. This field is either located directly 3309 // in every method handle, or else is indirectly accessed through the 3310 // method handle's MethodType. This macro hides the distinction. 3311 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, 3312 Register temp_reg) { 3313 assert_different_registers(vmslots_reg, mh_reg, temp_reg); 3314 // load mh.type.form.vmslots 3315 if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) { 3316 // hoist vmslots into every mh to avoid dependent load chain 3317 ld( Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); 3318 } else { 3319 Register temp2_reg = vmslots_reg; 3320 load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg); 3321 load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg); 3322 ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); 3323 } 3324 } 3325 3326 3327 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) { 3328 assert(mh_reg == G3_method_handle, "caller must put MH object in G3"); 3329 assert_different_registers(mh_reg, temp_reg); 3330 3331 // pick out the interpreted side of the handler 3332 // NOTE: vmentry is not an oop! 3333 ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); 3334 3335 // off we go... 3336 ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg); 3337 jmp(temp_reg, 0); 3338 3339 // for the various stubs which take control at this point, 3340 // see MethodHandles::generate_method_handle_stub 3341 3342 // Some callers can fill the delay slot. 
3343 if (emit_delayed_nop) { 3344 delayed()->nop(); 3345 } 3346 } 3347 3348 3349 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 3350 Register temp_reg, 3351 int extra_slot_offset) { 3352 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 3353 int stackElementSize = Interpreter::stackElementSize; 3354 int offset = extra_slot_offset * stackElementSize; 3355 if (arg_slot.is_constant()) { 3356 offset += arg_slot.as_constant() * stackElementSize; 3357 return offset; 3358 } else { 3359 assert(temp_reg != noreg, "must specify"); 3360 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 3361 if (offset != 0) 3362 add(temp_reg, offset, temp_reg); 3363 return temp_reg; 3364 } 3365 } 3366 3367 3368 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 3369 Register temp_reg, 3370 int extra_slot_offset) { 3371 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 3372 } 3373 3374 3375 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 3376 Register temp_reg, 3377 Label& done, Label* slow_case, 3378 BiasedLockingCounters* counters) { 3379 assert(UseBiasedLocking, "why call this otherwise?"); 3380 3381 if (PrintBiasedLockingStatistics) { 3382 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 3383 if (counters == NULL) 3384 counters = BiasedLocking::counters(); 3385 } 3386 3387 Label cas_label; 3388 3389 // Biased locking 3390 // See whether the lock is currently biased toward our thread and 3391 // whether the epoch is still valid 3392 // Note that the runtime guarantees sufficient alignment of JavaThread 3393 // pointers to allow age to be placed into low bits 3394 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 3395 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 3396 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 3397 3398 load_klass(obj_reg, temp_reg); 3399 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); 3400 or3(G2_thread, temp_reg, temp_reg); 3401 xor3(mark_reg, temp_reg, temp_reg); 3402 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 3403 if (counters != NULL) { 3404 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 3405 // Reload mark_reg as we may need it later 3406 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 3407 } 3408 brx(Assembler::equal, true, Assembler::pt, done); 3409 delayed()->nop(); 3410 3411 Label try_revoke_bias; 3412 Label try_rebias; 3413 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 3414 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3415 3416 // At this point we know that the header has the bias pattern and 3417 // that we are not the bias owner in the current epoch. We need to 3418 // figure out more details about the state of the header in order to 3419 // know what operations can be legally performed on the object's 3420 // header. 3421 3422 // If the low three bits in the xor result aren't clear, that means 3423 // the prototype header is no longer biased and we have to revoke 3424 // the bias on this object. 
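  // Mark word layout assumed by these masks (a sketch; the authoritative
  // definition lives in markOop.hpp):
  //   [ thread/hash | epoch:2 | age:4 | biased_lock:1 | lock:2 ]
  // biased_lock_mask_in_place therefore covers the low three bits tested
  // below.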
3425 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 3426 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 3427 3428 // Biasing is still enabled for this data type. See whether the 3429 // epoch of the current bias is still valid, meaning that the epoch 3430 // bits of the mark word are equal to the epoch bits of the 3431 // prototype header. (Note that the prototype header's epoch bits 3432 // only change at a safepoint.) If not, attempt to rebias the object 3433 // toward the current thread. Note that we must be absolutely sure 3434 // that the current epoch is invalid in order to do this because 3435 // otherwise the manipulations it performs on the mark word are 3436 // illegal. 3437 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 3438 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 3439 3440 // The epoch of the current bias is still valid but we know nothing 3441 // about the owner; it might be set or it might be clear. Try to 3442 // acquire the bias of the object using an atomic operation. If this 3443 // fails we will go in to the runtime to revoke the object's bias. 3444 // Note that we first construct the presumed unbiased header so we 3445 // don't accidentally blow away another thread's valid bias. 3446 delayed()->and3(mark_reg, 3447 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 3448 mark_reg); 3449 or3(G2_thread, mark_reg, temp_reg); 3450 casn(mark_addr.base(), mark_reg, temp_reg); 3451 // If the biasing toward our thread failed, this means that 3452 // another thread succeeded in biasing it toward itself and we 3453 // need to revoke that bias. The revocation will occur in the 3454 // interpreter runtime in the slow case. 3455 cmp(mark_reg, temp_reg); 3456 if (counters != NULL) { 3457 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 3458 } 3459 if (slow_case != NULL) { 3460 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 3461 delayed()->nop(); 3462 } 3463 ba_short(done); 3464 3465 bind(try_rebias); 3466 // At this point we know the epoch has expired, meaning that the 3467 // current "bias owner", if any, is actually invalid. Under these 3468 // circumstances _only_, we are allowed to use the current header's 3469 // value as the comparison value when doing the cas to acquire the 3470 // bias in the current epoch. In other words, we allow transfer of 3471 // the bias from one thread to another directly in this situation. 3472 // 3473 // FIXME: due to a lack of registers we currently blow away the age 3474 // bits in this situation. Should attempt to preserve them. 3475 load_klass(obj_reg, temp_reg); 3476 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); 3477 or3(G2_thread, temp_reg, temp_reg); 3478 casn(mark_addr.base(), mark_reg, temp_reg); 3479 // If the biasing toward our thread failed, this means that 3480 // another thread succeeded in biasing it toward itself and we 3481 // need to revoke that bias. The revocation will occur in the 3482 // interpreter runtime in the slow case. 
3483 cmp(mark_reg, temp_reg); 3484 if (counters != NULL) { 3485 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 3486 } 3487 if (slow_case != NULL) { 3488 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 3489 delayed()->nop(); 3490 } 3491 ba_short(done); 3492 3493 bind(try_revoke_bias); 3494 // The prototype mark in the klass doesn't have the bias bit set any 3495 // more, indicating that objects of this data type are not supposed 3496 // to be biased any more. We are going to try to reset the mark of 3497 // this object to the prototype value and fall through to the 3498 // CAS-based locking scheme. Note that if our CAS fails, it means 3499 // that another thread raced us for the privilege of revoking the 3500 // bias of this particular object, so it's okay to continue in the 3501 // normal locking code. 3502 // 3503 // FIXME: due to a lack of registers we currently blow away the age 3504 // bits in this situation. Should attempt to preserve them. 3505 load_klass(obj_reg, temp_reg); 3506 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); 3507 casn(mark_addr.base(), mark_reg, temp_reg); 3508 // Fall through to the normal CAS-based lock, because no matter what 3509 // the result of the above CAS, some thread must have succeeded in 3510 // removing the bias bit from the object's header. 3511 if (counters != NULL) { 3512 cmp(mark_reg, temp_reg); 3513 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 3514 } 3515 3516 bind(cas_label); 3517 } 3518 3519 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 3520 bool allow_delay_slot_filling) { 3521 // Check for biased locking unlock case, which is a no-op 3522 // Note: we do not have to check the thread ID for two reasons. 3523 // First, the interpreter checks for IllegalMonitorStateException at 3524 // a higher level. Second, if the bias was revoked while we held the 3525 // lock, the object could not be rebiased toward another thread, so 3526 // the bias bit would be clear. 3527 ld_ptr(mark_addr, temp_reg); 3528 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 3529 cmp(temp_reg, markOopDesc::biased_lock_pattern); 3530 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 3531 delayed(); 3532 if (!allow_delay_slot_filling) { 3533 nop(); 3534 } 3535 } 3536 3537 3538 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by 3539 // Solaris/SPARC's "as". Another apt name would be cas_ptr() 3540 3541 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) { 3542 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3543 } 3544 3545 3546 3547 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 3548 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 3549 // The code could be tightened up considerably. 3550 // 3551 // box->dhw disposition - post-conditions at DONE_LABEL. 3552 // - Successful inflated lock: box->dhw != 0. 3553 // Any non-zero value suffices. 3554 // Consider G2_thread, rsp, boxReg, or unused_mark() 3555 // - Successful Stack-lock: box->dhw == mark. 3556 // box->dhw must contain the displaced mark word value 3557 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
3558 // The slow-path fast_enter() and slow_enter() operators 3559 // are responsible for setting box->dhw = NonZero (typically ::unused_mark). 3560 // - Biased: box->dhw is undefined 3561 // 3562 // SPARC refworkload performance - specifically jetstream and scimark - is 3563 // extremely sensitive to the size of the code emitted by compiler_lock_object 3564 // and compiler_unlock_object. Critically, the key factor is code size, not path 3565 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the 3566 // effect). 3567 3568 3569 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 3570 Register Rbox, Register Rscratch, 3571 BiasedLockingCounters* counters, 3572 bool try_bias) { 3573 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 3574 3575 verify_oop(Roop); 3576 Label done ; 3577 3578 if (counters != NULL) { 3579 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 3580 } 3581 3582 if (EmitSync & 1) { 3583 mov(3, Rscratch); 3584 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3585 cmp(SP, G0); 3586 return ; 3587 } 3588 3589 if (EmitSync & 2) { 3590 3591 // Fetch object's markword 3592 ld_ptr(mark_addr, Rmark); 3593 3594 if (try_bias) { 3595 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 3596 } 3597 3598 // Save Rbox in Rscratch to be used for the cas operation 3599 mov(Rbox, Rscratch); 3600 3601 // set Rmark to markOop | markOopDesc::unlocked_value 3602 or3(Rmark, markOopDesc::unlocked_value, Rmark); 3603 3604 // Initialize the box. (Must happen before we update the object mark!) 3605 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3606 3607 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 3608 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3609 casx_under_lock(mark_addr.base(), Rmark, Rscratch, 3610 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3611 3612 // if compare/exchange succeeded we found an unlocked object and we now have locked it; 3613 // hence we are done 3614 cmp(Rmark, Rscratch); 3615 #ifdef _LP64 3616 sub(Rscratch, STACK_BIAS, Rscratch); 3617 #endif 3618 brx(Assembler::equal, false, Assembler::pt, done); 3619 delayed()->sub(Rscratch, SP, Rscratch); // pull next instruction into delay slot 3620 3621 // we did not find an unlocked object so see if this is a recursive case 3622 // sub(Rscratch, SP, Rscratch); 3623 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 3624 andcc(Rscratch, 0xfffff003, Rscratch); 3625 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3626 bind (done); 3627 return ; 3628 } 3629 3630 Label Egress ; 3631 3632 if (EmitSync & 256) { 3633 Label IsInflated ; 3634 3635 ld_ptr(mark_addr, Rmark); // fetch obj->mark 3636 // Triage: biased, stack-locked, neutral, inflated 3637 if (try_bias) { 3638 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 3639 // Invariant: if control reaches this point in the emitted stream 3640 // then Rmark has not been modified. 3641 } 3642 3643 // Store mark into displaced mark field in the on-stack basic-lock "box" 3644 // Critically, this must happen before the CAS 3645 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
3646 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3647 andcc(Rmark, 2, G0); 3648 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 3649 delayed()-> 3650 3651 // Try stack-lock acquisition. 3652 // Beware: the 1st instruction is in a delay slot 3653 mov(Rbox, Rscratch); 3654 or3(Rmark, markOopDesc::unlocked_value, Rmark); 3655 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3656 casn(mark_addr.base(), Rmark, Rscratch); 3657 cmp(Rmark, Rscratch); 3658 brx(Assembler::equal, false, Assembler::pt, done); 3659 delayed()->sub(Rscratch, SP, Rscratch); 3660 3661 // Stack-lock attempt failed - check for recursive stack-lock. 3662 // See the comments below about how we might remove this case. 3663 #ifdef _LP64 3664 sub(Rscratch, STACK_BIAS, Rscratch); 3665 #endif 3666 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 3667 andcc(Rscratch, 0xfffff003, Rscratch); 3668 br(Assembler::always, false, Assembler::pt, done); 3669 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3670 3671 bind(IsInflated); 3672 if (EmitSync & 64) { 3673 // If m->owner != null goto IsLocked 3674 // Pessimistic form: Test-and-CAS vs CAS 3675 // The optimistic form avoids RTS->RTO cache line upgrades. 3676 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); 3677 andcc(Rscratch, Rscratch, G0); 3678 brx(Assembler::notZero, false, Assembler::pn, done); 3679 delayed()->nop(); 3680 // m->owner == null : it's unlocked. 3681 } 3682 3683 // Try to CAS m->owner from null to Self 3684 // Invariant: if we acquire the lock then _recursions should be 0. 3685 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 3686 mov(G2_thread, Rscratch); 3687 casn(Rmark, G0, Rscratch); 3688 cmp(Rscratch, G0); 3689 // Intentional fall-through into done 3690 } else { 3691 // Aggressively avoid the Store-before-CAS penalty 3692 // Defer the store into box->dhw until after the CAS 3693 Label IsInflated, Recursive ; 3694 3695 // Anticipate CAS -- Avoid RTS->RTO upgrade 3696 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 3697 3698 ld_ptr(mark_addr, Rmark); // fetch obj->mark 3699 // Triage: biased, stack-locked, neutral, inflated 3700 3701 if (try_bias) { 3702 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 3703 // Invariant: if control reaches this point in the emitted stream 3704 // then Rmark has not been modified. 3705 } 3706 andcc(Rmark, 2, G0); 3707 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 3708 delayed()-> // Beware - dangling delay-slot 3709 3710 // Try stack-lock acquisition. 3711 // Transiently install BUSY (0) encoding in the mark word. 3712 // if the CAS of 0 into the mark was successful then we execute: 3713 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 3714 // ST obj->mark = box -- overwrite transient 0 value 3715 // This presumes TSO, of course. 
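// Sketched in C (assumes TSO; Atomic::cmpxchg_ptr stands in for CASN):
//   markOop m = obj->mark() | unlocked_value;            // presumed neutral
//   if (Atomic::cmpxchg_ptr((markOop) 0, obj->mark_addr(), m) == m) {
//     box->set_displaced_header(m);                      // ST box->dhw = mark
//     obj->set_mark((markOop) box);                      // overwrite transient 0
//   }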
3716 3717 mov(0, Rscratch); 3718 or3(Rmark, markOopDesc::unlocked_value, Rmark); 3719 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3720 casn(mark_addr.base(), Rmark, Rscratch); 3721 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 3722 cmp(Rscratch, Rmark); 3723 brx(Assembler::notZero, false, Assembler::pn, Recursive); 3724 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3725 if (counters != NULL) { 3726 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 3727 } 3728 ba(done); 3729 delayed()->st_ptr(Rbox, mark_addr); 3730 3731 bind(Recursive); 3732 // Stack-lock attempt failed - check for recursive stack-lock. 3733 // Tests show that we can remove the recursive case with no impact 3734 // on refworkload 0.83. If we need to reduce the size of the code 3735 // emitted by compiler_lock_object() the recursive case is a perfect 3736 // candidate. 3737 // 3738 // A more extreme idea is to always inflate on stack-lock recursion. 3739 // This lets us eliminate the recursive checks in compiler_lock_object 3740 // and compiler_unlock_object and the (box->dhw == 0) encoding. 3741 // A brief experiment requiring changes to synchronizer.cpp and the 3742 // interpreter showed a performance *increase*. In the same experiment I eliminated 3743 // the fast-path stack-lock code from the interpreter and always passed 3744 // control to the "slow" operators in synchronizer.cpp. 3745 3746 // Rscratch contains the fetched obj->mark value from the failed CASN. 3747 #ifdef _LP64 3748 sub(Rscratch, STACK_BIAS, Rscratch); 3749 #endif 3750 sub(Rscratch, SP, Rscratch); 3751 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 3752 andcc(Rscratch, 0xfffff003, Rscratch); 3753 if (counters != NULL) { 3754 // Accounting needs the Rscratch register 3755 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3756 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 3757 ba_short(done); 3758 } else { 3759 ba(done); 3760 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3761 } 3762 3763 bind (IsInflated); 3764 if (EmitSync & 64) { 3765 // If m->owner != null goto IsLocked 3766 // Test-and-CAS vs CAS 3767 // Pessimistic form avoids futile (doomed) CAS attempts 3768 // The optimistic form avoids RTS->RTO cache line upgrades. 3769 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); 3770 andcc(Rscratch, Rscratch, G0); 3771 brx(Assembler::notZero, false, Assembler::pn, done); 3772 delayed()->nop(); 3773 // m->owner == null : it's unlocked. 3774 } 3775 3776 // Try to CAS m->owner from null to Self 3777 // Invariant: if we acquire the lock then _recursions should be 0. 3778 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 3779 mov(G2_thread, Rscratch); 3780 casn(Rmark, G0, Rscratch); 3781 cmp(Rscratch, G0); 3782 // ST box->displaced_header = NonZero. 3783 // Any non-zero value suffices: 3784 // unused_mark(), G2_thread, Rbox, Rscratch, rsp, etc.
3785 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3786 // Intentional fall-through into done 3787 } 3788 3789 bind (done); 3790 } 3791 3792 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, 3793 Register Rbox, Register Rscratch, 3794 bool try_bias) { 3795 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 3796 3797 Label done ; 3798 3799 if (EmitSync & 4) { 3800 cmp(SP, G0); 3801 return ; 3802 } 3803 3804 if (EmitSync & 8) { 3805 if (try_bias) { 3806 biased_locking_exit(mark_addr, Rscratch, done); 3807 } 3808 3809 // Test first if it is a fast recursive unlock 3810 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark); 3811 br_null_short(Rmark, Assembler::pt, done); 3812 3813 // Check if it is still a lightweight lock; this is true if we see 3814 // the stack address of the basicLock in the markOop of the object 3815 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3816 casx_under_lock(mark_addr.base(), Rbox, Rmark, 3817 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3818 ba(done); 3819 delayed()->cmp(Rbox, Rmark); 3820 bind(done); 3821 return ; 3822 } 3823 3824 // Beware ... If the aggregate size of the code emitted by CLO and CUO 3825 // is too large, performance rolls abruptly off a cliff. 3826 // This could be related to inlining policies, code cache management, or 3827 // I$ effects. 3828 Label LStacked ; 3829 3830 if (try_bias) { 3831 // TODO: eliminate redundant LDs of obj->mark 3832 biased_locking_exit(mark_addr, Rscratch, done); 3833 } 3834 3835 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 3836 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 3837 andcc(Rscratch, Rscratch, G0); 3838 brx(Assembler::zero, false, Assembler::pn, done); 3839 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 3840 andcc(Rmark, 2, G0); 3841 brx(Assembler::zero, false, Assembler::pt, LStacked); 3842 delayed()->nop(); 3843 3844 // It's inflated 3845 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 3846 // the ST of 0 into _owner which releases the lock. This prevents loads 3847 // and stores within the critical section from reordering (floating) 3848 // past the store that releases the lock. But TSO is a strong memory model 3849 // and that particular flavor of barrier is a noop, so we can safely elide it. 3850 // Note that we use 1-0 locking by default for the inflated case. We 3851 // close the resultant (and rare) race by having contended threads in 3852 // monitorenter periodically poll _owner.
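// The inflated-exit sequence below corresponds roughly to this C sketch
// (ObjectMonitor field names; simplified):
//   if (m->_recursions != 0 || m->_owner != Self) goto done;   // not a simple exit
//   if (m->_EntryList == NULL && m->_cxq == NULL) {
//     m->_owner = NULL;          // 1-0 release: a plain store, no CAS
//   } else {
//     ... succession: wake or hand off to a contending thread ...
//   }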
3853 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); 3854 ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox); 3855 xor3(Rscratch, G2_thread, Rscratch); 3856 orcc(Rbox, Rscratch, Rbox); 3857 brx(Assembler::notZero, false, Assembler::pn, done); 3858 delayed()-> 3859 ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch); 3860 ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox); 3861 orcc(Rbox, Rscratch, G0); 3862 if (EmitSync & 65536) { 3863 Label LSucc ; 3864 brx(Assembler::notZero, false, Assembler::pn, LSucc); 3865 delayed()->nop(); 3866 ba(done); 3867 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3868 3869 bind(LSucc); 3870 st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3871 if (os::is_MP()) { membar (StoreLoad); } 3872 ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch); 3873 andcc(Rscratch, Rscratch, G0); 3874 brx(Assembler::notZero, false, Assembler::pt, done); 3875 delayed()->andcc(G0, G0, G0); 3876 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 3877 mov(G2_thread, Rscratch); 3878 casn(Rmark, G0, Rscratch); 3879 // invert icc.zf and goto done 3880 br_notnull(Rscratch, false, Assembler::pt, done); 3881 delayed()->cmp(G0, G0); 3882 ba(done); 3883 delayed()->cmp(G0, 1); 3884 } else { 3885 brx(Assembler::notZero, false, Assembler::pn, done); 3886 delayed()->nop(); 3887 ba(done); 3888 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3889 } 3890 3891 bind (LStacked); 3892 // Consider: we could replace the expensive CAS in the exit 3893 // path with a simple ST of the displaced mark value fetched from 3894 // the on-stack basiclock box. That admits a race where a thread T2 3895 // in the slow lock path -- inflating with monitor M -- could race a 3896 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 3897 // More precisely T1 in the stack-lock unlock path could "stomp" the 3898 // inflated mark value M installed by T2, resulting in an orphan 3899 // object monitor M and T2 becoming stranded. We can remedy that situation 3900 // by having T2 periodically poll the object's mark word using timed wait 3901 // operations. If T2 discovers that a stomp has occurred, it vacates 3902 // the monitor M and wakes any other threads stranded on the now-orphan M. 3903 // In addition the monitor scavenger, which performs deflation, 3904 // would also need to check for orphan monitors and stranded threads. 3905 // 3906 // Finally, inflation is also used when T2 needs to assign a hashCode 3907 // to O and O is stack-locked by T1. The "stomp" race could cause 3908 // an assigned hashCode value to be lost. We can avoid that condition 3909 // and provide the necessary hashCode stability invariants by ensuring 3910 // that hashCode generation is idempotent between copying GCs. 3911 // For example we could compute the hashCode of an object O as 3912 // O's heap address XOR some high quality RNG value that is refreshed 3913 // at GC-time. The monitor scavenger would install the hashCode 3914 // found in any orphan monitors. Again, the mechanism admits a 3915 // lost-update "stomp" WAW race but detects and recovers as needed. 3916 // 3917 // A prototype implementation showed excellent results, although 3918 // the scavenger and timeout code was rather involved. 3919 3920 casn(mark_addr.base(), Rbox, Rscratch); 3921 cmp(Rbox, Rscratch); 3922 // Intentional fall through into done ...
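// In C terms the exit CAS just emitted is roughly (a sketch):
//   success = (Atomic::cmpxchg_ptr(displaced_hdr, obj->mark_addr(), box) == box);
// i.e. the fast unlock succeeds only if the mark word still points at our box.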
3923 3924 bind(done); 3925 } 3926 3927 3928 3929 void MacroAssembler::print_CPU_state() { 3930 // %%%%% need to implement this 3931 } 3932 3933 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3934 // %%%%% need to implement this 3935 } 3936 3937 void MacroAssembler::push_IU_state() { 3938 // %%%%% need to implement this 3939 } 3940 3941 3942 void MacroAssembler::pop_IU_state() { 3943 // %%%%% need to implement this 3944 } 3945 3946 3947 void MacroAssembler::push_FPU_state() { 3948 // %%%%% need to implement this 3949 } 3950 3951 3952 void MacroAssembler::pop_FPU_state() { 3953 // %%%%% need to implement this 3954 } 3955 3956 3957 void MacroAssembler::push_CPU_state() { 3958 // %%%%% need to implement this 3959 } 3960 3961 3962 void MacroAssembler::pop_CPU_state() { 3963 // %%%%% need to implement this 3964 } 3965 3966 3967 3968 void MacroAssembler::verify_tlab() { 3969 #ifdef ASSERT 3970 if (UseTLAB && VerifyOops) { 3971 Label next, next2, ok; 3972 Register t1 = L0; 3973 Register t2 = L1; 3974 Register t3 = L2; 3975 3976 save_frame(0); 3977 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3978 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3979 or3(t1, t2, t3); 3980 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3981 stop("assert(top >= start)"); 3982 should_not_reach_here(); 3983 3984 bind(next); 3985 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3986 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3987 or3(t3, t2, t3); 3988 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3989 stop("assert(top <= end)"); 3990 should_not_reach_here(); 3991 3992 bind(next2); 3993 and3(t3, MinObjAlignmentInBytesMask, t3); 3994 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3995 stop("assert(aligned)"); 3996 should_not_reach_here(); 3997 3998 bind(ok); 3999 restore(); 4000 } 4001 #endif 4002 } 4003 4004 4005 void MacroAssembler::eden_allocate( 4006 Register obj, // result: pointer to object after successful allocation 4007 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 4008 int con_size_in_bytes, // object size in bytes if known at compile time 4009 Register t1, // temp register 4010 Register t2, // temp register 4011 Label& slow_case // continuation point if fast allocation fails 4012 ){ 4013 // make sure arguments make sense 4014 assert_different_registers(obj, var_size_in_bytes, t1, t2); 4015 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 4016 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 4017 4018 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 4019 // No allocation in the shared eden. 4020 ba_short(slow_case); 4021 } else { 4022 // get eden boundaries 4023 // note: we need both top & top_addr! 
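// The fast path below is the classic CAS bump-pointer loop; in rough C
// terms (a sketch, ignoring the alignment asserts):
//   char* obj;
//   do {
//     obj = *top_addr;
//     if (*end_addr - obj < size_in_bytes) goto slow_case;   // not enough room
//   } while (Atomic::cmpxchg_ptr(obj + size_in_bytes, top_addr, obj) != obj);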
4024 const Register top_addr = t1; 4025 const Register end = t2; 4026 4027 CollectedHeap* ch = Universe::heap(); 4028 set((intx)ch->top_addr(), top_addr); 4029 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 4030 ld_ptr(top_addr, delta, end); 4031 ld_ptr(top_addr, 0, obj); 4032 4033 // try to allocate 4034 Label retry; 4035 bind(retry); 4036 #ifdef ASSERT 4037 // make sure eden top is properly aligned 4038 { 4039 Label L; 4040 btst(MinObjAlignmentInBytesMask, obj); 4041 br(Assembler::zero, false, Assembler::pt, L); 4042 delayed()->nop(); 4043 stop("eden top is not properly aligned"); 4044 bind(L); 4045 } 4046 #endif // ASSERT 4047 const Register free = end; 4048 sub(end, obj, free); // compute amount of free space 4049 if (var_size_in_bytes->is_valid()) { 4050 // size is unknown at compile time 4051 cmp(free, var_size_in_bytes); 4052 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 4053 delayed()->add(obj, var_size_in_bytes, end); 4054 } else { 4055 // size is known at compile time 4056 cmp(free, con_size_in_bytes); 4057 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 4058 delayed()->add(obj, con_size_in_bytes, end); 4059 } 4060 // Compare obj with the value at top_addr; if still equal, swap the value of 4061 // end with the value at top_addr. If not equal, read the value at top_addr 4062 // into end. 4063 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 4064 // if someone beat us on the allocation, try again, otherwise continue 4065 cmp(obj, end); 4066 brx(Assembler::notEqual, false, Assembler::pn, retry); 4067 delayed()->mov(end, obj); // nop if successful since obj == end 4068 4069 #ifdef ASSERT 4070 // make sure eden top is properly aligned 4071 { 4072 Label L; 4073 const Register top_addr = t1; 4074 4075 set((intx)ch->top_addr(), top_addr); 4076 ld_ptr(top_addr, 0, top_addr); 4077 btst(MinObjAlignmentInBytesMask, top_addr); 4078 br(Assembler::zero, false, Assembler::pt, L); 4079 delayed()->nop(); 4080 stop("eden top is not properly aligned"); 4081 bind(L); 4082 } 4083 #endif // ASSERT 4084 } 4085 } 4086 4087 4088 void MacroAssembler::tlab_allocate( 4089 Register obj, // result: pointer to object after successful allocation 4090 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 4091 int con_size_in_bytes, // object size in bytes if known at compile time 4092 Register t1, // temp register 4093 Label& slow_case // continuation point if fast allocation fails 4094 ){ 4095 // make sure arguments make sense 4096 assert_different_registers(obj, var_size_in_bytes, t1); 4097 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 4098 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 4099 4100 const Register free = t1; 4101 4102 verify_tlab(); 4103 4104 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 4105 4106 // calculate amount of free space 4107 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 4108 sub(free, obj, free); 4109 4110 Label done; 4111 if (var_size_in_bytes == noreg) { 4112 cmp(free, con_size_in_bytes); 4113 } else { 4114 cmp(free, var_size_in_bytes); 4115 } 4116 br(Assembler::less, false, Assembler::pn, slow_case); 4117 // calculate the new top pointer 4118 if (var_size_in_bytes == noreg) { 4119 delayed()->add(obj,
con_size_in_bytes, free); 4120 } else { 4121 delayed()->add(obj, var_size_in_bytes, free); 4122 } 4123 4124 bind(done); 4125 4126 #ifdef ASSERT 4127 // make sure new free pointer is properly aligned 4128 { 4129 Label L; 4130 btst(MinObjAlignmentInBytesMask, free); 4131 br(Assembler::zero, false, Assembler::pt, L); 4132 delayed()->nop(); 4133 stop("updated TLAB free is not properly aligned"); 4134 bind(L); 4135 } 4136 #endif // ASSERT 4137 4138 // update the tlab top pointer 4139 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 4140 verify_tlab(); 4141 } 4142 4143 4144 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { 4145 Register top = O0; 4146 Register t1 = G1; 4147 Register t2 = G3; 4148 Register t3 = O1; 4149 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); 4150 Label do_refill, discard_tlab; 4151 4152 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 4153 // No allocation in the shared eden. 4154 ba_short(slow_case); 4155 } 4156 4157 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); 4158 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); 4159 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); 4160 4161 // calculate amount of free space 4162 sub(t1, top, t1); 4163 srl_ptr(t1, LogHeapWordSize, t1); 4164 4165 // Retain tlab and allocate object in shared space if 4166 // the amount free in the tlab is too large to discard. 4167 cmp(t1, t2); 4168 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab); 4169 4170 // increment waste limit to prevent getting stuck on this slow path 4171 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2); 4172 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); 4173 if (TLABStats) { 4174 // increment number of slow_allocations 4175 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); 4176 add(t2, 1, t2); 4177 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); 4178 } 4179 ba_short(try_eden); 4180 4181 bind(discard_tlab); 4182 if (TLABStats) { 4183 // increment number of refills 4184 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); 4185 add(t2, 1, t2); 4186 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); 4187 // accumulate wastage 4188 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); 4189 add(t2, t1, t2); 4190 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); 4191 } 4192 4193 // if tlab is currently allocated (top or end != null) then 4194 // fill [top, end + alignment_reserve) with array object 4195 br_null_short(top, Assembler::pn, do_refill); 4196 4197 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); 4198 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word 4199 // set klass to intArrayKlass 4200 sub(t1, typeArrayOopDesc::header_size(T_INT), t1); 4201 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); 4202 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); 4203 st(t1, top, arrayOopDesc::length_offset_in_bytes()); 4204 set((intptr_t)Universe::intArrayKlassObj_addr(), t2); 4205 ld_ptr(t2, 0, t2); 4206 // store klass last. concurrent GCs assume the length is valid if the 4207 // klass field is not null.
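// In outline the dead TLAB tail is published as an int[] so the heap stays
// parseable (a sketch of the three stores; the ordering is the point):
//   top->mark   = prototype mark with hash;   // stored above
//   top->length = waste in jints;             // stored above
//   top->klass  = intArrayKlass;              // store_klass below, last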
4208 store_klass(t2, top); 4209 verify_oop(top); 4210 4211 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1); 4212 sub(top, t1, t1); // size of tlab's allocated portion 4213 incr_allocated_bytes(t1, t2, t3); 4214 4215 // refill the tlab with an eden allocation 4216 bind(do_refill); 4217 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); 4218 sll_ptr(t1, LogHeapWordSize, t1); 4219 // allocate new tlab, address returned in top 4220 eden_allocate(top, t1, 0, t2, t3, slow_case); 4221 4222 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); 4223 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 4224 #ifdef ASSERT 4225 // check that tlab_size (t1) is still valid 4226 { 4227 Label ok; 4228 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); 4229 sll_ptr(t2, LogHeapWordSize, t2); 4230 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); 4231 stop("assert(t1 == tlab_size)"); 4232 should_not_reach_here(); 4233 4234 bind(ok); 4235 } 4236 #endif // ASSERT 4237 add(top, t1, top); // t1 is tlab_size 4238 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); 4239 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); 4240 verify_tlab(); 4241 ba_short(retry); 4242 } 4243 4244 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 4245 Register t1, Register t2) { 4246 // Bump total bytes allocated by this thread 4247 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 4248 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 4249 // v8 support has gone the way of the dodo 4250 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 4251 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 4252 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 4253 } 4254 4255 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 4256 switch (cond) { 4257 // Note some conditions are synonyms for others 4258 case Assembler::never: return Assembler::always; 4259 case Assembler::zero: return Assembler::notZero; 4260 case Assembler::lessEqual: return Assembler::greater; 4261 case Assembler::less: return Assembler::greaterEqual; 4262 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 4263 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 4264 case Assembler::negative: return Assembler::positive; 4265 case Assembler::overflowSet: return Assembler::overflowClear; 4266 case Assembler::always: return Assembler::never; 4267 case Assembler::notZero: return Assembler::zero; 4268 case Assembler::greater: return Assembler::lessEqual; 4269 case Assembler::greaterEqual: return Assembler::less; 4270 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 4271 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 4272 case Assembler::positive: return Assembler::negative; 4273 case Assembler::overflowClear: return Assembler::overflowSet; 4274 } 4275 4276 ShouldNotReachHere(); return Assembler::overflowClear; 4277 } 4278 4279 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 4280 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 4281 Condition negated_cond = negate_condition(cond); 4282 Label L; 4283 brx(negated_cond, false, Assembler::pt, L); 4284 delayed()->nop(); 4285 inc_counter(counter_ptr, Rtmp1, Rtmp2); 4286 bind(L); 4287 } 4288 4289 void MacroAssembler::inc_counter(address 
counter_addr, Register Rtmp1, Register Rtmp2) { 4290 AddressLiteral addrlit(counter_addr); 4291 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 4292 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 4293 ld(addr, Rtmp2); 4294 inc(Rtmp2); 4295 st(Rtmp2, addr); 4296 } 4297 4298 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 4299 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 4300 } 4301 4302 SkipIfEqual::SkipIfEqual( 4303 MacroAssembler* masm, Register temp, const bool* flag_addr, 4304 Assembler::Condition condition) { 4305 _masm = masm; 4306 AddressLiteral flag(flag_addr); 4307 _masm->sethi(flag, temp); 4308 _masm->ldub(temp, flag.low10(), temp); 4309 _masm->tst(temp); 4310 _masm->br(condition, false, Assembler::pt, _label); 4311 _masm->delayed()->nop(); 4312 } 4313 4314 SkipIfEqual::~SkipIfEqual() { 4315 _masm->bind(_label); 4316 } 4317 4318 4319 // Writes to successive stack pages until the given offset is reached, to 4320 // check for stack overflow + shadow pages. This clobbers Rtsp and Rscratch. 4321 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 4322 Register Rscratch) { 4323 // Initialize Rtsp, the temporary stack pointer, from SP 4324 mov(SP, Rtsp); 4325 4326 // Bang stack for total size given plus stack shadow page size. 4327 // Bang one page at a time because a large size can overflow yellow and 4328 // red zones (the bang will fail but stack overflow handling can't tell that 4329 // it was a stack overflow bang vs a regular segv). 4330 int offset = os::vm_page_size(); 4331 Register Roffset = Rscratch; 4332 4333 Label loop; 4334 bind(loop); 4335 set((-offset)+STACK_BIAS, Rscratch); 4336 st(G0, Rtsp, Rscratch); 4337 set(offset, Roffset); 4338 sub(Rsize, Roffset, Rsize); 4339 cmp(Rsize, G0); 4340 br(Assembler::greater, false, Assembler::pn, loop); 4341 delayed()->sub(Rtsp, Roffset, Rtsp); 4342 4343 // Bang down shadow pages too. 4344 // The -1 because we already subtracted 1 page. 4345 for (int i = 0; i < StackShadowPages-1; i++) { 4346 set((-i*offset)+STACK_BIAS, Rscratch); 4347 st(G0, Rtsp, Rscratch); 4348 } 4349 } 4350 4351 /////////////////////////////////////////////////////////////////////////////////// 4352 #ifndef SERIALGC 4353 4354 static address satb_log_enqueue_with_frame = NULL; 4355 static u_char* satb_log_enqueue_with_frame_end = NULL; 4356 4357 static address satb_log_enqueue_frameless = NULL; 4358 static u_char* satb_log_enqueue_frameless_end = NULL; 4359 4360 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? 4361 4362 static void generate_satb_log_enqueue(bool with_frame) { 4363 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 4364 CodeBuffer buf(bb); 4365 MacroAssembler masm(&buf); 4366 4367 #define __ masm. 4368 4369 address start = __ pc(); 4370 Register pre_val; 4371 4372 Label refill, restart; 4373 if (with_frame) { 4374 __ save_frame(0); 4375 pre_val = I0; // Was O0 before the save.
4376 } else { 4377 pre_val = O0; 4378 } 4379 int satb_q_index_byte_offset = 4380 in_bytes(JavaThread::satb_mark_queue_offset() + 4381 PtrQueue::byte_offset_of_index()); 4382 int satb_q_buf_byte_offset = 4383 in_bytes(JavaThread::satb_mark_queue_offset() + 4384 PtrQueue::byte_offset_of_buf()); 4385 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) && 4386 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t), 4387 "check sizes in assembly below"); 4388 4389 __ bind(restart); 4390 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); 4391 4392 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill); 4393 // If the branch is taken, no harm in executing this in the delay slot. 4394 __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); 4395 __ sub(L0, oopSize, L0); 4396 4397 __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0 4398 if (!with_frame) { 4399 // Use return-from-leaf 4400 __ retl(); 4401 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); 4402 } else { 4403 // Not delayed. 4404 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset); 4405 } 4406 if (with_frame) { 4407 __ ret(); 4408 __ delayed()->restore(); 4409 } 4410 __ bind(refill); 4411 4412 address handle_zero = 4413 CAST_FROM_FN_PTR(address, 4414 &SATBMarkQueueSet::handle_zero_index_for_thread); 4415 // This should be rare enough that we can afford to save all the 4416 // scratch registers that the calling context might be using. 4417 __ mov(G1_scratch, L0); 4418 __ mov(G3_scratch, L1); 4419 __ mov(G4, L2); 4420 // We need the value of O0 above (for the write into the buffer), so we 4421 // save and restore it. 4422 __ mov(O0, L3); 4423 // Since the call will overwrite O7, we save and restore that, as well. 4424 __ mov(O7, L4); 4425 __ call_VM_leaf(L5, handle_zero, G2_thread); 4426 __ mov(L0, G1_scratch); 4427 __ mov(L1, G3_scratch); 4428 __ mov(L2, G4); 4429 __ mov(L3, O0); 4430 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 4431 __ delayed()->mov(L4, O7); 4432 4433 if (with_frame) { 4434 satb_log_enqueue_with_frame = start; 4435 satb_log_enqueue_with_frame_end = __ pc(); 4436 } else { 4437 satb_log_enqueue_frameless = start; 4438 satb_log_enqueue_frameless_end = __ pc(); 4439 } 4440 4441 #undef __ 4442 } 4443 4444 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) { 4445 if (with_frame) { 4446 if (satb_log_enqueue_with_frame == 0) { 4447 generate_satb_log_enqueue(with_frame); 4448 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 4449 if (G1SATBPrintStubs) { 4450 tty->print_cr("Generated with-frame satb enqueue:"); 4451 Disassembler::decode((u_char*)satb_log_enqueue_with_frame, 4452 satb_log_enqueue_with_frame_end, 4453 tty); 4454 } 4455 } 4456 } else { 4457 if (satb_log_enqueue_frameless == 0) { 4458 generate_satb_log_enqueue(with_frame); 4459 assert(satb_log_enqueue_frameless != 0, "postcondition."); 4460 if (G1SATBPrintStubs) { 4461 tty->print_cr("Generated frameless satb enqueue:"); 4462 Disassembler::decode((u_char*)satb_log_enqueue_frameless, 4463 satb_log_enqueue_frameless_end, 4464 tty); 4465 } 4466 } 4467 } 4468 } 4469 4470 void MacroAssembler::g1_write_barrier_pre(Register obj, 4471 Register index, 4472 int offset, 4473 Register pre_val, 4474 Register tmp, 4475 bool preserve_o_regs) { 4476 Label filtered; 4477 4478 if (obj == noreg) { 4479 // We are not loading the previous value so make 4480 // sure that we don't trash the value in pre_val 4481 // with the code below. 
4482 assert_different_registers(pre_val, tmp); 4483 } else { 4484 // We will be loading the previous value 4485 // in this code so... 4486 assert(offset == 0 || index == noreg, "choose one"); 4487 assert(pre_val == noreg, "check this code"); 4488 } 4489 4490 // Is marking active? 4491 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { 4492 ld(G2, 4493 in_bytes(JavaThread::satb_mark_queue_offset() + 4494 PtrQueue::byte_offset_of_active()), 4495 tmp); 4496 } else { 4497 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, 4498 "Assumption"); 4499 ldsb(G2, 4500 in_bytes(JavaThread::satb_mark_queue_offset() + 4501 PtrQueue::byte_offset_of_active()), 4502 tmp); 4503 } 4504 4505 // Check on whether to annul. 4506 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered); 4507 delayed()->nop(); 4508 4509 // Do we need to load the previous value? 4510 if (obj != noreg) { 4511 // Load the previous value... 4512 if (index == noreg) { 4513 if (Assembler::is_simm13(offset)) { 4514 load_heap_oop(obj, offset, tmp); 4515 } else { 4516 set(offset, tmp); 4517 load_heap_oop(obj, tmp, tmp); 4518 } 4519 } else { 4520 load_heap_oop(obj, index, tmp); 4521 } 4522 // Previous value has been loaded into tmp 4523 pre_val = tmp; 4524 } 4525 4526 assert(pre_val != noreg, "must have a real register"); 4527 4528 // Is the previous value null? 4529 // Check on whether to annul. 4530 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered); 4531 delayed()->nop(); 4532 4533 // OK, it's not filtered, so we'll need to call enqueue. In the normal 4534 // case, pre_val will be a scratch G-reg, but there are some cases in 4535 // which it's an O-reg. In the first case, do a normal call. In the 4536 // latter, do a save here and call the frameless version. 
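// The filtering above corresponds roughly to this C sketch (names are
// illustrative, not the exact VM API):
//   if (thread->satb_mark_queue().is_active()) {   // concurrent marking on?
//     oop pre = previous_value_of_field;           // loaded or caller-supplied
//     if (pre != NULL)
//       satb_enqueue(thread, pre);                 // the stub call below
//   }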
4537 4538 guarantee(pre_val->is_global() || pre_val->is_out(), 4539 "Or we need to think harder."); 4540 4541 if (pre_val->is_global() && !preserve_o_regs) { 4542 generate_satb_log_enqueue_if_necessary(true); // with frame 4543 4544 call(satb_log_enqueue_with_frame); 4545 delayed()->mov(pre_val, O0); 4546 } else { 4547 generate_satb_log_enqueue_if_necessary(false); // frameless 4548 4549 save_frame(0); 4550 call(satb_log_enqueue_frameless); 4551 delayed()->mov(pre_val->after_save(), O0); 4552 restore(); 4553 } 4554 4555 bind(filtered); 4556 } 4557 4558 static jint num_ct_writes = 0; 4559 static jint num_ct_writes_filtered_in_hr = 0; 4560 static jint num_ct_writes_filtered_null = 0; 4561 static G1CollectedHeap* g1 = NULL; 4562 4563 static Thread* count_ct_writes(void* filter_val, void* new_val) { 4564 Atomic::inc(&num_ct_writes); 4565 if (filter_val == NULL) { 4566 Atomic::inc(&num_ct_writes_filtered_in_hr); 4567 } else if (new_val == NULL) { 4568 Atomic::inc(&num_ct_writes_filtered_null); 4569 } else { 4570 if (g1 == NULL) { 4571 g1 = G1CollectedHeap::heap(); 4572 } 4573 } 4574 if ((num_ct_writes % 1000000) == 0) { 4575 jint num_ct_writes_filtered = 4576 num_ct_writes_filtered_in_hr + 4577 num_ct_writes_filtered_null; 4578 4579 tty->print_cr("%d potential CT writes: %5.2f%% filtered\n" 4580 " (%5.2f%% intra-HR, %5.2f%% null).", 4581 num_ct_writes, 4582 100.0*(float)num_ct_writes_filtered/(float)num_ct_writes, 4583 100.0*(float)num_ct_writes_filtered_in_hr/ 4584 (float)num_ct_writes, 4585 100.0*(float)num_ct_writes_filtered_null/ 4586 (float)num_ct_writes); 4587 } 4588 return Thread::current(); 4589 } 4590 4591 static address dirty_card_log_enqueue = 0; 4592 static u_char* dirty_card_log_enqueue_end = 0; 4593 4594 // This gets to assume that o0 contains the object address. 4595 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 4596 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 4597 CodeBuffer buf(bb); 4598 MacroAssembler masm(&buf); 4599 #define __ masm. 4600 address start = __ pc(); 4601 4602 Label not_already_dirty, restart, refill; 4603 4604 #ifdef _LP64 4605 __ srlx(O0, CardTableModRefBS::card_shift, O0); 4606 #else 4607 __ srl(O0, CardTableModRefBS::card_shift, O0); 4608 #endif 4609 AddressLiteral addrlit(byte_map_base); 4610 __ set(addrlit, O1); // O1 := <card table base> 4611 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 4612 4613 __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt, 4614 O2, not_already_dirty, false); 4615 // Get O1 + O2 into a reg by itself -- useful in the take-the-branch 4616 // case, harmless if not. 4617 __ delayed()->add(O0, O1, O3); 4618 4619 // We didn't take the branch, so we're already dirty: return. 4620 // Use return-from-leaf 4621 __ retl(); 4622 __ delayed()->nop(); 4623 4624 // Not dirty. 4625 __ bind(not_already_dirty); 4626 // First, dirty it. 4627 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty). 4628 int dirty_card_q_index_byte_offset = 4629 in_bytes(JavaThread::dirty_card_queue_offset() + 4630 PtrQueue::byte_offset_of_index()); 4631 int dirty_card_q_buf_byte_offset = 4632 in_bytes(JavaThread::dirty_card_queue_offset() + 4633 PtrQueue::byte_offset_of_buf()); 4634 __ bind(restart); 4635 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0); 4636 4637 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, 4638 L0, refill, false); 4639 // If the branch is taken, no harm in executing this in the delay slot. 
4640 __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1); 4641 __ sub(L0, oopSize, L0); 4642 4643 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 4644 // Use return-from-leaf 4645 __ retl(); 4646 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset); 4647 4648 __ bind(refill); 4649 address handle_zero = 4650 CAST_FROM_FN_PTR(address, 4651 &DirtyCardQueueSet::handle_zero_index_for_thread); 4652 // This should be rare enough that we can afford to save all the 4653 // scratch registers that the calling context might be using. 4654 __ mov(G1_scratch, L3); 4655 __ mov(G3_scratch, L5); 4656 // We need the value of O3 above (for the write into the buffer), so we 4657 // save and restore it. 4658 __ mov(O3, L6); 4659 // Since the call will overwrite O7, we save and restore that, as well. 4660 __ mov(O7, L4); 4661 4662 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread); 4663 __ mov(L3, G1_scratch); 4664 __ mov(L5, G3_scratch); 4665 __ mov(L6, O3); 4666 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 4667 __ delayed()->mov(L4, O7); 4668 4669 dirty_card_log_enqueue = start; 4670 dirty_card_log_enqueue_end = __ pc(); 4671 // XXX Should have a guarantee here about not going off the end! 4672 // Does it already do so? Do an experiment... 4673 4674 #undef __ 4675 4676 } 4677 4678 static inline void 4679 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) { 4680 if (dirty_card_log_enqueue == 0) { 4681 generate_dirty_card_log_enqueue(byte_map_base); 4682 assert(dirty_card_log_enqueue != 0, "postcondition."); 4683 if (G1SATBPrintStubs) { 4684 tty->print_cr("Generated dirty_card enqueue:"); 4685 Disassembler::decode((u_char*)dirty_card_log_enqueue, 4686 dirty_card_log_enqueue_end, 4687 tty); 4688 } 4689 } 4690 } 4691 4692 4693 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 4694 4695 Label filtered; 4696 MacroAssembler* post_filter_masm = this; 4697 4698 if (new_val == G0) return; 4699 4700 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 4701 assert(bs->kind() == BarrierSet::G1SATBCT || 4702 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 4703 if (G1RSBarrierRegionFilter) { 4704 xor3(store_addr, new_val, tmp); 4705 #ifdef _LP64 4706 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 4707 #else 4708 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 4709 #endif 4710 4711 if (G1PrintCTFilterStats) { 4712 guarantee(tmp->is_global(), "Or stats won't work..."); 4713 // This is a sleazy hack: I'm temporarily hijacking G2, which I 4714 // promise to restore. 4715 mov(new_val, G2); 4716 save_frame(0); 4717 mov(tmp, O0); 4718 mov(G2, O1); 4719 // Save G-regs that target may use. 4720 mov(G1, L1); 4721 mov(G2, L2); 4722 mov(G3, L3); 4723 mov(G4, L4); 4724 mov(G5, L5); 4725 call(CAST_FROM_FN_PTR(address, &count_ct_writes)); 4726 delayed()->nop(); 4727 mov(O0, G2); 4728 // Restore G-regs that target may have used. 4729 mov(L1, G1); 4730 mov(L3, G3); 4731 mov(L4, G4); 4732 mov(L5, G5); 4733 restore(G0, G0, G0); 4734 } 4735 // XXX Should I predict this taken or not? Does it matter? 4736 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered); 4737 } 4738 4739 // If the "store_addr" register is an "in" or "local" register, move it to 4740 // a scratch reg so we can pass it as an argument. 4741 bool use_scr = !(store_addr->is_global() || store_addr->is_out()); 4742 // Pick a scratch register different from "tmp". 4743 Register scr = (tmp == G1_scratch ?
G3_scratch : G1_scratch); 4744 // Make sure we use up the delay slot! 4745 if (use_scr) { 4746 post_filter_masm->mov(store_addr, scr); 4747 } else { 4748 post_filter_masm->nop(); 4749 } 4750 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base); 4751 save_frame(0); 4752 call(dirty_card_log_enqueue); 4753 if (use_scr) { 4754 delayed()->mov(scr, O0); 4755 } else { 4756 delayed()->mov(store_addr->after_save(), O0); 4757 } 4758 restore(); 4759 4760 bind(filtered); 4761 4762 } 4763 4764 #endif // SERIALGC 4765 /////////////////////////////////////////////////////////////////////////////////// 4766 4767 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 4768 // If we're writing constant NULL, we can skip the write barrier. 4769 if (new_val == G0) return; 4770 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); 4771 assert(bs->kind() == BarrierSet::CardTableModRef || 4772 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 4773 card_table_write(bs->byte_map_base, tmp, store_addr); 4774 } 4775 4776 void MacroAssembler::load_klass(Register src_oop, Register klass) { 4777 // The number of bytes in this code is used by 4778 // MachCallDynamicJavaNode::ret_addr_offset() 4779 // if this changes, change that. 4780 if (UseCompressedOops) { 4781 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 4782 decode_heap_oop_not_null(klass); 4783 } else { 4784 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 4785 } 4786 } 4787 4788 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 4789 if (UseCompressedOops) { 4790 assert(dst_oop != klass, "not enough registers"); 4791 encode_heap_oop_not_null(klass); 4792 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 4793 } else { 4794 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 4795 } 4796 } 4797 4798 void MacroAssembler::store_klass_gap(Register s, Register d) { 4799 if (UseCompressedOops) { 4800 assert(s != d, "not enough registers"); 4801 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 4802 } 4803 } 4804 4805 void MacroAssembler::load_heap_oop(const Address& s, Register d) { 4806 if (UseCompressedOops) { 4807 lduw(s, d); 4808 decode_heap_oop(d); 4809 } else { 4810 ld_ptr(s, d); 4811 } 4812 } 4813 4814 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) { 4815 if (UseCompressedOops) { 4816 lduw(s1, s2, d); 4817 decode_heap_oop(d, d); 4818 } else { 4819 ld_ptr(s1, s2, d); 4820 } 4821 } 4822 4823 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) { 4824 if (UseCompressedOops) { 4825 lduw(s1, simm13a, d); 4826 decode_heap_oop(d, d); 4827 } else { 4828 ld_ptr(s1, simm13a, d); 4829 } 4830 } 4831 4832 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) { 4833 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d); 4834 else load_heap_oop(s1, s2.as_register(), d); 4835 } 4836 4837 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) { 4838 if (UseCompressedOops) { 4839 assert(s1 != d && s2 != d, "not enough registers"); 4840 encode_heap_oop(d); 4841 st(d, s1, s2); 4842 } else { 4843 st_ptr(d, s1, s2); 4844 } 4845 } 4846 4847 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) { 4848 if (UseCompressedOops) { 4849 assert(s1 != d, "not enough registers"); 4850 encode_heap_oop(d); 4851 st(d, s1, simm13a); 4852 } else { 4853 st_ptr(d, s1, simm13a); 4854 } 4855 } 4856 4857 void MacroAssembler::store_heap_oop(Register d, 
const Address& a, int offset) { 4858 if (UseCompressedOops) { 4859 assert(a.base() != d, "not enough registers"); 4860 encode_heap_oop(d); 4861 st(d, a, offset); 4862 } else { 4863 st_ptr(d, a, offset); 4864 } 4865 } 4866 4867 4868 void MacroAssembler::encode_heap_oop(Register src, Register dst) { 4869 assert (UseCompressedOops, "must be compressed"); 4870 assert (Universe::heap() != NULL, "java heap should be initialized"); 4871 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4872 verify_oop(src); 4873 if (Universe::narrow_oop_base() == NULL) { 4874 srlx(src, LogMinObjAlignmentInBytes, dst); 4875 return; 4876 } 4877 Label done; 4878 if (src == dst) { 4879 // optimize for frequent case src == dst 4880 bpr(rc_nz, true, Assembler::pt, src, done); 4881 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken 4882 bind(done); 4883 srlx(src, LogMinObjAlignmentInBytes, dst); 4884 } else { 4885 bpr(rc_z, false, Assembler::pn, src, done); 4886 delayed() -> mov(G0, dst); 4887 // could be moved before the branch, annulling the delay slot, 4888 // but that may add some unneeded work decoding null 4889 sub(src, G6_heapbase, dst); 4890 srlx(dst, LogMinObjAlignmentInBytes, dst); 4891 bind(done); 4892 } 4893 } 4894 4895 4896 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4897 assert (UseCompressedOops, "must be compressed"); 4898 assert (Universe::heap() != NULL, "java heap should be initialized"); 4899 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4900 verify_oop(r); 4901 if (Universe::narrow_oop_base() != NULL) 4902 sub(r, G6_heapbase, r); 4903 srlx(r, LogMinObjAlignmentInBytes, r); 4904 } 4905 4906 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { 4907 assert (UseCompressedOops, "must be compressed"); 4908 assert (Universe::heap() != NULL, "java heap should be initialized"); 4909 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4910 verify_oop(src); 4911 if (Universe::narrow_oop_base() == NULL) { 4912 srlx(src, LogMinObjAlignmentInBytes, dst); 4913 } else { 4914 sub(src, G6_heapbase, dst); 4915 srlx(dst, LogMinObjAlignmentInBytes, dst); 4916 } 4917 } 4918 4919 // Same algorithm as oops.inline.hpp decode_heap_oop. 4920 void MacroAssembler::decode_heap_oop(Register src, Register dst) { 4921 assert (UseCompressedOops, "must be compressed"); 4922 assert (Universe::heap() != NULL, "java heap should be initialized"); 4923 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4924 sllx(src, LogMinObjAlignmentInBytes, dst); 4925 if (Universe::narrow_oop_base() != NULL) { 4926 Label done; 4927 bpr(rc_nz, true, Assembler::pt, dst, done); 4928 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken 4929 bind(done); 4930 } 4931 verify_oop(dst); 4932 } 4933 4934 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4935 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4936 // pd_code_size_limit. 4937 // Also do not verify_oop as this is called by verify_oop.
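// For reference, the decode implemented here is, in C terms (a sketch
// matching oops.inline.hpp):
//   oop decode_not_null(narrowOop v) {
//     return (oop) (base + ((uintptr_t) v << shift));
//   }
// with base = Universe::narrow_oop_base() (possibly NULL) and
// shift = LogMinObjAlignmentInBytes; the nullable variant maps 0 to NULL.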
4938 assert (UseCompressedOops, "must be compressed"); 4939 assert (Universe::heap() != NULL, "java heap should be initialized"); 4940 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4941 sllx(r, LogMinObjAlignmentInBytes, r); 4942 if (Universe::narrow_oop_base() != NULL) 4943 add(r, G6_heapbase, r); 4944 } 4945 4946 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { 4947 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4948 // pd_code_size_limit. 4949 // Also do not verify_oop as this is called by verify_oop. 4950 assert (UseCompressedOops, "must be compressed"); 4951 assert (Universe::heap() != NULL, "java heap should be initialized"); 4952 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4953 sllx(src, LogMinObjAlignmentInBytes, dst); 4954 if (Universe::narrow_oop_base() != NULL) 4955 add(dst, G6_heapbase, dst); 4956 } 4957 4958 void MacroAssembler::reinit_heapbase() { 4959 if (UseCompressedOops) { 4960 // call indirectly to solve generation ordering problem 4961 AddressLiteral base(Universe::narrow_oop_base_addr()); 4962 load_ptr_contents(base, G6_heapbase); 4963 } 4964 } 4965 4966 // Compare char[] arrays aligned to 4 bytes. 4967 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2, 4968 Register limit, Register result, 4969 Register chr1, Register chr2, Label& Ldone) { 4970 Label Lvector, Lloop; 4971 assert(chr1 == result, "should be the same"); 4972 4973 // Note: limit contains number of bytes (2*char_elements) != 0. 4974 andcc(limit, 0x2, chr1); // trailing character ? 4975 br(Assembler::zero, false, Assembler::pt, Lvector); 4976 delayed()->nop(); 4977 4978 // compare the trailing char 4979 sub(limit, sizeof(jchar), limit); 4980 lduh(ary1, limit, chr1); 4981 lduh(ary2, limit, chr2); 4982 cmp(chr1, chr2); 4983 br(Assembler::notEqual, true, Assembler::pt, Ldone); 4984 delayed()->mov(G0, result); // not equal 4985 4986 // only one char ? 4987 bpr(rc_z, true, Assembler::pn, limit, Ldone); 4988 delayed()->add(G0, 1, result); // zero-length arrays are equal 4989 4990 // word by word compare, don't need alignment check 4991 bind(Lvector); 4992 // Shift ary1 and ary2 to the end of the arrays, negate limit 4993 add(ary1, limit, ary1); 4994 add(ary2, limit, ary2); 4995 neg(limit, limit); 4996 4997 lduw(ary1, limit, chr1); 4998 bind(Lloop); 4999 lduw(ary2, limit, chr2); 5000 cmp(chr1, chr2); 5001 br(Assembler::notEqual, true, Assembler::pt, Ldone); 5002 delayed()->mov(G0, result); // not equal 5003 inccc(limit, 2*sizeof(jchar)); 5004 // annul LDUW if branch is not taken to prevent access past end of array 5005 br(Assembler::notZero, true, Assembler::pt, Lloop); 5006 delayed()->lduw(ary1, limit, chr1); // hoisted 5007 5008 // Caller should set it: 5009 // add(G0, 1, result); // equals 5010 }
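// A rough C sketch of char_arrays_equals above (assumes both arrays are
// 4-byte aligned and limit = 2 * char count != 0):
//   if (limit & 2) {                              // odd number of chars
//     limit -= sizeof(jchar);
//     if (a[limit / 2] != b[limit / 2]) return 0; // trailing char differs
//     if (limit == 0) return 1;                   // single-char arrays
//   }
//   for (int i = 0; i < limit; i += sizeof(jint)) // word-by-word
//     if (*(jint*) ((char*) a + i) != *(jint*) ((char*) b + i)) return 0;
//   return 1;                                     // caller materializes "equal"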