#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)assembler_sparc.cpp  1.208 07/08/29 13:42:15 JVM"
#endif
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"

// Implementation of Address

Address::Address( addr_type t, int which ) {
  switch (t) {
   case extra_in_argument:
   case extra_out_argument:
     _base = t == extra_in_argument ? FP : SP;
     _hi   = 0;
// Warning:  In LP64 mode, _disp will occupy more than 10 bits.
//           This is inconsistent with the other constructors, but opcodes
//           such as ld or ldx only access disp() to get their
//           simm13 argument.
     _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
    break;
   default:
    ShouldNotReachHere();
    break;
  }
}
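
// Worked example (a sketch; the exact constants are platform dependent):
// on SPARC, Argument::n_register_parameters is 6, so extra_out_argument
// slot 6 yields _disp = frame::memory_parameter_word_sp_offset * BytesPerWord
// + STACK_BIAS, the first memory-resident parameter word of the frame;
// each later slot adds another BytesPerWord.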

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:         s = "????"; break;
  case call_op:    s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    s = "bpr";  break;
      case fb_op2:     s = "fb";   break;
      case fbp_op2:    s = "fbp";  break;
      case br_op2:     s = "br";   break;
      case bp_op2:     s = "bp";   break;
      case cb_op2:     s = "cb";   break;
      default:         s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}


// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);     break;
      case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
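
// Patching example (a sketch): for a V9 "bp" at offset 0x40 retargeted to
// offset 0x80, wdisp(0x80, 0x40, 19) encodes the word displacement
// (0x80 - 0x40) >> 2 == 16 into the low 19 bits, and the mask m guarantees
// that every other bit of the original instruction survives.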

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    r = inv_wdisp16(inst, pos);    break;
      case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      default: ShouldNotReachHere();
    }
  }
  return r;
}

int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

// Generate a bunch 'o stuff (including v9 instructions) for testing
#ifndef PRODUCT
void Assembler::test_v9() {
  add(    G0, G1, G2 );
  add(    G3,  0, G4 );

  addcc(  G5, G6, G7 );
  addcc(  I0,  1, I1 );
  addc(   I2, I3, I4 );
  addc(   I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z,    true, pn, L4, pc(),  relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never,     true, pc() + 4,  relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();

  fbp( f_notZero,        true, fcc0, pn, pc() - 4,  relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal,  true, pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never, false, lbl1 );
  delayed()->nop();

  bp( less,               true, icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();


  casa(  L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv(   O4, O5, O7 );
  udiv(   G0, (1 << 12) - 1, G1 );
  sdiv(   G1, G2, G3 );
  sdiv(   G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0,  F1, F2 );
  fsub( FloatRegisterImpl::D, F34, F0, F62 );

  fcmp(  FloatRegisterImpl::Q, fcc0, F0, F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6, F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q,  F24, F28, F32 );
  fmul( FloatRegisterImpl::S,  FloatRegisterImpl::D,  F8, F9, F14 );
  fdiv( FloatRegisterImpl::S,  F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2( 3,  0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();


  ldf(    FloatRegisterImpl::S, O0, O1, F15 );
  ldf(    FloatRegisterImpl::D, O2, -1, F14 );


  ldfsr(  O3, O4 );
  ldfsr(  O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa(  FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa(  FloatRegisterImpl::Q, I3, -1,    F36 );

  ldsb(  I4, I5, I6 );
  ldsb(  I7, -1, G0 );
  ldsh(  G1, G3, G4 );
  ldsh(  G5, -1, G6 );
  ldsw(  G7, L0, L1 );
  ldsw(  L2, -1, L3 );
  ldub(  L4, L5, L6 );
  ldub(  L7, -1, O0 );
  lduh(  O1, O2, O3 );
  lduh(  O4, -1, O5 );
  lduw(  O6, O7, G0 );
  lduw(  G1, -1, G2 );
  ldx(   G3, G4, G5 );
  ldx(   G6, -1, G7 );
  ldd(   I0, I1, I2 );
  ldd(   I3, -1, I4 );

  ldsba(  I5, I6, 2, I7 );
  ldsba(  L0, -1, L1 );
  ldsha(  L2, L3, 3, L4 );
  ldsha(  L5, -1, L6 );
  ldswa(  L7, O0, (1 << 8) - 1, O1 );
  ldswa(  O2, -1, O3 );
  lduba(  O4, O5, 0, O6 );
  lduba(  O7, -1, I0 );
  lduha(  I1, I2, 1, I3 );
  lduha(  I4, -1, I5 );
  lduwa(  I6, I7, 2, L0 );
  lduwa(  L1, -1, L2 );
  ldxa(   L3, L4, 3, L5 );
  ldxa(   L6, -1, L7 );
  ldda(   G0, G1, 4, G2 );
  ldda(   G3, -1, G4 );

  ldstub(  G5, G6, G7 );
  ldstub(  O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3(    I0, L0, O0 );
  and3(    G7, -1, O7 );
  andcc(   L2, I2, G2 );
  andcc(   L4, -1, G4 );
  andn(    I5, I6, I7 );
  andn(    I6, -1, I7 );
  andncc(  I5, I6, I7 );
  andncc(  I7, -1, I6 );
  or3(     I5, I6, I7 );
  or3(     I7, -1, I6 );
  orcc(    I5, I6, I7 );
  orcc(    I7, -1, I6 );
  orn(     I5, I6, I7 );
  orn(     I7, -1, I6 );
  orncc(   I5, I6, I7 );
  orncc(   I7, -1, I6 );
  xor3(    I5, I6, I7 );
  xor3(    I7, -1, I6 );
  xorcc(   I5, I6, I7 );
  xorcc(   I7, -1, I6 );
  xnor(    I5, I6, I7 );
  xnor(    I7, -1, I6 );
  xnorcc(  I5, I6, I7 );
  xnorcc(  I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered,  true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear,  false, icc, I6, L4 );
  movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1,  L2 );

  mulx(  I5, I6, I7 );
  mulx(  I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul(   I5, I6, I7 );
  umul(   I7, -1, I6 );
  smul(   I5, I6, I7 );
  smul(   I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc(   I5, I6, I7 );
  mulscc(   I7, -1, I6 );

  nop();


  popc( G0,  G1);
  popc( -1, G2);

  prefetch(   L1, L2,    severalReads );
  prefetch(   L3, -1,    oneRead );
  prefetcha(  O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha(  G2, -1,    oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save(    I5, I6, I7 );
  save(    I7, -1, I6 );
  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll(  I5, I6, I7 );
  sll(  I7, 31, I6 );
  srl(  I5, I6, I7 );
  srl(  I7,  0, I6 );
  sra(  I5, I6, I7 );
  sra(  I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7,  0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();

  stf(    FloatRegisterImpl::Q, F40, G0, I7 );
  stf(    FloatRegisterImpl::S, F18, I3, -1 );

  stfsr(  L1, L2 );
  stfsr(  I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa(  FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa(  FloatRegisterImpl::Q, F44, G0, -1 );

  stb(  L5, O2, I7 );
  stb(  I7, I6, -1 );
  sth(  L5, O2, I7 );
  sth(  I7, I6, -1 );
  stw(  L5, O2, I7 );
  stw(  I7, I6, -1 );
  stx(  L5, O2, I7 );
  stx(  I7, I6, -1 );
  std(  L5, O2, I7 );
  std(  I7, I6, -1 );

  stba(  L5, O2, I7, 8 );
  stba(  I7, I6, -1    );
  stha(  L5, O2, I7, 9 );
  stha(  I7, I6, -1    );
  stwa(  L5, O2, I7, 0 );
  stwa(  I7, I6, -1    );
  stxa(  L5, O2, I7, 11 );
  stxa(  I7, I6, -1     );
  stda(  L5, O2, I7, 12 );
  stda(  I7, I6, -1     );

  sub(    I5, I6, I7 );
  sub(    I7, -1, I6 );
  subcc(  I5, I6, I7 );
  subcc(  I7, -1, I6 );
  subc(   I5, I6, I7 );
  subc(   I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa(   G0, G1, 13, G2 );
  swapa(   I7, -1,     I6 );

  taddcc(    I5, I6, I7 );
  taddcc(    I7, -1, I6 );
  taddcctv(  I5, I6, I7 );
  taddcctv(  I7, -1, I6 );

  tsubcc(    I5, I6, I7 );
  tsubcc(    I7, -1, I6 );
  tsubcctv(  I5, I6, I7 );
  tsubcctv(  I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual,     icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}

// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never,    true, lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc( I0, I1, 31);
  ldc( I2, -1,  0);

  lddc( I4, I4, 30);
  lddc( I6,  0, 1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc( 31, L4, L5);
  stc( 30, L6, -(1 << 12) );

  stdc( 0, L7, G0);
  stdc( 1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif

// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg is NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg is NULL
  }
}
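
// Usage sketch (the offset values are illustrative): null_check(O0, 8)
// emits nothing, because a later load from [O0 + 8] with a NULL O0 traps
// in the protected page anyway; only an offset large enough to escape that
// page makes needs_explicit_null_check() true and forces the ld_ptr probe.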

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret(bool trace) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(I7, 2 * BytesPerInstWord, G0);
  }
}

void MacroAssembler::retl(bool trace) {
  if (trace) {
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(O7, 2 * BytesPerInstWord, G0);
  }
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  sethi(a, /*ForceRelocatable=*/ true);
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a, d, offset);
#endif /* PRODUCT */
  } else {
    jmpl(a, d, offset);
  }
}

void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
  jumpl( a, G0, offset, file, line );
}


// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  add(inArg.address_in_frame(), d);
}
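
// Effect, by example (a sketch; assumes the six SPARC register parameters
// O0..O5): set_varargs(Argument(0, false), O0) spills O0..O5 into their
// caller-preallocated frame slots and leaves the address of the first slot
// in O0, handing C varargs code one contiguous memory image of the list.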

// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if available
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work())  flushw();
  else                                     flush_windows_trap();
}

// Write the serialization page so the VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  load_address(mem_serialize_page);
  st(G0, tmp1, tmp2);
}
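
// Offset arithmetic, for concreteness (an 8K page is assumed here): the
// mask is then 8192 - sizeof(int) == 0x1ffc, so each thread stores to its
// own int-aligned word at
//   page + ((thread >> os::get_serialize_page_shift_count()) & 0x1ffc),
// keeping concurrently serializing threads off each other's cache lines.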



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if (VM_Version::v9_instructions_work()) {
    mulx (s1, s2, d);
  } else {
    smul (s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if (VM_Version::v9_instructions_work()) {
    mulx (s1, simm13a, d);
  } else {
    smul (s1, simm13a, d);
  }
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8.  Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8.  Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}


// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  Address last_get_thread_addr(L3, (address)&last_get_thread);
  sethi(last_get_thread_addr);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, last_get_thread_addr);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    sllx(G1, 0,G1);             // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    sllx(G4, 0,G4);             // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}

// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) +
                         in_bytes(JavaFrameAnchor::flags_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::last_Java_pc_offset()));

  // Always set last_Java_pc and flags first, because once last_Java_sp is
  // visible has_last_Java_frame is true and users will look at the rest of
  // the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  tst(L0);
#ifdef _LP64
  brx(Assembler::zero, false, Assembler::pt, PcOk);
#else
  br(Assembler::zero, false, Assembler::pt, PcOk);
#endif // _LP64
  delayed()->nop();
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from a call out of Java mode, the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if we
  // are doing a call to native (not VM) code we capture the known pc and
  // don't have to rely on the native call having a standard frame linkage
  // where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch,    Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#else
  st_ptr(last_java_sp,    Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
    save_frame(0);
#else
    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
    ld_ptr(sp_addr, L0);
    tst(L0);
    breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
    restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();             // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}
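
// Typical use, as a sketch (the entry point shown is hypothetical):
//   call_VM(O0, CAST_FROM_FN_PTR(address, SomeRuntime::some_entry), O1);
// passes G2_thread implicitly as the first C argument, records the last
// Java frame around the call, and forwards any pending exception when
// check_exceptions is true.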

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
  ld_ptr(exception_addr, scratch_reg);
  br_null(scratch_reg, false, pt, L);
  delayed()->nop();
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}


// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0,                "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  verify_oop(oop_result);

# ifdef ASSERT
    // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
    save_frame(0);
#else
    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
    ld_ptr(vm_result_addr, L0);
    tst(L0);
    restore();
    breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::store_check(Register tmp, Register obj) {
  // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)

  /* $$$ This stuff needs to go into one of the BarrierSet generator
     functions.  (The particular barrier sets will have to be friends of
     MacroAssembler, I guess.) */
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert( tmp != obj, "need separate temp reg");
  Address rs(tmp, (address)ct->byte_map_base);
  load_address(rs);
  stb(G0, rs.base(), obj);
}

void MacroAssembler::store_check(Register tmp, Register obj, Register offset) {
  store_check(tmp, obj);
}
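
// Card-table arithmetic, by example (a card_shift of 9, i.e. 512-byte
// cards, is the usual configuration): a store into the object at address A
// dirties the card byte at byte_map_base + (A >> 9); the stb of G0 above
// writes the zero that the card table reads as "dirty".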

// %%% Note:  The following six instructions have been moved,
//            unchanged, from assembler_sparc.inline.hpp.
//            They will be refactored at a later date.

void MacroAssembler::sethi(intptr_t imm22a,
                            Register d,
                            bool ForceRelocatable,
                            RelocationHolder const& rspec) {
  Address adr( d, (address)imm22a, rspec );
  MacroAssembler::sethi( adr, ForceRelocatable );
}


void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
  // if addr of local, do not need to load it
  assert(a.base() != FP  &&  a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
# endif
  v9_dep();
//  ForceRelocatable = 1;
  save_pc = pc();
  if (a.hi32() == 0 && a.low32() >= 0) {
    Assembler::sethi(a.low32(), a.base(), a.rspec());
  }
  else if (a.hi32() == -1) {
    Assembler::sethi(~a.low32(), a.base(), a.rspec());
    xor3(a.base(), ~low10(~0), a.base());
  }
  else {
    Assembler::sethi(a.hi32(), a.base(), a.rspec() );   // 22
    if ( a.hi32() & 0x3ff )                     // Any bits?
      or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32
    if ( a.low32() & 0xFFFFFC00 ) {             // done?
      if( (a.low32() >> 20) & 0xfff ) {         // Any bits set?
        sllx(a.base(), 12, a.base());           // Make room for next 12 bits
        or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
        shiftcnt = 0;                           // We already shifted
      }
      else
        shiftcnt = 12;
      if( (a.low32() >> 10) & 0x3ff ) {
        sllx(a.base(), shiftcnt+10, a.base());  // Make room for last 10 bits
        or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(a.base(), shiftcnt+10 , a.base());   // Shift leaving disp field 0'd
    }
    else
      sllx( a.base(), 32, a.base() );
  }
  // Pad out the instruction sequence so it can be
  // patched later.
  if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
                            a.rtype() != relocInfo::runtime_call_type) ) {
    while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
      nop();
  }
#else
  Assembler::sethi(a.hi(), a.base(), a.rspec());
#endif

}
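
// Worked example (a sketch): for the 64-bit constant 0x0000000500001400
// the LP64 path above emits sethi/or3 to place the high word (5) in the
// low bits, then sllx(22), or3(5), sllx(10) to splice in bits 10..31,
// leaving the low 10 bits zero for a trailing add or load displacement.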

int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
  // Returns a size in bytes; the worst case is the full 7-instruction
  // sequence that sethi(Address&) pads out for later patching.
  if (worst_case) return 7 * BytesPerInstWord;
  intptr_t iaddr = (intptr_t)a;
  int hi32 = (int)(iaddr >> 32);
  int lo32 = (int)(iaddr);
  int inst_count;
  if (hi32 == 0 && lo32 >= 0)
    inst_count = 1;
  else if (hi32 == -1)
    inst_count = 2;
  else {
    inst_count = 2;
    if ( hi32 & 0x3ff )
      inst_count++;
    if ( lo32 & 0xFFFFFC00 ) {
      if( (lo32 >> 20) & 0xfff ) inst_count += 2;
      if( (lo32 >> 10) & 0x3ff ) inst_count += 2;
    }
  }
  return BytesPerInstWord * inst_count;
#else
  return BytesPerInstWord;
#endif
}
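
// Example (a sketch, LP64, non-worst case): for a == (address)0x0000000100002000,
// hi32 == 1 and lo32 == 0x2000, giving inst_count == 2 (sethi pair)
// + 1 (low bits of hi32) + 2 (bits 10..19 of lo32) == 5 instructions,
// i.e. 20 bytes.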

int MacroAssembler::worst_case_size_of_set() {
  // worst-case sethi plus the trailing add of the low 10 bits
  return size_of_sethi(NULL, true) + BytesPerInstWord;
}

void MacroAssembler::set(intptr_t value, Register d,
                         RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);

  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value  &&  value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(val);
      return;
    }
  }
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
  sethi( val );
  if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
    add( d, value &  0x3ff, d, rspec);
  }
}
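
// The three cases, by example: set(42, d) emits a single or3 (simm13
// path); set(0x12345000, d), whose low 10 bits are zero, emits a single
// sethi; anything else costs a sethi plus an add of the low 10 bits.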

void MacroAssembler::setsw(int value, Register d,
                           RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);
  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value  &&  value <= 4095) {
      or3(G0, value, d);
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi( val );
#ifndef _LP64
      if ( value < 0 ) {
        assert_not_delayed();
        sra (d, G0, d);
      }
#endif
      return;
    }
  }
  assert_not_delayed();
  sethi( val );
  add( d, value &  0x3ff, d, rspec);

  // (A negative value could be loaded in 2 insns with sethi/xor,
  // but it would take a more complex relocation.)
#ifndef _LP64
  if ( value < 0)
    sra(d, G0, d);
#endif
}

// %%% End of moved six set instructions.


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo,   d); // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}
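
// Decomposition example (a sketch): set64(CONST64(0x0000000500000000), d, tmp)
// takes the lo == 0 path with a simm13 hi of 5 and emits just
// or3(G0, 5, d); sllx(d, 32, d), two instructions instead of the
// six-instruction fully general case.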

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}
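
// Example (the fixed frame size is platform dependent): if
// frame::memory_parameter_word_sp_offset were 23 words, extraWords == 4
// would give 27 words, rounded up to 28 for double-word alignment,
// i.e. 224 bytes when BytesPerWord == 8.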


// save_frame: given number of "extra" words in frame,
// issue the appropriate save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords = 0) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}


void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}


void RegistersForDebugging::print(outputStream* s) {
  int j;
  for ( j = 0;  j < 8;  ++j )
    if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
    else          s->print_cr( "fp = 0x%.16lx",    i[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    s->print_cr("l%d = 0x%.16lx", j, l[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
    else          s->print_cr( "sp = 0x%.16lx",    o[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    s->print_cr("g%d = 0x%.16lx", j, g[j]);
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);  // raw bits, not a float promoted to double
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", 2 * last);  // register names are even-numbered
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flush_windows();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0;  i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8;  ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}
1632 
1633 
1634 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
1635 void MacroAssembler::push_fTOS() {
1636   // %%%%%% need to implement this
1637 }
1638 
1639 // pops double TOS element from CPU stack and pushes on FPU stack 
1640 void MacroAssembler::pop_fTOS() {
1641   // %%%%%% need to implement this
1642 }
1643 
1644 void MacroAssembler::empty_FPU_stack() {
1645   // %%%%%% need to implement this
1646 }
1647 
1648 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
1649   // plausibility check for oops
1650   if (!VerifyOops) return;
1651 
1652   if (reg == G0)  return;       // always NULL, which is always an oop
1653 
1654   char buffer[16];
1655   sprintf(buffer, "%d", line);
1656   int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
1657   char * real_msg = new char[len];
1658   sprintf(real_msg, "%s (%s:%d)", msg, file, line);
1659 
1660   // Call indirectly to solve generation ordering problem
1661   Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
1662 
1663   // Make some space on stack above the current register window.
1664   // Enough to hold 8 64-bit registers.
1665   add(SP,-8*8,SP);
1666 
1667   // Save some 64-bit registers; a normal 'save' chops the heads off
1668   // of 64-bit longs in the 32-bit build.
1669   stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1670   stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1671   mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
1672   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1673 
1674   set((intptr_t)real_msg, O1);
1675   // Load address to call to into O7
1676   load_ptr_contents(a, O7);
1677   // Register call to verify_oop_subroutine
1678   callr(O7, G0);
1679   delayed()->nop();
1680   // recover frame size
1681   add(SP, 8*8,SP);
1682 }
1683 
1684 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
1685   // plausibility check for oops
1686   if (!VerifyOops) return;
1687 
1688   char buffer[64];
1689   sprintf(buffer, "%d", line);
1690   int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
1691   sprintf(buffer, " at SP+%d ", addr.disp());
1692   len += strlen(buffer);
1693   char * real_msg = new char[len];
1694   sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
1695 
1696   // Call indirectly to solve generation ordering problem
1697   Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
1698 
1699   // Make some space on stack above the current register window.
1700   // Enough to hold 8 64-bit registers.
1701   add(SP,-8*8,SP);
1702 
1703   // Save some 64-bit registers; a normal 'save' chops the heads off
1704   // of 64-bit longs in the 32-bit build.
1705   stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1706   stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1707   ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
1708   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1709 
1710   set((intptr_t)real_msg, O1);
1711   // Load address to call to into O7
1712   load_ptr_contents(a, O7);
1713   // Register call to verify_oop_subroutine
1714   callr(O7, G0);
1715   delayed()->nop();
1716   // recover frame size
1717   add(SP, 8*8,SP);
1718 }
1719 
1720 // side-door communication with signalHandler in os_solaris.cpp
1721 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
1722 
1723 // This macro is expanded just once; it creates shared code.  Contract:
1724 // receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
1725 // registers, including flags.  May not use a register 'save', as this blows
1726 // the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
1727 // call.
1728 void MacroAssembler::verify_oop_subroutine() {
1729   assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
1730 
1731   // Leaf call; no frame.
1732   Label succeed, fail, null_or_fail;
1733 
1734   // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
1735   // O0 is now the oop to be checked.  O7 is the return address.  
1736   Register O0_obj = O0;
1737 
1738   // Save some more registers for temps.
1739   stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
1740   stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
1741   stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
1742   stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
1743   
1744   // Save flags
1745   Register O5_save_flags = O5;
1746   rdccr( O5_save_flags );
1747 
1748   { // count number of verifies
1749     Register O2_adr   = O2;
1750     Register O3_accum = O3;
1751     Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
1752     sethi(count_addr);
1753     ld(count_addr, O3_accum);
1754     inc(O3_accum);
1755     st(O3_accum, count_addr);
1756   }
1757 
1758   Register O2_mask = O2;
1759   Register O3_bits = O3;
1760   Register O4_temp = O4;
1761 
1762   // mark lower end of faulting range
1763   assert(_verify_oop_implicit_branch[0] == NULL, "set once");
1764   _verify_oop_implicit_branch[0] = pc();
1765 
1766   // We can't check the mark oop because it could be in the process of
1767   // locking or unlocking while this is running.
1768   set(Universe::verify_oop_mask (), O2_mask);
1769   set(Universe::verify_oop_bits (), O3_bits);
1770 
1771   // assert((obj & oop_mask) == oop_bits);
1772   and3(O0_obj, O2_mask, O4_temp);
1773   cmp(O4_temp, O3_bits);
1774   brx(notEqual, false, pn, null_or_fail);
1775   delayed()->nop();
1776 
1777   if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
1778     // the null_or_fail case is useless; must test for null separately
1779     br_null(O0_obj, false, pn, succeed);
1780     delayed()->nop();
1781   }
1782 
1783   // Check the klassOop of this object for being in the right area of memory.
1784   // Cannot do the load in the delay slot above in case O0 is null
1785   ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
1786   // assert((klass & klass_mask) == klass_bits);
1787   if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
1788     set(Universe::verify_klass_mask(), O2_mask);
1789   if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
1790     set(Universe::verify_klass_bits(), O3_bits);
1791   and3(O0_obj, O2_mask, O4_temp);
1792   cmp(O4_temp, O3_bits);
1793   brx(notEqual, false, pn, fail);
1794   // Check the klass's klass
1795   delayed()->ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
1796   and3(O0_obj, O2_mask, O4_temp);
1797   cmp(O4_temp, O3_bits);
1798   brx(notEqual, false, pn, fail);
1799   delayed()->wrccr( O5_save_flags ); // Restore CCR's
1800   
1801   // mark upper end of faulting range
1802   _verify_oop_implicit_branch[1] = pc();
1803 
1804   //-----------------------
1805   // all tests pass
1806   bind(succeed);
1807 
1808   // Restore prior 64-bit registers
1809   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
1810   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
1811   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
1812   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
1813   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
1814   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
1815 
1816   retl();                       // Leaf return; restore prior O7 in delay slot
1817   delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
1818 
1819   //-----------------------
1820   bind(null_or_fail);           // nulls are less common but OK
1821   br_null(O0_obj, false, pt, succeed);
1822   delayed()->wrccr( O5_save_flags ); // Restore CCR's
1823 
1824   //-----------------------
1825   // report failure:
1826   bind(fail);
1827   _verify_oop_implicit_branch[2] = pc();
1828 
1829   wrccr( O5_save_flags ); // Restore CCR's
1830 
1831   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1832   
1833   // stop_subroutine expects message pointer in I1.
1834   mov(I1, O1);
1835   
1836   // Restore prior 64-bit registers
1837   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
1838   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
1839   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
1840   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
1841   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
1842   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
1843 
1844   // factor long stop-sequence into subroutine to save space
1845   assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1846   
1847   // call indirectly to solve generation ordering problem
1848   Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
1849   load_ptr_contents(a, O5);
1850   jmpl(O5, 0, O7);
1851   delayed()->nop();
1852 }
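
// The heart of the subroutine above is the plausibility predicate
// (value & mask) == bits, applied first to the oop itself and then to its
// klass (and the klass's klass).  A host-side sketch, with mask/bits
// standing in for Universe::verify_oop_mask()/verify_oop_bits():
#if 0  // example only, excluded from compilation
#include <stdint.h>
static bool plausible_oop(uintptr_t obj, uintptr_t mask, uintptr_t bits) {
  if (obj == 0) return true;        // NULL is always an acceptable oop
  return (obj & mask) == bits;      // address lies in the expected heap range
}
#endif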
1853 
1854 
1855 void MacroAssembler::stop(const char* msg) {
1856   // save frame first to get O7 for return address
1857   // add one word to size in case struct is odd number of words long
1858   // It must be doubleword-aligned for storing doubles into it.
1859 
1860     save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1861 
1862     // stop_subroutine expects message pointer in I1.
1863     set((intptr_t)msg, O1);
1864 
1865     // factor long stop-sequence into subroutine to save space
1866     assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1867 
1868     // call indirectly to solve generation ordering problem
1869     Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
1870     load_ptr_contents(a, O5);
1871     jmpl(O5, 0, O7);
1872     delayed()->nop();
1873     
1874     breakpoint_trap();   // make stop actually stop rather than writing
1875                          // unnoticeable results in the output files.
1876 
1877     // restore(); done in callee to save space!
1878 }
1879 
1880 
1881 void MacroAssembler::warn(const char* msg) {
1882   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1883   RegistersForDebugging::save_registers(this);
1884   mov(O0, L0);
1885   set((intptr_t)msg, O0);
1886   call( CAST_FROM_FN_PTR(address, warning) );
1887   delayed()->nop();
1888 //  ret();
1889 //  delayed()->restore();
1890   RegistersForDebugging::restore_registers(this, L0);
1891   restore();
1892 }
1893 
1894 
1895 void MacroAssembler::untested(const char* what) {
1896   // We must be able to turn interactive prompting off
1897   // in order to run automated test scripts on the VM
1898   // Use the flag ShowMessageBoxOnError
1899   
1900   char* b = new char[1024];
1901   sprintf(b, "untested: %s", what);  
1902 
1903   if ( ShowMessageBoxOnError )   stop(b);
1904   else                           warn(b);
1905 }
1906 
1907 
1908 void MacroAssembler::stop_subroutine() {
1909   RegistersForDebugging::save_registers(this);
1910 
1911   // for the sake of the debugger, stick a PC on the current frame
1912   // (this assumes that the caller has performed an extra "save")
1913   mov(I7, L7);
1914   add(O7, -7 * BytesPerInt, I7);
1915 
1916   save_frame(); // one more save to free up another O7 register
1917   mov(I0, O1); // addr of reg save area
1918 
1919   // We expect pointer to message in I1. Caller must set it up in O1
1920   mov(I1, O0); // get msg
1921   call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
1922   delayed()->nop();
1923 
1924   restore();
1925 
1926   RegistersForDebugging::restore_registers(this, O0);
1927 
1928   save_frame(0);
1929   call(CAST_FROM_FN_PTR(address,breakpoint));
1930   delayed()->nop();
1931   restore();
1932 
1933   mov(L7, I7);
1934   retl();
1935   delayed()->restore(); // see stop above
1936 }
1937 
1938 
1939 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
1940   if ( ShowMessageBoxOnError ) {
1941       JavaThreadState saved_state = JavaThread::current()->thread_state();
1942       JavaThread::current()->set_thread_state(_thread_in_vm);
1943       {
1944         // In order to make locks work, we need to fake an in_VM state
1945         ttyLocker ttyl;
1946         ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
1947         if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
1948           ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
1949         }
1950         if (os::message_box(msg, "Execution stopped, print registers?"))
1951           regs->print(::tty);
1952       }
1953       ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
1954   }
1955   else
1956      ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
1957   assert(false, "error");
1958 }
1959 
1960 
1961 #ifndef PRODUCT
1962 void MacroAssembler::test() {
1963   ResourceMark rm;
1964 
1965   CodeBuffer cb("test", 10000, 10000);
1966   MacroAssembler* a = new MacroAssembler(&cb);
1967   VM_Version::allow_all();
1968   a->test_v9();
1969   a->test_v8_onlys();
1970   VM_Version::revert();
1971 
1972   StubRoutines::Sparc::test_stop_entry()();
1973 }
1974 #endif
1975 
1976 
1977 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
1978   subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
1979   Label no_extras;
1980   br( negative, true, pt, no_extras ); // if neg, clear reg
1981   delayed()->set( 0, Rresult);         // annulled, so only if taken
1982   bind( no_extras );
1983 }
1984 
1985 
1986 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
1987 #ifdef _LP64
1988   add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
1989 #else
1990   add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
1991 #endif
1992   bclr(1, Rresult);
1993   sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
1994 }
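
// Host-side sketch of the arithmetic calc_frame_size() emits: round the word
// count to an even number (doubleword alignment for the register-save area)
// and convert to bytes.  'sp_offset' stands in for
// frame::memory_parameter_word_sp_offset; the helper is hypothetical.
#if 0  // example only, excluded from compilation
static long frame_size_bytes(long extra_words, long sp_offset,
                             bool lp64, int log_bytes_per_word) {
  long words = extra_words + sp_offset + (lp64 ? 0 : 1); // +1 lets the clear round up
  words &= ~1L;                          // bclr(1, Rresult): force an even count
  return words << log_bytes_per_word;    // sll: total frame size in bytes
}
#endif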
1995 
1996 
1997 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
1998   calc_frame_size(Rextra_words, Rresult);
1999   neg(Rresult);
2000   save(SP, Rresult, SP);
2001 }
2002 
2003 
2004 // ---------------------------------------------------------
2005 Assembler::RCondition cond2rcond(Assembler::Condition c) {
2006   switch (c) {
2007     /*case zero: */
2008     case Assembler::equal:        return Assembler::rc_z;
2009     case Assembler::lessEqual:    return Assembler::rc_lez;
2010     case Assembler::less:         return Assembler::rc_lz;
2011     /*case notZero:*/
2012     case Assembler::notEqual:     return Assembler::rc_nz;
2013     case Assembler::greater:      return Assembler::rc_gz;
2014     case Assembler::greaterEqual: return Assembler::rc_gez;
2015   }
2016   ShouldNotReachHere();
2017   return Assembler::rc_z;
2018 }
2019 
2020 // compares register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
2021 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
2022   tst(s1);
2023   br (c, a, p, L);
2024 }
2025 
2026 
2027 // Compares a pointer register with zero and branches on null.
2028 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
2029 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
2030   assert_not_delayed();
2031 #ifdef _LP64
2032   bpr( rc_z, a, p, s1, L );
2033 #else
2034   tst(s1);
2035   br ( zero, a, p, L );
2036 #endif
2037 }
2038 
2039 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
2040   assert_not_delayed();
2041 #ifdef _LP64
2042   bpr( rc_nz, a, p, s1, L );
2043 #else
2044   tst(s1);
2045   br ( notZero, a, p, L );
2046 #endif
2047 }
2048 
2049 
2050 // instruction sequences factored across compiler & interpreter 
2051 
2052 
2053 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low, 
2054                            Register Rb_hi, Register Rb_low,
2055                            Register Rresult) {
2056 
2057   Label check_low_parts, done;
2058 
2059   cmp(Ra_hi, Rb_hi );  // compare hi parts
2060   br(equal, true, pt, check_low_parts);
2061   delayed()->cmp(Ra_low, Rb_low); // test low parts
2062 
2063   // And, with an unsigned comparison, it does not matter if the numbers
2064   // are negative or not.
2065   // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
2066   // The second one is bigger (unsignedly). 
2067 
2068   // Other notes:  The first move in each triplet can be unconditional
2069   // (and therefore probably prefetchable).
2070   // And the equals case for the high part does not need testing,
2071   // since that triplet is reached only after finding the high halves differ.
2072 
2073   if (VM_Version::v9_instructions_work()) {
2074     
2075                                     mov  (                     -1, Rresult);
2076     ba( false, done );  delayed()-> movcc(greater, false, icc,  1, Rresult);
2077   }
2078   else {
2079     br(less,    true, pt, done); delayed()-> set(-1, Rresult);
2080     br(greater, true, pt, done); delayed()-> set( 1, Rresult);
2081   }
2082 
2083   bind( check_low_parts );
2084 
2085   if (VM_Version::v9_instructions_work()) {
2086     mov(                               -1, Rresult);
2087     movcc(equal,           false, icc,  0, Rresult);
2088     movcc(greaterUnsigned, false, icc,  1, Rresult);
2089   }
2090   else {
2091                                                     set(-1, Rresult);
2092     br(equal,           true, pt, done); delayed()->set( 0, Rresult);
2093     br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
2094   }
2095   bind( done );
2096 }
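
// Scalar sketch of the comparison lcmp() emits: the high halves compare
// signed, the low halves unsigned (see the comment above about -2 vs -1),
// producing the -1/0/1 result Java's lcmp bytecode requires.  The helper
// name is hypothetical.
#if 0  // example only, excluded from compilation
#include <stdint.h>
static int lcmp_sketch(int32_t a_hi, uint32_t a_lo,
                       int32_t b_hi, uint32_t b_lo) {
  if (a_hi != b_hi) return a_hi < b_hi ? -1 : 1;  // signed on the high part
  if (a_lo != b_lo) return a_lo < b_lo ? -1 : 1;  // unsigned on the low part
  return 0;
}
#endif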
2097 
2098 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
2099   subcc(  G0, Rlow, Rlow );
2100   subc(   G0, Rhi,  Rhi  );
2101 }
2102 
2103 void MacroAssembler::lshl( Register Rin_high,  Register Rin_low,
2104                            Register Rcount,
2105                            Register Rout_high, Register Rout_low,
2106                            Register Rtemp ) {
2107 
2108 
2109   Register Ralt_count = Rtemp;
2110   Register Rxfer_bits = Rtemp;
2111 
2112   assert( Ralt_count != Rin_high
2113       &&  Ralt_count != Rin_low
2114       &&  Ralt_count != Rcount
2115       &&  Rxfer_bits != Rin_low
2116       &&  Rxfer_bits != Rin_high
2117       &&  Rxfer_bits != Rcount
2118       &&  Rxfer_bits != Rout_low
2119       &&  Rout_low   != Rin_high,
2120         "register alias checks");
2121 
2122   Label big_shift, done;
2123 
2124   // This code can be optimized to use the 64 bit shifts in V9.
2125   // Here we use the 32 bit shifts.
2126 
2127   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
2128   subcc(Rcount,         31,             Ralt_count);
2129   br(greater, true, pn, big_shift);
2130   delayed()->
2131   dec(Ralt_count); 
2132 
2133   // shift < 32 bits, Ralt_count = Rcount-31
2134 
2135   // We get the transfer bits by shifting right by 32-count the low
2136   // register. This is done by shifting right by 31-count and then by one
2137   // more to take care of the special (rare) case where count is zero
2138   // (shifting by 32 would not work).
2139 
2140   neg(  Ralt_count                                 ); 
2141 
2142   // The order of the next two instructions is critical in the case where
2143   // Rin and Rout are the same and should not be reversed.
2144 
2145   srl(  Rin_low,        Ralt_count,     Rxfer_bits ); // shift right by 31-count
2146   if (Rcount != Rout_low) {
2147     sll(        Rin_low,        Rcount,         Rout_low   ); // low half
2148   }
2149   sll(  Rin_high,       Rcount,         Rout_high  );
2150   if (Rcount == Rout_low) {
2151     sll(        Rin_low,        Rcount,         Rout_low   ); // low half
2152   }
2153   srl(  Rxfer_bits,     1,              Rxfer_bits ); // shift right by one more
2154   ba (false, done);
2155   delayed()->
2156   or3(  Rout_high,      Rxfer_bits,     Rout_high);   // new hi value: or in shifted old hi part and xfer from low
2157 
2158   // shift >= 32 bits, Ralt_count = Rcount-32
2159   bind(big_shift);
2160   sll(  Rin_low,        Ralt_count,     Rout_high  );
2161   clr(  Rout_low                                   );
2162 
2163   bind(done);
2164 }
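
// Scalar sketch of the shift emulation above.  The transfer bits are
// conceptually low >> (32 - count), but that shift amount is undefined when
// count == 0, so the code shifts by 31 - count and then by one more.  A
// hypothetical C version:
#if 0  // example only, excluded from compilation
#include <stdint.h>
static void lshl_sketch(uint32_t hi, uint32_t lo, int count,
                        uint32_t* out_hi, uint32_t* out_lo) {
  count &= 0x3f;                          // take least significant 6 bits
  if (count >= 32) {                      // the 'big_shift' path
    *out_hi = lo << (count - 32);
    *out_lo = 0;
    return;
  }
  uint32_t xfer = (lo >> (31 - count)) >> 1;  // == lo >> (32-count), safe at 0
  *out_lo = lo << count;
  *out_hi = (hi << count) | xfer;
}
#endif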
2165 
2166 
2167 void MacroAssembler::lshr( Register Rin_high,  Register Rin_low,
2168                            Register Rcount,
2169                            Register Rout_high, Register Rout_low,
2170                            Register Rtemp ) {
2171 
2172   Register Ralt_count = Rtemp;
2173   Register Rxfer_bits = Rtemp;
2174 
2175   assert( Ralt_count != Rin_high
2176       &&  Ralt_count != Rin_low
2177       &&  Ralt_count != Rcount
2178       &&  Rxfer_bits != Rin_low
2179       &&  Rxfer_bits != Rin_high
2180       &&  Rxfer_bits != Rcount
2181       &&  Rxfer_bits != Rout_high
2182       &&  Rout_high  != Rin_low,
2183         "register alias checks");
2184 
2185   Label big_shift, done;
2186 
2187   // This code can be optimized to use the 64 bit shifts in V9.
2188   // Here we use the 32 bit shifts.
2189 
2190   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
2191   subcc(Rcount,         31,             Ralt_count);
2192   br(greater, true, pn, big_shift);
2193   delayed()->dec(Ralt_count);
2194 
2195   // shift < 32 bits, Ralt_count = Rcount-31
2196 
2197   // We get the transfer bits by shifting left by 32-count the high
2198   // register. This is done by shifting left by 31-count and then by one
2199   // more to take care of the special (rare) case where count is zero
2200   // (shifting by 32 would not work).
2201 
2202   neg(  Ralt_count                                  ); 
2203   if (Rcount != Rout_low) {
2204     srl(        Rin_low,        Rcount,         Rout_low    ); 
2205   }
2206 
2207   // The order of the next two instructions is critical in the case where
2208   // Rin and Rout are the same and should not be reversed.
2209 
2210   sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
2211   sra(  Rin_high,       Rcount,         Rout_high   ); // high half
2212   sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
2213   if (Rcount == Rout_low) {
2214     srl(        Rin_low,        Rcount,         Rout_low    ); 
2215   }
2216   ba (false, done);
2217   delayed()->
2218   or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
2219 
2220   // shift >= 32 bits, Ralt_count = Rcount-32
2221   bind(big_shift);
2222 
2223   sra(  Rin_high,       Ralt_count,     Rout_low    );
2224   sra(  Rin_high,       31,             Rout_high   ); // sign into hi
2225 
2226   bind( done );
2227 }
2228 
2229 
2230 
2231 void MacroAssembler::lushr( Register Rin_high,  Register Rin_low,
2232                             Register Rcount,
2233                             Register Rout_high, Register Rout_low,
2234                             Register Rtemp ) {
2235 
2236   Register Ralt_count = Rtemp;
2237   Register Rxfer_bits = Rtemp;
2238 
2239   assert( Ralt_count != Rin_high
2240       &&  Ralt_count != Rin_low
2241       &&  Ralt_count != Rcount
2242       &&  Rxfer_bits != Rin_low
2243       &&  Rxfer_bits != Rin_high
2244       &&  Rxfer_bits != Rcount
2245       &&  Rxfer_bits != Rout_high
2246       &&  Rout_high  != Rin_low,
2247         "register alias checks");
2248 
2249   Label big_shift, done;
2250 
2251   // This code can be optimized to use the 64 bit shifts in V9.
2252   // Here we use the 32 bit shifts.
2253 
2254   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
2255   subcc(Rcount,         31,             Ralt_count);
2256   br(greater, true, pn, big_shift);
2257   delayed()->dec(Ralt_count);
2258 
2259   // shift < 32 bits, Ralt_count = Rcount-31
2260 
2261   // We get the transfer bits by shifting left by 32-count the high
2262   // register. This is done by shifting left by 31-count and then by one
2263   // more to take care of the special (rare) case where count is zero
2264   // (shifting by 32 would not work).
2265 
2266   neg(  Ralt_count                                  ); 
2267   if (Rcount != Rout_low) {
2268     srl(        Rin_low,        Rcount,         Rout_low    ); 
2269   }
2270 
2271   // The order of the next two instructions is critical in the case where
2272   // Rin and Rout are the same and should not be reversed.
2273 
2274   sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
2275   srl(  Rin_high,       Rcount,         Rout_high   ); // high half
2276   sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
2277   if (Rcount == Rout_low) {
2278     srl(        Rin_low,        Rcount,         Rout_low    ); 
2279   }
2280   ba (false, done);
2281   delayed()->
2282   or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
2283 
2284   // shift >= 32 bits, Ralt_count = Rcount-32
2285   bind(big_shift);
2286 
2287   srl(  Rin_high,       Ralt_count,     Rout_low    );
2288   clr(  Rout_high                                   );
2289 
2290   bind( done );
2291 }
2292 
2293 #ifdef _LP64
2294 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
2295   cmp(Ra, Rb);
2296   mov(                       -1, Rresult);
2297   movcc(equal,   false, xcc,  0, Rresult);
2298   movcc(greater, false, xcc,  1, Rresult);
2299 }
2300 #endif
2301 
2302 
2303 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 
2304                                 FloatRegister Fa, FloatRegister Fb,
2305                                 Register Rresult) {
2306 
2307   fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
2308 
2309   Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
2310   Condition eq =                          f_equal;
2311   Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
2312 
2313   if (VM_Version::v9_instructions_work()) {
2314 
2315     mov(                   -1, Rresult );
2316     movcc( eq, true, fcc0,  0, Rresult );
2317     movcc( gt, true, fcc0,  1, Rresult );
2318 
2319   } else {
2320     Label done;
2321 
2322                                          set( -1, Rresult );
2323     //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
2324     fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
2325     fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
2326 
2327     bind (done);
2328   }
2329 }
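
// Scalar sketch of float_cmp(): comparisons involving NaN are "unordered",
// and unordered_result chooses whether they fold into the -1 or the +1
// result (matching Java's fcmpl/fcmpg bytecodes).  Hypothetical helper:
#if 0  // example only, excluded from compilation
static int float_cmp_sketch(float a, float b, int unordered_result) {
  if (a < b)  return -1;
  if (a > b)  return  1;
  if (a == b) return  0;
  return unordered_result;   // at least one operand was NaN
}
#endif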
2330 
2331 
2332 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2333 {
2334   if (VM_Version::v9_instructions_work()) {
2335     Assembler::fneg(w, s, d);
2336   } else {
2337     if (w == FloatRegisterImpl::S) {
2338       Assembler::fneg(w, s, d);
2339     } else if (w == FloatRegisterImpl::D) {
2340       // number() does a sanity check on the alignment.
2341       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2342         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2343 
2344       Assembler::fneg(FloatRegisterImpl::S, s, d);
2345       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2346     } else {
2347       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2348 
2349       // number() does a sanity check on the alignment.
2350       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2351         ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2352 
2353       Assembler::fneg(FloatRegisterImpl::S, s, d);
2354       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2355       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2356       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2357     }
2358   }
2359 }
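
// Sketch of why the V8 emulation above suffices: on big-endian SPARC the
// first single of a double pair holds the sign bit, so negating that single
// and copying the rest negates the whole value.  A hypothetical host-side
// equivalent:
#if 0  // example only, excluded from compilation
#include <stdint.h>
#include <string.h>
static double fneg_sketch(double s) {
  uint32_t halves[2];
  memcpy(halves, &s, sizeof(halves));   // big-endian: halves[0] is the MSW
  halves[0] ^= 0x80000000u;             // fnegs on the first single
  double d;                             // halves[1] untouched: the fmovs
  memcpy(&d, halves, sizeof(d));
  return d;
}
#endif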
2360 
2361 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2362 {
2363   if (VM_Version::v9_instructions_work()) {
2364     Assembler::fmov(w, s, d);
2365   } else {
2366     if (w == FloatRegisterImpl::S) {
2367       Assembler::fmov(w, s, d);
2368     } else if (w == FloatRegisterImpl::D) {
2369       // number() does a sanity check on the alignment.
2370       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2371         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2372 
2373       Assembler::fmov(FloatRegisterImpl::S, s, d);
2374       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2375     } else {
2376       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2377 
2378       // number() does a sanity check on the alignment.
2379       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2380         ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2381 
2382       Assembler::fmov(FloatRegisterImpl::S, s, d);
2383       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2384       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2385       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2386     }
2387   }
2388 }
2389 
2390 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2391 {
2392   if (VM_Version::v9_instructions_work()) {
2393     Assembler::fabs(w, s, d);
2394   } else {
2395     if (w == FloatRegisterImpl::S) {
2396       Assembler::fabs(w, s, d);
2397     } else if (w == FloatRegisterImpl::D) {
2398       // number() does a sanity check on the alignment.
2399       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2400         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2401 
2402       Assembler::fabs(FloatRegisterImpl::S, s, d);
2403       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2404     } else {
2405       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2406 
2407       // number() does a sanity check on the alignment.
2408       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2409        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2410 
2411       Assembler::fabs(FloatRegisterImpl::S, s, d);
2412       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2413       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2414       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2415     }
2416   }
2417 }
2418 
2419 void MacroAssembler::save_all_globals_into_locals() {
2420   mov(G1,L1);
2421   mov(G2,L2);
2422   mov(G3,L3);
2423   mov(G4,L4);
2424   mov(G5,L5);
2425   mov(G6,L6);
2426   mov(G7,L7);
2427 }
2428 
2429 void MacroAssembler::restore_globals_from_locals() {
2430   mov(L1,G1);
2431   mov(L2,G2);
2432   mov(L3,G3);
2433   mov(L4,G4);
2434   mov(L5,G5);
2435   mov(L6,G6);
2436   mov(L7,G7);
2437 }
2438 
2439 // Use for 64 bit operation.
2440 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2441 {
2442   // store ptr_reg as the new top value
2443 #ifdef _LP64
2444   casx(top_ptr_reg, top_reg, ptr_reg);
2445 #else
2446   cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2447 #endif // _LP64 
2448 }       
2449 
2450 // [RGV] This routine does not handle 64 bit operations.
2451 //       use casx_under_lock() or casx directly!!!
2452 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2453 {
2454   // store ptr_reg as the new top value
2455   if (VM_Version::v9_instructions_work()) {
2456     cas(top_ptr_reg, top_reg, ptr_reg);
2457   } else {
2458         
2459     // If the register is neither an out nor a global, it is not visible
2460     // after the save.  Allocate a register for it, save its
2461     // value in the register save area (the save may not flush
2462     // registers to the save area).
2463 
2464     Register top_ptr_reg_after_save;
2465     Register top_reg_after_save;
2466     Register ptr_reg_after_save;
2467 
2468     if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2469       top_ptr_reg_after_save = top_ptr_reg->after_save();
2470     } else {
2471       Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2472       top_ptr_reg_after_save = L0;
2473       st(top_ptr_reg, reg_save_addr);
2474     }
2475 
2476     if (top_reg->is_out() || top_reg->is_global()) {
2477       top_reg_after_save = top_reg->after_save();
2478     } else {
2479       Address reg_save_addr = top_reg->address_in_saved_window();
2480       top_reg_after_save = L1;
2481       st(top_reg, reg_save_addr);
2482     }
2483 
2484     if (ptr_reg->is_out() || ptr_reg->is_global()) {
2485       ptr_reg_after_save = ptr_reg->after_save();
2486     } else {
2487       Address reg_save_addr = ptr_reg->address_in_saved_window();
2488       ptr_reg_after_save = L2;
2489       st(ptr_reg, reg_save_addr);
2490     }
2491 
2492     const Register& lock_reg = L3;
2493     const Register& lock_ptr_reg = L4;
2494     const Register& value_reg = L5;
2495     const Register& yield_reg = L6;
2496     const Register& yieldall_reg = L7;
2497 
2498     save_frame();
2499 
2500     if (top_ptr_reg_after_save == L0) {
2501       ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2502     }
2503 
2504     if (top_reg_after_save == L1) {
2505       ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2506     }
2507 
2508     if (ptr_reg_after_save == L2) {
2509       ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2510     }
2511 
2512     Label retry_get_lock;
2513     Label not_same;
2514     Label dont_yield;
2515 
2516     assert(lock_addr, "lock_address should be non null for v8");
2517     set((intptr_t)lock_addr, lock_ptr_reg);
2518     // Initialize yield counter
2519     mov(G0,yield_reg);
2520     mov(G0, yieldall_reg);
2521     set(StubRoutines::Sparc::locked, lock_reg);
2522 
2523     bind(retry_get_lock);
2524     cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
2525     br(Assembler::less, false, Assembler::pt, dont_yield);
2526     delayed()->nop();
2527     
2528     if(use_call_vm) {
2529       Untested("Need to verify global reg consistency");
2530       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2531     } else {
2532       // Save the regs and make space for a C call
2533       save(SP, -96, SP);
2534       save_all_globals_into_locals();
2535       call(CAST_FROM_FN_PTR(address,os::yield_all));
2536       delayed()->mov(yieldall_reg, O0);
2537       restore_globals_from_locals();
2538       restore();
2539     }
2540 
2541     // reset the counter
2542     mov(G0,yield_reg);
2543     add(yieldall_reg, 1, yieldall_reg);
2544     
2545     bind(dont_yield);
2546     // try to get lock
2547     swap(lock_ptr_reg, 0, lock_reg);
2548 
2549     // did we get the lock?
2550     cmp(lock_reg, StubRoutines::Sparc::unlocked);
2551     br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2552     delayed()->add(yield_reg,1,yield_reg);
2553 
2554     // yes, got lock.  do we have the same top?
2555     ld(top_ptr_reg_after_save, 0, value_reg);
2556     cmp(value_reg, top_reg_after_save);
2557     br(Assembler::notEqual, false, Assembler::pn, not_same);
2558     delayed()->nop();
2559 
2560     // yes, same top.
2561     st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
2562     membar(Assembler::StoreStore); 
2563 
2564     bind(not_same);
2565     mov(value_reg, ptr_reg_after_save);
2566     st(lock_reg, lock_ptr_reg, 0); // unlock
2567 
2568     restore();
2569   }
2570 }
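
// Protocol sketch of the V8 path above: CAS is emulated by taking a global
// spin lock with an atomic swap, doing the compare-and-store under it, and
// unlocking.  'atomic_swap' stands in for the SPARC swap instruction and the
// constants mirror StubRoutines::Sparc::locked/unlocked; all names here are
// hypothetical.
#if 0  // example only, excluded from compilation
#include <stdint.h>
extern intptr_t atomic_swap(volatile intptr_t* addr, intptr_t value);
enum { spin_unlocked = 0, spin_locked = 1 };
static intptr_t cas_under_lock_sketch(volatile intptr_t* lock,
                                      intptr_t* top_ptr,
                                      intptr_t expected, intptr_t new_val) {
  while (atomic_swap(lock, spin_locked) != spin_unlocked) {
    // spin; the real code yields once a spin-count threshold is exceeded
  }
  intptr_t old = *top_ptr;
  if (old == expected) *top_ptr = new_val;  // same top: install the new value
  *lock = spin_unlocked;                    // release the spin lock
  return old;                               // caller compares old vs expected
}
#endif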
2571 
2572 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
2573                                           Label& done, Label* slow_case,
2574                                           BiasedLockingCounters* counters) {
2575   assert(UseBiasedLocking, "why call this otherwise?");
2576 
2577   if (PrintBiasedLockingStatistics) {
2578     assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
2579     if (counters == NULL)
2580       counters = BiasedLocking::counters();
2581   }
2582 
2583   Label cas_label;
2584 
2585   // Biased locking
2586   // See whether the lock is currently biased toward our thread and
2587   // whether the epoch is still valid
2588   // Note that the runtime guarantees sufficient alignment of JavaThread
2589   // pointers to allow age to be placed into low bits
2590   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
2591   and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
2592   cmp(temp_reg, markOopDesc::biased_lock_pattern);
2593   brx(Assembler::notEqual, false, Assembler::pn, cas_label);
2594 
2595   delayed()->ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
2596   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
2597   or3(G2_thread, temp_reg, temp_reg);
2598   xor3(mark_reg, temp_reg, temp_reg);
2599   andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
2600   if (counters != NULL) {
2601     cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
2602     // Reload mark_reg as we may need it later
2603     ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
2604   }
2605   brx(Assembler::equal, true, Assembler::pt, done);
2606   delayed()->nop();
2607 
2608   Label try_revoke_bias;
2609   Label try_rebias;
2610   Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
2611   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2612 
2613   // At this point we know that the header has the bias pattern and
2614   // that we are not the bias owner in the current epoch. We need to
2615   // figure out more details about the state of the header in order to
2616   // know what operations can be legally performed on the object's
2617   // header.
2618 
2619   // If the low three bits in the xor result aren't clear, that means
2620   // the prototype header is no longer biased and we have to revoke
2621   // the bias on this object.
2622   btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
2623   brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
2624 
2625   // Biasing is still enabled for this data type. See whether the
2626   // epoch of the current bias is still valid, meaning that the epoch
2627   // bits of the mark word are equal to the epoch bits of the
2628   // prototype header. (Note that the prototype header's epoch bits
2629   // only change at a safepoint.) If not, attempt to rebias the object
2630   // toward the current thread. Note that we must be absolutely sure
2631   // that the current epoch is invalid in order to do this because
2632   // otherwise the manipulations it performs on the mark word are
2633   // illegal.
2634   delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
2635   brx(Assembler::notZero, false, Assembler::pn, try_rebias);
2636 
2637   // The epoch of the current bias is still valid but we know nothing
2638   // about the owner; it might be set or it might be clear. Try to
2639   // acquire the bias of the object using an atomic operation. If this
2640   // fails we will go in to the runtime to revoke the object's bias.
2641   // Note that we first construct the presumed unbiased header so we
2642   // don't accidentally blow away another thread's valid bias.
2643   delayed()->and3(mark_reg,
2644                   markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
2645                   mark_reg);
2646   or3(G2_thread, mark_reg, temp_reg);
2647   casx_under_lock(mark_addr.base(), mark_reg, temp_reg, 
2648                   (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
2649   // If the biasing toward our thread failed, this means that
2650   // another thread succeeded in biasing it toward itself and we
2651   // need to revoke that bias. The revocation will occur in the
2652   // interpreter runtime in the slow case.
2653   cmp(mark_reg, temp_reg);
2654   if (counters != NULL) {
2655     cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
2656   }
2657   if (slow_case != NULL) {
2658     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2659     delayed()->nop();
2660   }
2661   br(Assembler::always, false, Assembler::pt, done);
2662   delayed()->nop();
2663   
2664   bind(try_rebias);
2665   // At this point we know the epoch has expired, meaning that the
2666   // current "bias owner", if any, is actually invalid. Under these
2667   // circumstances _only_, we are allowed to use the current header's
2668   // value as the comparison value when doing the cas to acquire the
2669   // bias in the current epoch. In other words, we allow transfer of
2670   // the bias from one thread to another directly in this situation.
2671   //
2672   // FIXME: due to a lack of registers we currently blow away the age
2673   // bits in this situation. Should attempt to preserve them.
2674   ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
2675   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
2676   or3(G2_thread, temp_reg, temp_reg);
2677   casx_under_lock(mark_addr.base(), mark_reg, temp_reg, 
2678                   (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
2679   // If the biasing toward our thread failed, this means that
2680   // another thread succeeded in biasing it toward itself and we
2681   // need to revoke that bias. The revocation will occur in the
2682   // interpreter runtime in the slow case.
2683   cmp(mark_reg, temp_reg);
2684   if (counters != NULL) {
2685     cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
2686   }
2687   if (slow_case != NULL) {
2688     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2689     delayed()->nop();
2690   }
2691   br(Assembler::always, false, Assembler::pt, done);
2692   delayed()->nop();
2693   
2694   bind(try_revoke_bias);
2695   // The prototype mark in the klass doesn't have the bias bit set any
2696   // more, indicating that objects of this data type are not supposed
2697   // to be biased any more. We are going to try to reset the mark of
2698   // this object to the prototype value and fall through to the
2699   // CAS-based locking scheme. Note that if our CAS fails, it means
2700   // that another thread raced us for the privilege of revoking the
2701   // bias of this particular object, so it's okay to continue in the
2702   // normal locking code.
2703   //
2704   // FIXME: due to a lack of registers we currently blow away the age
2705   // bits in this situation. Should attempt to preserve them.
2706   ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
2707   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
2708   casx_under_lock(mark_addr.base(), mark_reg, temp_reg, 
2709                   (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
2710   // Fall through to the normal CAS-based lock, because no matter what
2711   // the result of the above CAS, some thread must have succeeded in
2712   // removing the bias bit from the object's header.
2713   if (counters != NULL) {
2714     cmp(mark_reg, temp_reg);
2715     cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
2716   }
2717 
2718   bind(cas_label);
2719 }
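
// Sketch of the triage performed above: temp = mark ^ (prototype | thread),
// with the age bits masked off.  Zero means the object is already biased to
// us; otherwise the low bits say which recovery path to take.  The mask
// values below are illustrative placements, not authoritative markOopDesc
// constants.
#if 0  // example only, excluded from compilation
#include <stdint.h>
static const uintptr_t biased_lock_mask = 0x7;    // low three bits (illustrative)
static const uintptr_t age_mask         = 0x78;   // illustrative placement
static const uintptr_t epoch_mask       = 0x180;  // illustrative placement
static int bias_triage(uintptr_t mark, uintptr_t proto, uintptr_t self) {
  uintptr_t x = (mark ^ (proto | self)) & ~age_mask;
  if (x == 0)               return 0;  // biased to us, epoch valid: done
  if (x & biased_lock_mask) return 1;  // prototype no longer biased: revoke
  if (x & epoch_mask)       return 2;  // epoch expired: try to rebias
  return 3;                            // owner differs: CAS for the bias
}
#endif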
2720 
2721 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
2722                                           bool allow_delay_slot_filling) {
2723   // Check for biased locking unlock case, which is a no-op
2724   // Note: we do not have to check the thread ID for two reasons.
2725   // First, the interpreter checks for IllegalMonitorStateException at
2726   // a higher level. Second, if the bias was revoked while we held the
2727   // lock, the object could not be rebiased toward another thread, so
2728   // the bias bit would be clear.
2729   ld_ptr(mark_addr, temp_reg);
2730   and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
2731   cmp(temp_reg, markOopDesc::biased_lock_pattern);
2732   brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
2733   delayed();
2734   if (!allow_delay_slot_filling) {
2735     nop();
2736   }
2737 }
2738 
2739 
2740 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by 
2741 // Solaris/SPARC's "as".  Another apt name would be cas_ptr()
2742 
2743 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
2744   casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ; 
2745 }
2746 
2747 
2748 
2749 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
2750 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
2751 // The code could be tightened up considerably.  
2752 //
2753 // box->dhw disposition - post-conditions at DONE_LABEL.
2754 // -   Successful inflated lock:  box->dhw != 0.
2755 //     Any non-zero value suffices.  
2756 //     Consider G2_thread, rsp, boxReg, or unused_mark()
2757 // -   Successful Stack-lock: box->dhw == mark.  
2758 //     box->dhw must contain the displaced mark word value
2759 // -   Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2760 //     The slow-path fast_enter() and slow_enter() operators 
2761 //     are responsible for setting box->dhw = NonZero (typically ::unused_mark).
2762 // -   Biased: box->dhw is undefined
2763 //
2764 // SPARC refworkload performance - specifically jetstream and scimark - is
2765 // extremely sensitive to the size of the code emitted by compiler_lock_object
2766 // and compiler_unlock_object.  Critically, the key factor is code size, not path
2767 // length.  (Simple experiments that pad CLO with unexecuted NOPs demonstrate the
2768 // effect.)
2769   
2770 
2771 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
2772                                           BiasedLockingCounters* counters) {
2773    Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
2774 
2775    verify_oop(Roop);
2776    Label done ; 
2777 
2778    if (counters != NULL) {
2779      inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2780    }
2781 
2782    if (EmitSync & 1) { 
2783      mov    (3, Rscratch) ;           
2784      st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2785      cmp    (SP, G0) ; 
2786      return ; 
2787    }
2788 
2789    if (EmitSync & 2) { 
2790 
2791      // Fetch object's markword
2792      ld_ptr(mark_addr, Rmark);
2793 
2794      if (UseBiasedLocking) {
2795         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2796      }
2797   
2798      // Save Rbox in Rscratch to be used for the cas operation
2799      mov(Rbox, Rscratch);
2800   
2801      // set Rmark to markOop | markOopDesc::unlocked_value      
2802      or3(Rmark, markOopDesc::unlocked_value, Rmark);
2803   
2804      // Initialize the box.  (Must happen before we update the object mark!)
2805      st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2806   
2807      // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2808      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2809      casx_under_lock(mark_addr.base(), Rmark, Rscratch, 
2810         (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
2811   
2812      // if compare/exchange succeeded we found an unlocked object and we now have locked it
2813      // hence we are done
2814      cmp(Rmark, Rscratch);
2815 #ifdef _LP64
2816      sub(Rscratch, STACK_BIAS, Rscratch);
2817 #endif
2818      brx(Assembler::equal, false, Assembler::pt, done);
2819      delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot
2820   
2821      // we did not find an unlocked object so see if this is a recursive case
2822      // sub(Rscratch, SP, Rscratch);
2823      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2824      andcc(Rscratch, 0xfffff003, Rscratch);
2825      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2826      bind (done) ; 
2827      return ; 
2828    }
2829 
2830    Label Egress ; 
2831 
2832    if (EmitSync & 256) { 
2833       Label IsInflated ; 
2834 
2835       ld_ptr (mark_addr, Rmark);           // fetch obj->mark
2836       // Triage: biased, stack-locked, neutral, inflated
2837       if (UseBiasedLocking) {
2838         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2839         // Invariant: if control reaches this point in the emitted stream
2840         // then Rmark has not been modified.  
2841       }
2842 
2843       // Store mark into displaced mark field in the on-stack basic-lock "box"
2844       // Critically, this must happen before the CAS
2845       // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.  
2846       st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2847       andcc  (Rmark, 2, G0) ; 
2848       brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ; 
2849       delayed() ->         
2850 
2851       // Try stack-lock acquisition.  
2852       // Beware: the 1st instruction is in a delay slot
2853       mov    (Rbox,  Rscratch);
2854       or3    (Rmark, markOopDesc::unlocked_value, Rmark);
2855       assert (mark_addr.disp() == 0, "cas must take a zero displacement");
2856       casn   (mark_addr.base(), Rmark, Rscratch) ; 
2857       cmp    (Rmark, Rscratch);
2858       brx    (Assembler::equal, false, Assembler::pt, done);
2859       delayed()->sub(Rscratch, SP, Rscratch); 
2860   
2861       // Stack-lock attempt failed - check for recursive stack-lock.  
2862       // See the comments below about how we might remove this case.
2863 #ifdef _LP64
2864       sub    (Rscratch, STACK_BIAS, Rscratch);
2865 #endif
2866       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2867       andcc  (Rscratch, 0xfffff003, Rscratch);
2868       br     (Assembler::always, false, Assembler::pt, done) ; 
2869       delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2870 
2871       bind   (IsInflated) ; 
2872       if (EmitSync & 64) { 
2873          // If m->owner != null goto IsLocked
2874          // Pessimistic form: Test-and-CAS vs CAS
2875          // The optimistic form avoids RTS->RTO cache line upgrades.
2876          ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; 
2877          andcc  (Rscratch, Rscratch, G0) ; 
2878          brx    (Assembler::notZero, false, Assembler::pn, done) ; 
2879          delayed()->nop() ; 
2880          // m->owner == null : it's unlocked.
2881       } 
2882 
2883       // Try to CAS m->owner from null to Self
2884       // Invariant: if we acquire the lock then _recursions should be 0.
2885       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
2886       mov    (G2_thread, Rscratch) ;
2887       casn   (Rmark, G0, Rscratch) ;
2888       cmp    (Rscratch, G0) ;
2889       // Intentional fall-through into done
2890    } else {
2891       // Aggressively avoid the Store-before-CAS penalty
2892       // Defer the store into box->dhw until after the CAS
2893       Label IsInflated, Recursive ; 
2894 
2895 // Anticipate CAS -- Avoid RTS->RTO upgrade
2896 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; 
2897 
2898       ld_ptr (mark_addr, Rmark);           // fetch obj->mark
2899       // Triage: biased, stack-locked, neutral, inflated
2900 
2901       if (UseBiasedLocking) {
2902         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2903         // Invariant: if control reaches this point in the emitted stream
2904         // then Rmark has not been modified.  
2905       }
2906       andcc  (Rmark, 2, G0) ; 
2907       brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ; 
2908       delayed()->                         // Beware - dangling delay-slot
2909 
2910       // Try stack-lock acquisition.  
2911       // Transiently install BUSY (0) encoding in the mark word.  
2912       // if the CAS of 0 into the mark was successful then we execute:
2913       //   ST box->dhw  = mark   -- save fetched mark in on-stack basiclock box
2914       //   ST obj->mark = box    -- overwrite transient 0 value
2915       // This presumes TSO, of course. 
2916    
2917       mov    (0, Rscratch) ; 
2918       or3    (Rmark, markOopDesc::unlocked_value, Rmark);
2919       assert (mark_addr.disp() == 0, "cas must take a zero displacement");
2920       casn   (mark_addr.base(), Rmark, Rscratch) ; 
2921 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; 
2922       cmp    (Rscratch, Rmark) ; 
2923       brx    (Assembler::notZero, false, Assembler::pn, Recursive) ; 
2924       delayed() -> 
2925         st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2926       if (counters != NULL) {
2927         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2928       }
2929       br     (Assembler::always, false, Assembler::pt, done);
2930       delayed() -> 
2931         st_ptr (Rbox, mark_addr) ; 
2932       
2933       bind   (Recursive) ; 
2934       // Stack-lock attempt failed - check for recursive stack-lock.  
2935       // Tests show that we can remove the recursive case with no impact
2936       // on refworkload 0.83.  If we need to reduce the size of the code
2937       // emitted by compiler_lock_object() the recursive case is a perfect
2938       // candidate.
2939       // 
2940       // A more extreme idea is to always inflate on stack-lock recursion.
2941       // This lets us eliminate the recursive checks in compiler_lock_object
2942       // and compiler_unlock_object and the (box->dhw == 0) encoding.
2943       // A brief experiment, requiring changes to synchronizer.cpp and the
2944       // interpreter, showed a performance *increase*.  In the same experiment I eliminated
2945       // the fast-path stack-lock code from the interpreter and always passed
2946       // control to the "slow" operators in synchronizer.cpp. 
2947 
2948       // RScratch contains the fetched obj->mark value from the failed CASN. 
2949 #ifdef _LP64
2950       sub    (Rscratch, STACK_BIAS, Rscratch);
2951 #endif
2952       sub(Rscratch, SP, Rscratch); 
2953       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2954       andcc  (Rscratch, 0xfffff003, Rscratch);
2955       if (counters != NULL) {
2956         // Accounting needs the Rscratch register
2957         st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2958         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2959         br     (Assembler::always, false, Assembler::pt, done) ; 
2960         delayed()->nop() ; 
2961       } else {
2962         br     (Assembler::always, false, Assembler::pt, done) ; 
2963         delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2964       }
2965 
2966       bind   (IsInflated) ; 
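      // An inflated mark word is the ObjectMonitor address with the low-order
      // markOopDesc::monitor_value tag (2) set; the "-2" in the displacements
      // below strips that tag when addressing monitor fields.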
2967       if (EmitSync & 64) { 
         // If m->owner != null we are done (the monitor is already locked).
         // Test-and-CAS vs CAS:
         // the pessimistic (test first) form avoids futile (doomed) CAS
         // attempts, while the optimistic form avoids RTS->RTO cache line
         // upgrades.
2972          ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; 
2973          andcc  (Rscratch, Rscratch, G0) ; 
2974          brx    (Assembler::notZero, false, Assembler::pn, done) ; 
2975          delayed()->nop() ; 
2976          // m->owner == null : it's unlocked.
2977       } 
2978 
2979       // Try to CAS m->owner from null to Self
2980       // Invariant: if we acquire the lock then _recursions should be 0.
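      // Sketch: acquired = (CAS(&m->owner, NULL, Self) == NULL) ;
      // The cmp below leaves icc.Z set on success for the caller to test.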
2981       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
2982       mov    (G2_thread, Rscratch) ;
2983       casn   (Rmark, G0, Rscratch) ;
2984       cmp    (Rscratch, G0) ;
2985       // ST box->displaced_header = NonZero.
2986       // Any non-zero value suffices:
      //    unused_mark(), G2_thread, Rbox, Rscratch, SP, etc.
2988       st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2989       // Intentional fall-through into done
2990    }
2991 
2992    bind   (done) ; 
2993 }
2994 
2995 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) {
2996    Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
2997 
2998    Label done ; 
2999 
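   // EmitSync & 4 : disable the fast unlock path entirely.  SP is never
   // zero, so cmp(SP, G0) leaves the condition codes not-equal and the
   // caller always diverts to the slow path.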
3000    if (EmitSync & 4) { 
3001      cmp  (SP, G0) ; 
3002      return ; 
3003    }
3004     
3005    if (EmitSync & 8) { 
3006      if (UseBiasedLocking) {
3007         biased_locking_exit(mark_addr, Rscratch, done);
3008      }
3009 
3010      // Test first if it is a fast recursive unlock
3011      ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
3012      cmp(Rmark, G0);
3013      brx(Assembler::equal, false, Assembler::pt, done);
3014      delayed()->nop();
3015 
     // Check if it is still a lightweight lock, which is true if we see
3017      // the stack address of the basicLock in the markOop of the object
3018      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3019      casx_under_lock(mark_addr.base(), Rbox, Rmark, 
3020        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3021      br (Assembler::always, false, Assembler::pt, done);
3022      delayed()->cmp(Rbox, Rmark);
3023      bind (done) ; 
3024      return ; 
3025    }
3026 
   // Beware ... If the aggregate size of the code emitted by CLO and CUO
   // is too large, performance rolls abruptly off a cliff.
3029    // This could be related to inlining policies, code cache management, or
3030    // I$ effects.  
3031    Label LStacked ; 
3032 
3033    if (UseBiasedLocking) {
3034       // TODO: eliminate redundant LDs of obj->mark
3035       biased_locking_exit(mark_addr, Rscratch, done);
3036    }
3037 
3038    ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ; 
3039    ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
3040    andcc  (Rscratch, Rscratch, G0);
3041    brx    (Assembler::zero, false, Assembler::pn, done);
   delayed()-> nop() ;      // consider: relocate fetch of mark, above, into this delay slot
   andcc  (Rmark, 2, G0) ;  // monitor bit clear => stack-locked
3044    brx    (Assembler::zero, false, Assembler::pt, LStacked) ; 
3045    delayed()-> nop() ; 
3046     
3047    // It's inflated
3048    // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 
3049    // the ST of 0 into _owner which releases the lock.  This prevents loads
3050    // and stores within the critical section from reordering (floating) 
3051    // past the store that releases the lock.  But TSO is a strong memory model
3052    // and that particular flavor of barrier is a noop, so we can safely elide it.
3053    // Note that we use 1-0 locking by default for the inflated case.  We
   // close the resultant (and rare) race by having contended threads in
3055    // monitorenter periodically poll _owner.  
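   // C-like sketch of the fast inflated exit (illustrative only):
   //   if (m->owner != Self || m->recursions != 0) goto done ;  // slow path
   //   if (m->EntryList != NULL || m->cxq != NULL)  goto done ; // slow path*
   //   m->owner = NULL ;                                        // 1-0 release
   // (*) the EmitSync & 65536 variant releases anyway and then performs a
   // directed succession dance instead of bailing to the slow path.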
3056    ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; 
3057    ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ; 
3058    xor3   (Rscratch, G2_thread, Rscratch) ; 
3059    orcc   (Rbox, Rscratch, Rbox) ; 
3060    brx    (Assembler::notZero, false, Assembler::pn, done) ; 
   delayed()->              // dangling delay-slot: filled by the EntryList load below
3062    ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ; 
3063    ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ; 
3064    orcc   (Rbox, Rscratch, G0) ; 
3065    if (EmitSync & 65536) { 
3066       Label LSucc ; 
3067       brx    (Assembler::notZero, false, Assembler::pn, LSucc) ;    
3068       delayed()->nop() ; 
3069       br     (Assembler::always, false, Assembler::pt, done) ; 
3070       delayed()->
3071       st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
3072 
3073       bind   (LSucc) ; 
3074       st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
3075       if (os::is_MP()) { membar (StoreLoad) ; }
3076       ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ; 
3077       andcc  (Rscratch, Rscratch, G0) ; 
3078       brx    (Assembler::notZero, false, Assembler::pt, done) ; 
3079       delayed()-> andcc (G0, G0, G0) ; 
3080       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
3081       mov    (G2_thread, Rscratch) ;
3082       casn   (Rmark, G0, Rscratch) ;
3083       cmp    (Rscratch, G0) ; 
3084       // invert icc.zf and goto done
3085       brx    (Assembler::notZero, false, Assembler::pt, done) ; 
3086       delayed() -> cmp (G0, G0) ; 
3087       br     (Assembler::always, false, Assembler::pt, done);
3088       delayed() -> cmp (G0, 1) ; 
3089    } else { 
3090       brx    (Assembler::notZero, false, Assembler::pn, done) ;    
3091       delayed()->nop() ; 
3092       br     (Assembler::always, false, Assembler::pt, done) ; 
3093       delayed()->
3094       st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
3095    }
3096 
3097    bind   (LStacked) ; 
3098    // Consider: we could replace the expensive CAS in the exit
3099    // path with a simple ST of the displaced mark value fetched from
3100    // the on-stack basiclock box.  That admits a race where a thread T2
3101    // in the slow lock path -- inflating with monitor M -- could race a
3102    // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3103    // More precisely T1 in the stack-lock unlock path could "stomp" the 
3104    // inflated mark value M installed by T2, resulting in an orphan
3105    // object monitor M and T2 becoming stranded.  We can remedy that situation
3106    // by having T2 periodically poll the object's mark word using timed wait
3107    // operations.  If T2 discovers that a stomp has occurred it vacates
3108    // the monitor M and wakes any other threads stranded on the now-orphan M. 
3109    // In addition the monitor scavenger, which performs deflation,
   // would also need to check for orphan monitors and stranded threads.
3111    //
3112    // Finally, inflation is also used when T2 needs to assign a hashCode
3113    // to O and O is stack-locked by T1.  The "stomp" race could cause
3114    // an assigned hashCode value to be lost.  We can avoid that condition
3115    // and provide the necessary hashCode stability invariants by ensuring
3116    // that hashCode generation is idempotent between copying GCs.
3117    // For example we could compute the hashCode of an object O as
3118    // O's heap address XOR some high quality RNG value that is refreshed
3119    // at GC-time.  The monitor scavenger would install the hashCode
3120    // found in any orphan monitors.  Again, the mechanism admits a 
3121    // lost-update "stomp" WAW race but detects and recovers as needed.
3122    //
3123    // A prototype implementation showed excellent results, although
3124    // the scavenger and timeout code was rather involved.
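   // One-line restatement of the idea above (illustrative only):
   //   hash(O) = (intptr_t)addr_of(O) ^ per_gc_epoch_seed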
3125 
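   // Sketch: success = (CAS(&obj->mark, box, displaced_mark) == box) ;
   // Rscratch still holds the displaced mark loaded from box->dhw above.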
3126    casn   (mark_addr.base(), Rbox, Rscratch) ; 
3127    cmp    (Rbox, Rscratch);
3128    // Intentional fall through into done ...
3129 
3130    bind   (done) ; 
3131 }
3132 
3133 
3134 
3135 void MacroAssembler::print_CPU_state() {
3136   // %%%%% need to implement this
3137 }
3138 
3139 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3140   // %%%%% need to implement this
3141 }
3142 
3143 void MacroAssembler::push_IU_state() {
3144   // %%%%% need to implement this
3145 }
3146 
3147 
3148 void MacroAssembler::pop_IU_state() {
3149   // %%%%% need to implement this
3150 }
3151 
3152 
3153 void MacroAssembler::push_FPU_state() {
3154   // %%%%% need to implement this
3155 }
3156 
3157 
3158 void MacroAssembler::pop_FPU_state() {
3159   // %%%%% need to implement this
3160 }
3161 
3162 
3163 void MacroAssembler::push_CPU_state() {
3164   // %%%%% need to implement this
3165 }
3166 
3167 
3168 void MacroAssembler::pop_CPU_state() {
3169   // %%%%% need to implement this
3170 }
3171 
3172 
3173 
3174 void MacroAssembler::verify_tlab() {
3175 #ifdef ASSERT
3176   if (UseTLAB && VerifyOops) {
3177     Label next, next2, ok;
3178     Register t1 = L0;
3179     Register t2 = L1;
3180     Register t3 = L2;
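    // t3 accumulates (top | start | end) so a single alignment test at the
    // end covers all three pointers.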
3181     
3182     save_frame(0);
3183     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3184     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
3185     or3(t1, t2, t3);
3186     cmp(t1, t2);
3187     br(Assembler::greaterEqual, false, Assembler::pn, next);
3188     delayed()->nop();
3189     stop("assert(top >= start)");
3190     should_not_reach_here();
3191 
3192     bind(next);
3193     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3194     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
3195     or3(t3, t2, t3);
3196     cmp(t1, t2);
3197     br(Assembler::lessEqual, false, Assembler::pn, next2); 
3198     delayed()->nop();
3199     stop("assert(top <= end)");
3200     should_not_reach_here();
3201 
3202     bind(next2);
3203     and3(t3, MinObjAlignmentInBytesMask, t3);
3204     cmp(t3, 0);
3205     br(Assembler::lessEqual, false, Assembler::pn, ok); 
3206     delayed()->nop();
3207     stop("assert(aligned)");
3208     should_not_reach_here();
3209 
3210     bind(ok);
3211     restore();
3212   }
3213 #endif 
3214 }
3215 
3216 
3217 void MacroAssembler::eden_allocate(
3218   Register obj,                        // result: pointer to object after successful allocation
3219   Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
3220   int      con_size_in_bytes,          // object size in bytes if   known at compile time
3221   Register t1,                         // temp register
3222   Register t2,                         // temp register
3223   Label&   slow_case                   // continuation point if fast allocation fails
3224 ){
3225   // make sure arguments make sense
3226   assert_different_registers(obj, var_size_in_bytes, t1, t2);
3227   assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
3228   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3229 
3230   // get eden boundaries
3231   // note: we need both top & top_addr!
3232   const Register top_addr = t1;
3233   const Register end      = t2;
3234   
3235   CollectedHeap* ch = Universe::heap();
3236   set((intx)ch->top_addr(), top_addr);
3237   intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
3238   ld_ptr(top_addr, delta, end);
3239   ld_ptr(top_addr, 0, obj);
3240   
3241   // try to allocate
3242   Label retry;
3243   bind(retry);
3244 #ifdef ASSERT
3245   // make sure eden top is properly aligned
3246   {
3247     Label L;
3248     btst(MinObjAlignmentInBytesMask, obj);
3249     br(Assembler::zero, false, Assembler::pt, L);
3250     delayed()->nop();
3251     stop("eden top is not properly aligned");
3252     bind(L);
3253   }
3254 #endif // ASSERT
3255   const Register free = end;
3256   sub(end, obj, free);                                   // compute amount of free space
3257   if (var_size_in_bytes->is_valid()) {
3258     // size is unknown at compile time
3259     cmp(free, var_size_in_bytes);
3260     br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
3261     delayed()->add(obj, var_size_in_bytes, end);
3262   } else {
3263     // size is known at compile time
3264     cmp(free, con_size_in_bytes);
3265     br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
3266     delayed()->add(obj, con_size_in_bytes, end);
3267   }
3268   // Compare obj with the value at top_addr; if still equal, swap the value of
3269   // end with the value at top_addr. If not equal, read the value at top_addr
3270   // into end.
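  // C-like sketch of the whole retry loop (illustrative only):
  //   do { obj = *top_addr ;
  //        if ((uintptr_t)(*end_addr - obj) < size) goto slow_case ;
  //        end = obj + size ;
  //   } while (CAS(top_addr, obj, end) != obj) ;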
3271   casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3272   // if someone beat us on the allocation, try again, otherwise continue
3273   cmp(obj, end);
3274   brx(Assembler::notEqual, false, Assembler::pn, retry);
  delayed()->mov(end, obj);                              // nop if successful since obj == end
3276 
3277 #ifdef ASSERT
3278   // make sure eden top is properly aligned
3279   {
3280     Label L;
3281     const Register top_addr = t1;
3282     
3283     set((intx)ch->top_addr(), top_addr);
3284     ld_ptr(top_addr, 0, top_addr);
3285     btst(MinObjAlignmentInBytesMask, top_addr);
3286     br(Assembler::zero, false, Assembler::pt, L);
3287     delayed()->nop();
3288     stop("eden top is not properly aligned");
3289     bind(L);
3290   }
3291 #endif // ASSERT
3292 }
3293 
3294 
3295 void MacroAssembler::tlab_allocate(
3296   Register obj,                        // result: pointer to object after successful allocation
3297   Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
3298   int      con_size_in_bytes,          // object size in bytes if   known at compile time
3299   Register t1,                         // temp register
3300   Label&   slow_case                   // continuation point if fast allocation fails
3301 ){
3302   // make sure arguments make sense
3303   assert_different_registers(obj, var_size_in_bytes, t1);
3304   assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
3305   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
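  // TLABs are thread-private, so an unsynchronized bump suffices here.
  // C-like sketch (illustrative only):
  //   obj = thread->tlab_top ;
  //   if (thread->tlab_end - obj < size) goto slow_case ;
  //   thread->tlab_top = obj + size ;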
3306 
3307   const Register free  = t1;
3308   
3309   verify_tlab();
3310   
3311   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
3312   
3313   // calculate amount of free space
3314   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
3315   sub(free, obj, free);
3316   
3317   Label done;
3318   if (var_size_in_bytes == noreg) {
3319     cmp(free, con_size_in_bytes);
3320   } else {
3321     cmp(free, var_size_in_bytes);
3322   }
3323   br(Assembler::less, false, Assembler::pn, slow_case);
3324   // calculate the new top pointer
3325   if (var_size_in_bytes == noreg) {
3326     delayed()->add(obj, con_size_in_bytes, free);
3327   } else {
3328     delayed()->add(obj, var_size_in_bytes, free);
3329   }
3330   
3331   bind(done);
3332 
3333 #ifdef ASSERT
3334   // make sure new free pointer is properly aligned
3335   {
3336     Label L;
3337     btst(MinObjAlignmentInBytesMask, free);
3338     br(Assembler::zero, false, Assembler::pt, L);
3339     delayed()->nop();
3340     stop("updated TLAB free is not properly aligned");
3341     bind(L);
3342   }
3343 #endif // ASSERT
3344 
3345   // update the tlab top pointer
3346   st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3347   verify_tlab();
3348 }
3349 
3350 
3351 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
3352   Register top = O0;
3353   Register t1 = G1;
3354   Register t2 = G3;
3355   Register t3 = O1;
3356   assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
3357   Label do_refill, discard_tlab;
3358 
3359   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3360     // No allocation in the shared eden.
3361     br(Assembler::always, false, Assembler::pt, slow_case);
3362     delayed()->nop();
3363   }
3364 
3365   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
3366   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
3367   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
3368         
3369   // calculate amount of free space
3370   sub(t1, top, t1);
3371   srl_ptr(t1, LogHeapWordSize, t1);
3372 
3373   // Retain tlab and allocate object in shared space if
3374   // the amount free in the tlab is too large to discard.
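  // Policy sketch (t1 = free words, t2 = refill_waste_limit in words):
  //   if (free <= refill_waste_limit) discard this tlab and refill it;
  //   else keep the tlab, bump the waste limit, and allocate this one
  //   object directly in eden.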
3375   cmp(t1, t2);
3376   brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
3377 
3378   // increment waste limit to prevent getting stuck on this slow path
3379   delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
3380   st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3381   if (TLABStats) {
3382     // increment number of slow_allocations
3383     ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
3384     add(t2, 1, t2);
3385     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
3386   }  
3387   br(Assembler::always, false, Assembler::pt, try_eden);
3388   delayed()->nop();
3389 
3390   bind(discard_tlab);
3391   if (TLABStats) {
3392     // increment number of refills
3393     ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
3394     add(t2, 1, t2);
3395     stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
3396     // accumulate wastage
3397     ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
3398     add(t2, t1, t2);
3399     stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
3400   }
3401 
3402   // if tlab is currently allocated (top or end != null) then
3403   // fill [top, end + alignment_reserve) with array object
3404   br_null(top, false, Assembler::pn, do_refill);
3405   delayed()->nop();
3406 
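  // The dead region must stay heap-parsable, so stamp it as an int[] whose
  // length spans [top + header, end + alignment_reserve); t1 is converted
  // from HeapWords to jint elements below.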
3407   set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
3408   st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
3409   // set klass to intArrayKlass
3410   set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
3411   ld_ptr(t2, 0, t2);
3412   st_ptr(t2, top, oopDesc::klass_offset_in_bytes());
3413   sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
3414   add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
3415   sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
3416   st(t1, top, arrayOopDesc::length_offset_in_bytes());
3417   verify_oop(top);
3418 
3419   // refill the tlab with an eden allocation
3420   bind(do_refill);
3421   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
3422   sll_ptr(t1, LogHeapWordSize, t1);
3423   // add object_size ??
3424   eden_allocate(top, t1, 0, t2, t3, slow_case);
3425 
3426   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
3427   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3428 #ifdef ASSERT
3429   // check that tlab_size (t1) is still valid
3430   {
3431     Label ok;
3432     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
3433     sll_ptr(t2, LogHeapWordSize, t2);
3434     cmp(t1, t2);
3435     br(Assembler::equal, false, Assembler::pt, ok);
3436     delayed()->nop();
3437     stop("assert(t1 == tlab_size)");
3438     should_not_reach_here();
3439 
3440     bind(ok);
3441   }
3442 #endif // ASSERT
3443   add(top, t1, top); // t1 is tlab_size
3444   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
3445   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
3446   verify_tlab();
3447   br(Assembler::always, false, Assembler::pt, retry);
3448   delayed()->nop();
3449 }
3450 
3451 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
3452   switch (cond) {
3453     // Note some conditions are synonyms for others
3454     case Assembler::never:                return Assembler::always;
3455     case Assembler::zero:                 return Assembler::notZero;
3456     case Assembler::lessEqual:            return Assembler::greater;
3457     case Assembler::less:                 return Assembler::greaterEqual;
3458     case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
3459     case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
3460     case Assembler::negative:             return Assembler::positive;
3461     case Assembler::overflowSet:          return Assembler::overflowClear;
3462     case Assembler::always:               return Assembler::never;
3463     case Assembler::notZero:              return Assembler::zero;
3464     case Assembler::greater:              return Assembler::lessEqual;
3465     case Assembler::greaterEqual:         return Assembler::less;
3466     case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
3467     case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
3468     case Assembler::positive:             return Assembler::negative;
3469     case Assembler::overflowClear:        return Assembler::overflowSet;
3470   }
3471 
3472   ShouldNotReachHere(); return Assembler::overflowClear;
3473 }
3474 
3475 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
3476                               Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
3477   Condition negated_cond = negate_condition(cond);
3478   Label L;
3479   brx(negated_cond, false, Assembler::pt, L);
3480   delayed()->nop();
3481   inc_counter(counter_ptr, Rtmp1, Rtmp2);
3482   bind(L);
3483 }
3484 
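// Note: the load/increment/store sequence emitted below is not atomic,
// so concurrent updates can occasionally be lost.  These counters are
// statistical and tolerate missed increments.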
3485 void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
3486   Address counter_addr(Rtmp1, counter_ptr);
3487   load_contents(counter_addr, Rtmp2);
3488   inc(Rtmp2);
3489   store_contents(Rtmp2, counter_addr);
3490 }
3491 
3492 SkipIfEqual::SkipIfEqual(
3493     MacroAssembler* masm, Register temp, const bool* flag_addr, 
3494     Assembler::Condition condition) {
3495   _masm = masm;
3496   Address flag(temp, (address)flag_addr, relocInfo::none);
3497   _masm->sethi(flag);
3498   _masm->ldub(flag, temp);
3499   _masm->tst(temp);
3500   _masm->br(condition, false, Assembler::pt, _label);
3501   _masm->delayed()->nop();
3502 }
3503 
3504 SkipIfEqual::~SkipIfEqual() {
3505   _masm->bind(_label);
3506 }
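// Typical use (SomeDevFlag is a hypothetical flag, for illustration):
//   { SkipIfEqual skip(masm, Rtemp, &SomeDevFlag, Assembler::zero);
//     ... code emitted here executes only when SomeDevFlag is nonzero ...
//   } // the destructor binds the skip-target label here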
3507 
3508 
// Writes to successive stack pages, down through the given size plus the
// shadow pages, to provoke any pending stack overflow early.  Clobbers
// Rtsp and Rscratch.
3511 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
3512                                      Register Rscratch) {
  // Work on a copy of the stack pointer
3514   mov(SP, Rtsp);
3515 
3516   // Bang stack for total size given plus stack shadow page size.
3517   // Bang one page at a time because a large size can overflow yellow and
3518   // red zones (the bang will fail but stack overflow handling can't tell that
3519   // it was a stack overflow bang vs a regular segv).
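  // C-like sketch (illustrative; STACK_BIAS adjusts the V9 biased SP):
  //   for (tsp = SP; size > 0; size -= page) {
  //     *(tsp - page + STACK_BIAS) = 0 ;   // touch one page below tsp
  //     tsp -= page ;                      // done in the branch delay slot
  //   }
  //   for (i = 0; i < StackShadowPages-1; i++)
  //     *(tsp - i*page + STACK_BIAS) = 0 ; // then bang the shadow zone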
3520   int offset = os::vm_page_size();
3521   Register Roffset = Rscratch;
3522 
3523   Label loop;
3524   bind(loop);
3525   set((-offset)+STACK_BIAS, Rscratch);
3526   st(G0, Rtsp, Rscratch);
3527   set(offset, Roffset);
3528   sub(Rsize, Roffset, Rsize);
3529   cmp(Rsize, G0);
3530   br(Assembler::greater, false, Assembler::pn, loop);
3531   delayed()->sub(Rtsp, Roffset, Rtsp);
3532 
3533   // Bang down shadow pages too.
3534   // The -1 because we already subtracted 1 page.
  for (int i = 0; i < StackShadowPages-1; i++) {
3536     set((-i*offset)+STACK_BIAS, Rscratch);
3537     st(G0, Rtsp, Rscratch);
3538   }
3539 }