1 /*
   2  * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 class BlockBegin;
  26 class BlockList;
  27 class LIR_Assembler;
  28 class CodeEmitInfo;
  29 class CodeStub;
  30 class CodeStubList;
  31 class ArrayCopyStub;
  32 class LIR_Op;
  33 class ciType;
  34 class ValueType;
  35 class LIR_OpVisitState;
  36 class FpuStackSim;
  37 
  38 //---------------------------------------------------------------------
  39 //                 LIR Operands
  40 //  LIR_OprDesc
  41 //    LIR_OprPtr
  42 //      LIR_Const
  43 //      LIR_Address
  44 //---------------------------------------------------------------------
  45 class LIR_OprDesc;
  46 class LIR_OprPtr;
  47 class LIR_Const;
  48 class LIR_Address;
  49 class LIR_OprVisitor;
  50 
  51 
  52 typedef LIR_OprDesc* LIR_Opr;
  53 typedef int          RegNr;
  54 
  55 define_array(LIR_OprArray, LIR_Opr)
  56 define_stack(LIR_OprList, LIR_OprArray)
  57 
  58 define_array(LIR_OprRefArray, LIR_Opr*)
  59 define_stack(LIR_OprRefList, LIR_OprRefArray)
  60 
  61 define_array(CodeEmitInfoArray, CodeEmitInfo*)
  62 define_stack(CodeEmitInfoList, CodeEmitInfoArray)
  63 
  64 define_array(LIR_OpArray, LIR_Op*)
  65 define_stack(LIR_OpList, LIR_OpArray)
  66 
  67 // define LIR_OprPtr early so LIR_OprDesc can refer to it
  68 class LIR_OprPtr: public CompilationResourceObj {
  69  public:
  70   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  71   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  72 
  73   virtual LIR_Const*  as_constant()              { return NULL; }
  74   virtual LIR_Address* as_address()              { return NULL; }
  75   virtual BasicType type() const                 = 0;
  76   virtual void print_value_on(outputStream* out) const = 0;
  77 };
  78 
  79 
  80 
  81 // LIR constants
  82 class LIR_Const: public LIR_OprPtr {
  83  private:
  84   JavaValue _value;
  85 
  86   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  87   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  88 
  89  public:
  90   LIR_Const(jint i)                              { _value.set_type(T_INT);     _value.set_jint(i); }
  91   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  92   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  93   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  94   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  95   LIR_Const(void* p) {
  96 #ifdef _LP64
    assert(sizeof(jlong) >= sizeof(p), "too small");
  98     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
  99 #else
    assert(sizeof(jint) >= sizeof(p), "too small");
 101     _value.set_type(T_INT);     _value.set_jint((jint)p);
 102 #endif
 103   }
 104 
 105   virtual BasicType type()       const { return _value.get_type(); }
 106   virtual LIR_Const* as_constant()     { return this; }
 107 
 108   jint      as_jint()    const         { type_check(T_INT   ); return _value.get_jint(); }
 109   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 110   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 111   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 112   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 113   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 114   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 115 
 116 #ifdef _LP64
 117   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 118 #else
 119   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 120 #endif
 121 
 122 
 123   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
 124   jint      as_jint_lo_bits() const    {
 125     if (type() == T_DOUBLE) {
 126       return low(jlong_cast(_value.get_jdouble()));
 127     } else {
 128       return as_jint_lo();
 129     }
 130   }
 131   jint      as_jint_hi_bits() const    {
 132     if (type() == T_DOUBLE) {
 133       return high(jlong_cast(_value.get_jdouble()));
 134     } else {
 135       return as_jint_hi();
 136     }
 137   }
 138   jlong      as_jlong_bits() const    {
 139     if (type() == T_DOUBLE) {
 140       return jlong_cast(_value.get_jdouble());
 141     } else {
 142       return as_jlong();
 143     }
 144   }
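
  // Worked example (illustrative): for LIR_Const(2.0), a T_DOUBLE constant,
  // the raw IEEE-754 bits are 0x4000000000000000, so as_jlong_bits() returns
  // that value while as_jint_hi_bits() and as_jint_lo_bits() return the two
  // 32-bit halves 0x40000000 and 0x00000000 respectively.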
 145 
 146   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 147 
 148 
 149   bool is_zero_float() {
 150     jfloat f = as_jfloat();
 151     jfloat ok = 0.0f;
 152     return jint_cast(f) == jint_cast(ok);
 153   }
 154 
 155   bool is_one_float() {
 156     jfloat f = as_jfloat();
 157     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 158   }
 159 
 160   bool is_zero_double() {
 161     jdouble d = as_jdouble();
 162     jdouble ok = 0.0;
 163     return jlong_cast(d) == jlong_cast(ok);
 164   }
 165 
 166   bool is_one_double() {
 167     jdouble d = as_jdouble();
 168     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 169   }
 170 };
 171 
 172 
//---------------------LIR Operand descriptor------------------------------------
//
// The class LIR_OprDesc represents a LIR instruction operand;
// it can be a register (ALU/FPU), a stack location or a constant.
// Constants and addresses are represented as resource-area allocated
// structures (see LIR_Const above and LIR_Address below).
// Registers and stack locations are encoded directly in the pointer
// value itself (see the value() function).
 181 
 182 class LIR_OprDesc: public CompilationResourceObj {
 183  public:
  // value structure:
  //     data          opr-type opr-kind
  // +----------------+--------+-------+
  // [max.............|6 5 4 3 | 2 1 0 ]
  //                                 ^
  //                        is_pointer bit
  //
  // If the lowest bit is cleared, the value is a pointer to a resource-area
  // allocated structure (LIR_OprPtr); otherwise the kind, type and data are
  // encoded directly in the bits, and 4 bits are used for the operand type.
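  //
  // Decoding example (illustrative; the exact field widths and shifts are
  // given by the OprBits/OprShift enums below): LIR_OprFact::single_cpu(5)
  // builds the value (5 << data_shift) | int_type | cpu_register | single_size,
  // so kind_field() == cpu_register, type_field() == int_type and data() == 5;
  // the set lowest bit marks the value as a direct encoding, not a pointer.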
 193 
 194  private:
 195   friend class LIR_OprFact;
 196 
 197   // Conversion
 198   intptr_t value() const                         { return (intptr_t) this; }
 199 
 200   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 201     return (value() & mask) == masked_value;
 202   }
 203 
 204   enum OprKind {
 205       pointer_value      = 0
 206     , stack_value        = 1
 207     , cpu_register       = 3
 208     , fpu_register       = 5
 209     , illegal_value      = 7
 210   };
 211 
 212   enum OprBits {
 213       pointer_bits   = 1
 214     , kind_bits      = 3
 215     , type_bits      = 4
 216     , size_bits      = 2
 217     , destroys_bits  = 1
 218     , virtual_bits   = 1
 219     , is_xmm_bits    = 1
 220     , last_use_bits  = 1
 221     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 222     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
 223                        is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
 224     , data_bits      = BitsPerInt - non_data_bits
 225     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 226   };
 227 
 228   enum OprShift {
 229       kind_shift     = 0
 230     , type_shift     = kind_shift     + kind_bits
 231     , size_shift     = type_shift     + type_bits
 232     , destroys_shift = size_shift     + size_bits
 233     , last_use_shift = destroys_shift + destroys_bits
 234     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 235     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 236     , is_xmm_shift   = virtual_shift + virtual_bits
 237     , data_shift     = is_xmm_shift + is_xmm_bits
 238     , reg1_shift = data_shift
 239     , reg2_shift = data_shift + reg_bits
 240 
 241   };
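
  // With the bit counts above, non_data_bits = 3 + 4 + 2 + 1 + 1 + 1 + 1 + 1 = 14,
  // so data_shift == 14, data_bits == BitsPerInt - 14 (18 for a 32-bit int) and
  // reg_bits == 9, i.e. each register half of a two-register operand gets 9 bits.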
 242 
 243   enum OprSize {
 244       single_size = 0 << size_shift
 245     , double_size = 1 << size_shift
 246   };
 247 
 248   enum OprMask {
 249       kind_mask      = right_n_bits(kind_bits)
 250     , type_mask      = right_n_bits(type_bits) << type_shift
 251     , size_mask      = right_n_bits(size_bits) << size_shift
 252     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 253     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 254     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 255     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 256     , pointer_mask   = right_n_bits(pointer_bits)
 257     , lower_reg_mask = right_n_bits(reg_bits)
 258     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 259   };
 260 
 261   uintptr_t data() const                         { return value() >> data_shift; }
 262   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 263   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 264   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 265   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 266 
 267   static char type_char(BasicType t);
 268 
 269  public:
 270   enum {
 271     vreg_base = ConcreteRegisterImpl::number_of_registers,
 272     vreg_max = (1 << data_bits) - 1
 273   };
 274 
 275   static inline LIR_Opr illegalOpr();
 276 
 277   enum OprType {
 278       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 279     , int_type      = 1 << type_shift
 280     , long_type     = 2 << type_shift
 281     , object_type   = 3 << type_shift
 282     , pointer_type  = 4 << type_shift
 283     , float_type    = 5 << type_shift
 284     , double_type   = 6 << type_shift
 285   };
 286   friend OprType as_OprType(BasicType t);
 287   friend BasicType as_BasicType(OprType t);
 288 
 289   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 290   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 291 
 292   static OprSize size_for(BasicType t) {
 293     switch (t) {
 294       case T_LONG:
 295       case T_DOUBLE:
 296         return double_size;
 297         break;
 298 
 299       case T_FLOAT:
 300       case T_BOOLEAN:
 301       case T_CHAR:
 302       case T_BYTE:
 303       case T_SHORT:
 304       case T_INT:
 305       case T_OBJECT:
 306       case T_ARRAY:
 307         return single_size;
 308         break;
 309 
 310       default:
 311         ShouldNotReachHere();
 312         return single_size;
 313       }
 314   }
 315 
 316 
 317   void validate_type() const PRODUCT_RETURN;
 318 
 319   BasicType type() const {
 320     if (is_pointer()) {
 321       return pointer()->type();
 322     }
 323     return as_BasicType(type_field());
 324   }
 325 
 326 
 327   ValueType* value_type() const                  { return as_ValueType(type()); }
 328 
 329   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 330 
 331   bool is_equal(LIR_Opr opr) const         { return this == opr; }
  // checks whether the types are the same
 333   bool is_same_type(LIR_Opr opr) const     {
 334     assert(type_field() != unknown_type &&
 335            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 336     return type_field() == opr->type_field();
 337   }
 338   bool is_same_register(LIR_Opr opr) {
 339     return (is_register() && opr->is_register() &&
 340             kind_field() == opr->kind_field() &&
 341             (value() & no_type_mask) == (opr->value() & no_type_mask));
 342   }
 343 
 344   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 345   bool is_illegal() const      { return kind_field() == illegal_value; }
 346   bool is_valid() const        { return kind_field() != illegal_value; }
 347 
 348   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 349   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 350 
 351   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 352   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 353 
 354   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 355   bool is_oop() const;
 356 
  // Semantics for FPU and XMM registers:
  // * the generic FPU queries (is_fpu_register, is_single_fpu, is_double_fpu)
  //   also return true for XMM registers, i.e. for a single XMM register both
  //   is_single_fpu() and is_single_xmm() are true.
  // * Therefore always check the is_???_xmm() predicates before the
  //   is_???_fpu() ones to distinguish XMM registers from FPU registers.
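  //
  // For example (illustrative only):
  //   if      (opr->is_single_xmm()) { /* XMM register */       }
  //   else if (opr->is_single_fpu()) { /* FPU stack register */ }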
 362 
 363   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 364   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 365   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 366 
 367   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 368   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 369   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 370   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 371   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 372 
 373   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 374   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 375   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 376   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 377   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 378 
 379   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 380   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 381   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 382 
  // fast accessor functions for special bits that do not work for pointers
  // (in these functions, the check for is_pointer() is omitted)
 385   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 386   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 387   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 388   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 389   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 390 
 391   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 392   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 393   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 394   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 395 
 396 
 397   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 398   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 399   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 400   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 401   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 402   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 403   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 404   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 405   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 406   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 407   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 408   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 409 
 410   LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
 411   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 412   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 413 
 414   Register as_register()    const;
 415   Register as_register_lo() const;
 416   Register as_register_hi() const;
 417 
 418   Register as_pointer_register() {
 419 #ifdef _LP64
 420     if (is_double_cpu()) {
 421       assert(as_register_lo() == as_register_hi(), "should be a single register");
 422       return as_register_lo();
 423     }
 424 #endif
 425     return as_register();
 426   }
 427 
 428 #ifdef X86
 429   XMMRegister as_xmm_float_reg() const;
 430   XMMRegister as_xmm_double_reg() const;
 431   // for compatibility with RInfo
 432   int fpu () const                                  { return lo_reg_half(); }
 433 #endif // X86
 434 
 435 #ifdef SPARC
 436   FloatRegister as_float_reg   () const;
 437   FloatRegister as_double_reg  () const;
 438 #endif
 439 
 440   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 441   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 442   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 443   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 444   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 445 
 446   void print() const PRODUCT_RETURN;
 447   void print(outputStream* out) const PRODUCT_RETURN;
 448 };
 449 
 450 
 451 inline LIR_OprDesc::OprType as_OprType(BasicType type) {
 452   switch (type) {
 453   case T_INT:      return LIR_OprDesc::int_type;
 454   case T_LONG:     return LIR_OprDesc::long_type;
 455   case T_FLOAT:    return LIR_OprDesc::float_type;
 456   case T_DOUBLE:   return LIR_OprDesc::double_type;
 457   case T_OBJECT:
 458   case T_ARRAY:    return LIR_OprDesc::object_type;
 459   case T_ILLEGAL:  // fall through
 460   default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
 461   }
 462 }
 463 
 464 inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
 465   switch (t) {
 466   case LIR_OprDesc::int_type:     return T_INT;
 467   case LIR_OprDesc::long_type:    return T_LONG;
 468   case LIR_OprDesc::float_type:   return T_FLOAT;
 469   case LIR_OprDesc::double_type:  return T_DOUBLE;
 470   case LIR_OprDesc::object_type:  return T_OBJECT;
 471   case LIR_OprDesc::unknown_type: // fall through
 472   default: ShouldNotReachHere();  return T_ILLEGAL;
 473   }
 474 }
 475 
 476 
 477 // LIR_Address
 478 class LIR_Address: public LIR_OprPtr {
 479  friend class LIR_OpVisitState;
 480 
 481  public:
 482   // NOTE: currently these must be the log2 of the scale factor (and
 483   // must also be equivalent to the ScaleFactor enum in
 484   // assembler_i486.hpp)
 485   enum Scale {
 486     times_1  =  0,
 487     times_2  =  1,
 488     times_4  =  2,
 489     times_8  =  3
 490   };
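
  // For example, a jint array element (4 bytes) would be addressed with
  // times_4 (log2(4) == 2), a jlong element (8 bytes) with times_8.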
 491 
 492  private:
 493   LIR_Opr   _base;
 494   LIR_Opr   _index;
 495   Scale     _scale;
 496   intx      _disp;
 497   BasicType _type;
 498 
 499  public:
 500   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 501        _base(base)
 502      , _index(index)
 503      , _scale(times_1)
 504      , _type(type)
 505      , _disp(0) { verify(); }
 506 
 507   LIR_Address(LIR_Opr base, int disp, BasicType type):
 508        _base(base)
 509      , _index(LIR_OprDesc::illegalOpr())
 510      , _scale(times_1)
 511      , _type(type)
 512      , _disp(disp) { verify(); }
 513 
 514 #ifdef X86
 515   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
 516        _base(base)
 517      , _index(index)
 518      , _scale(scale)
 519      , _type(type)
 520      , _disp(disp) { verify(); }
 521 #endif // X86
 522 
 523   LIR_Opr base()  const                          { return _base;  }
 524   LIR_Opr index() const                          { return _index; }
 525   Scale   scale() const                          { return _scale; }
 526   intx    disp()  const                          { return _disp;  }
 527 
 528   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 529 
 530   virtual LIR_Address* as_address()              { return this;   }
 531   virtual BasicType type() const                 { return _type; }
 532   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 533 
 534   void verify() const PRODUCT_RETURN;
 535 
 536   static Scale scale(BasicType type);
 537 };
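
// Usage sketch (illustrative only; 'base_opr' and the displacement are made-up
// example values): addresses are resource-allocated and wrapped into a LIR_Opr
// through the factory below.
//
//   LIR_Address* addr = new LIR_Address(base_opr, 16, T_INT);
//   LIR_Opr      opr  = LIR_OprFact::address(addr);
//   // opr->is_address() is true and opr->as_address_ptr() == addr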
 538 
 539 
 540 // operand factory
 541 class LIR_OprFact: public AllStatic {
 542  public:
 543 
 544   static LIR_Opr illegalOpr;
 545 
 546   static LIR_Opr single_cpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
 547   static LIR_Opr single_cpu_oop(int reg)        { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
 548   static LIR_Opr double_cpu(int reg1, int reg2) {
 549     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 550     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 551                                (reg2 << LIR_OprDesc::reg2_shift) |
 552                                LIR_OprDesc::long_type            |
 553                                LIR_OprDesc::cpu_register         |
 554                                LIR_OprDesc::double_size);
 555   }
 556 
 557   static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 558                                                                              LIR_OprDesc::float_type           |
 559                                                                              LIR_OprDesc::fpu_register         |
 560                                                                              LIR_OprDesc::single_size); }
 561 
 562 #ifdef SPARC
 563   static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 564                                                                              (reg2 << LIR_OprDesc::reg2_shift) |
 565                                                                              LIR_OprDesc::double_type          |
 566                                                                              LIR_OprDesc::fpu_register         |
 567                                                                              LIR_OprDesc::double_size); }
 568 #endif
 569 #ifdef X86
 570   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 571                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 572                                                                              LIR_OprDesc::double_type          |
 573                                                                              LIR_OprDesc::fpu_register         |
 574                                                                              LIR_OprDesc::double_size); }
 575 
 576   static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 577                                                                              LIR_OprDesc::float_type           |
 578                                                                              LIR_OprDesc::fpu_register         |
 579                                                                              LIR_OprDesc::single_size          |
 580                                                                              LIR_OprDesc::is_xmm_mask); }
 581   static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 582                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 583                                                                              LIR_OprDesc::double_type          |
 584                                                                              LIR_OprDesc::fpu_register         |
 585                                                                              LIR_OprDesc::double_size          |
 586                                                                              LIR_OprDesc::is_xmm_mask); }
 587 #endif // X86
 588 
 589 
 590   static LIR_Opr virtual_register(int index, BasicType type) {
 591     LIR_Opr res;
 592     switch (type) {
 593       case T_OBJECT: // fall through
 594       case T_ARRAY:
 595         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 596                                             LIR_OprDesc::object_type  |
 597                                             LIR_OprDesc::cpu_register |
 598                                             LIR_OprDesc::single_size  |
 599                                             LIR_OprDesc::virtual_mask);
 600         break;
 601 
 602       case T_INT:
 603         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 604                                   LIR_OprDesc::int_type              |
 605                                   LIR_OprDesc::cpu_register          |
 606                                   LIR_OprDesc::single_size           |
 607                                   LIR_OprDesc::virtual_mask);
 608         break;
 609 
 610       case T_LONG:
 611         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 612                                   LIR_OprDesc::long_type             |
 613                                   LIR_OprDesc::cpu_register          |
 614                                   LIR_OprDesc::double_size           |
 615                                   LIR_OprDesc::virtual_mask);
 616         break;
 617 
 618       case T_FLOAT:
 619         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 620                                   LIR_OprDesc::float_type           |
 621                                   LIR_OprDesc::fpu_register         |
 622                                   LIR_OprDesc::single_size          |
 623                                   LIR_OprDesc::virtual_mask);
 624         break;
 625 
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
 633 
 634       default:       ShouldNotReachHere(); res = illegalOpr;
 635     }
 636 
 637 #ifdef ASSERT
 638     res->validate_type();
 639     assert(res->vreg_number() == index, "conversion check");
 640     assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
 641     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 642 
 643     // old-style calculation; check if old and new method are equal
 644     LIR_OprDesc::OprType t = as_OprType(type);
 645     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
 646                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
 647                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 648     assert(res == old_res, "old and new method not equal");
 649 #endif
 650 
 651     return res;
 652   }
 653 
  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack slot using indices 2 and 3 always has
  // index 2.
 657   static LIR_Opr stack(int index, BasicType type) {
 658     LIR_Opr res;
 659     switch (type) {
 660       case T_OBJECT: // fall through
 661       case T_ARRAY:
 662         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 663                                   LIR_OprDesc::object_type           |
 664                                   LIR_OprDesc::stack_value           |
 665                                   LIR_OprDesc::single_size);
 666         break;
 667 
 668       case T_INT:
 669         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 670                                   LIR_OprDesc::int_type              |
 671                                   LIR_OprDesc::stack_value           |
 672                                   LIR_OprDesc::single_size);
 673         break;
 674 
 675       case T_LONG:
 676         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 677                                   LIR_OprDesc::long_type             |
 678                                   LIR_OprDesc::stack_value           |
 679                                   LIR_OprDesc::double_size);
 680         break;
 681 
 682       case T_FLOAT:
 683         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 684                                   LIR_OprDesc::float_type            |
 685                                   LIR_OprDesc::stack_value           |
 686                                   LIR_OprDesc::single_size);
 687         break;
 688       case T_DOUBLE:
 689         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 690                                   LIR_OprDesc::double_type           |
 691                                   LIR_OprDesc::stack_value           |
 692                                   LIR_OprDesc::double_size);
 693         break;
 694 
 695       default:       ShouldNotReachHere(); res = illegalOpr;
 696     }
 697 
 698 #ifdef ASSERT
    assert(index >= 0, "index must be non-negative");
 700     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 701 
 702     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 703                                           LIR_OprDesc::stack_value           |
 704                                           as_OprType(type)                   |
 705                                           LIR_OprDesc::size_for(type));
 706     assert(res == old_res, "old and new method not equal");
 707 #endif
 708 
 709     return res;
 710   }
 711 
 712   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 713   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 714   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 715   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 716   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 717   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 718   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 719   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 720   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 721 
 722   static LIR_Opr value_type(ValueType* type);
 723   static LIR_Opr dummy_value_type(ValueType* type);
 724 };
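
// Usage sketch for the factory (illustrative only; register number, virtual
// register index and constant value are arbitrary example values):
//
//   LIR_Opr fixed = LIR_OprFact::single_cpu(3);     // physical CPU register 3, int type
//   LIR_Opr vreg  = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base, T_OBJECT);
//   LIR_Opr con   = LIR_OprFact::intConst(42);      // resource-allocated LIR_Const
//
//   // fixed->is_single_cpu() && fixed->cpu_regnr() == 3
//   // vreg->is_virtual_cpu() && vreg->vreg_number() == LIR_OprDesc::vreg_base
//   // con->is_constant()     && con->as_jint() == 42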
 725 
 726 
 727 //-------------------------------------------------------------------------------
 728 //                   LIR Instructions
 729 //-------------------------------------------------------------------------------
 730 //
 731 // Note:
 732 //  - every instruction has a result operand
//  - every instruction has a CodeEmitInfo operand (can be revisited later)
//  - every instruction has a LIR_OpCode operand
//  - LIR_OpN means an instruction with N input operands
 736 //
 737 // class hierarchy:
 738 //
 739 class  LIR_Op;
 740 class    LIR_Op0;
 741 class      LIR_OpLabel;
 742 class    LIR_Op1;
 743 class      LIR_OpBranch;
 744 class      LIR_OpConvert;
 745 class      LIR_OpAllocObj;
 746 class      LIR_OpRoundFP;
 747 class    LIR_Op2;
 748 class    LIR_OpDelay;
 749 class    LIR_Op3;
 750 class      LIR_OpAllocArray;
 751 class    LIR_OpCall;
 752 class      LIR_OpJavaCall;
 753 class      LIR_OpRTCall;
 754 class    LIR_OpArrayCopy;
 755 class    LIR_OpLock;
 756 class    LIR_OpTypeCheck;
 757 class    LIR_OpCompareAndSwap;
 758 class    LIR_OpProfileCall;
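
// For example, lir_move is handled by LIR_Op1 (one input operand), lir_add by
// LIR_Op2 (two inputs) and lir_idiv/lir_irem by LIR_Op3; the constructors below
// assert that the code lies in the matching range of the LIR_Code enum.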
 759 
 760 
 761 // LIR operation codes
 762 enum LIR_Code {
 763     lir_none
 764   , begin_op0
 765       , lir_word_align
 766       , lir_label
 767       , lir_nop
 768       , lir_backwardbranch_target
 769       , lir_std_entry
 770       , lir_osr_entry
 771       , lir_build_frame
 772       , lir_fpop_raw
 773       , lir_24bit_FPU
 774       , lir_reset_FPU
 775       , lir_breakpoint
 776       , lir_rtcall
 777       , lir_membar
 778       , lir_membar_acquire
 779       , lir_membar_release
 780       , lir_get_thread
 781   , end_op0
 782   , begin_op1
 783       , lir_fxch
 784       , lir_fld
 785       , lir_ffree
 786       , lir_push
 787       , lir_pop
 788       , lir_null_check
 789       , lir_return
 790       , lir_leal
 791       , lir_neg
 792       , lir_branch
 793       , lir_cond_float_branch
 794       , lir_move
 795       , lir_prefetchr
 796       , lir_prefetchw
 797       , lir_convert
 798       , lir_alloc_object
 799       , lir_monaddr
 800       , lir_roundfp
 801       , lir_safepoint
 802   , end_op1
 803   , begin_op2
 804       , lir_cmp
 805       , lir_cmp_l2i
 806       , lir_ucmp_fd2i
 807       , lir_cmp_fd2i
 808       , lir_cmove
 809       , lir_add
 810       , lir_sub
 811       , lir_mul
 812       , lir_mul_strictfp
 813       , lir_div
 814       , lir_div_strictfp
 815       , lir_rem
 816       , lir_sqrt
 817       , lir_abs
 818       , lir_sin
 819       , lir_cos
 820       , lir_tan
 821       , lir_log
 822       , lir_log10
 823       , lir_logic_and
 824       , lir_logic_or
 825       , lir_logic_xor
 826       , lir_shl
 827       , lir_shr
 828       , lir_ushr
 829       , lir_alloc_array
 830       , lir_throw
 831       , lir_unwind
 832       , lir_compare_to
 833   , end_op2
 834   , begin_op3
 835       , lir_idiv
 836       , lir_irem
 837   , end_op3
 838   , begin_opJavaCall
 839       , lir_static_call
 840       , lir_optvirtual_call
 841       , lir_icvirtual_call
 842       , lir_virtual_call
 843       , lir_dynamic_call
 844   , end_opJavaCall
 845   , begin_opArrayCopy
 846       , lir_arraycopy
 847   , end_opArrayCopy
 848   , begin_opLock
 849     , lir_lock
 850     , lir_unlock
 851   , end_opLock
 852   , begin_delay_slot
 853     , lir_delay_slot
 854   , end_delay_slot
 855   , begin_opTypeCheck
 856     , lir_instanceof
 857     , lir_checkcast
 858     , lir_store_check
 859   , end_opTypeCheck
 860   , begin_opCompareAndSwap
 861     , lir_cas_long
 862     , lir_cas_obj
 863     , lir_cas_int
 864   , end_opCompareAndSwap
 865   , begin_opMDOProfile
 866     , lir_profile_call
 867   , end_opMDOProfile
 868 };
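
// The begin_*/end_* markers bracket the code range handled by each instruction
// class; constructors check membership with LIR_Op::is_in_range(code, begin_opX,
// end_opX), e.g. a LIR_Op1 only accepts codes between begin_op1 and end_op1.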
 869 
 870 
 871 enum LIR_Condition {
 872     lir_cond_equal
 873   , lir_cond_notEqual
 874   , lir_cond_less
 875   , lir_cond_lessEqual
 876   , lir_cond_greaterEqual
 877   , lir_cond_greater
 878   , lir_cond_belowEqual
 879   , lir_cond_aboveEqual
 880   , lir_cond_always
 881   , lir_cond_unknown = -1
 882 };
 883 
 884 
 885 enum LIR_PatchCode {
 886   lir_patch_none,
 887   lir_patch_low,
 888   lir_patch_high,
 889   lir_patch_normal
 890 };
 891 
 892 
 893 enum LIR_MoveKind {
 894   lir_move_normal,
 895   lir_move_volatile,
 896   lir_move_unaligned,
 897   lir_move_max_flag
 898 };
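
// For lir_move instructions the move kind is stored in the generic _flags field
// of LIR_Op; see LIR_Op1::set_kind() and LIR_Op1::move_kind() below.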
 899 
 900 
 901 // --------------------------------------------------
 902 // LIR_Op
 903 // --------------------------------------------------
 904 class LIR_Op: public CompilationResourceObj {
 905  friend class LIR_OpVisitState;
 906 
 907 #ifdef ASSERT
 908  private:
 909   const char *  _file;
 910   int           _line;
 911 #endif
 912 
 913  protected:
 914   LIR_Opr       _result;
 915   unsigned short _code;
 916   unsigned short _flags;
 917   CodeEmitInfo* _info;
 918   int           _id;     // value id for register allocation
 919   int           _fpu_pop_count;
 920   Instruction*  _source; // for debugging
 921 
 922   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
 923 
 924  protected:
 925   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
 926 
 927  public:
 928   LIR_Op()
 929     : _result(LIR_OprFact::illegalOpr)
 930     , _code(lir_none)
 931     , _flags(0)
 932     , _info(NULL)
 933 #ifdef ASSERT
 934     , _file(NULL)
 935     , _line(0)
 936 #endif
 937     , _fpu_pop_count(0)
 938     , _source(NULL)
 939     , _id(-1)                             {}
 940 
 941   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
 942     : _result(result)
 943     , _code(code)
 944     , _flags(0)
 945     , _info(info)
 946 #ifdef ASSERT
 947     , _file(NULL)
 948     , _line(0)
 949 #endif
 950     , _fpu_pop_count(0)
 951     , _source(NULL)
 952     , _id(-1)                             {}
 953 
 954   CodeEmitInfo* info() const                  { return _info;   }
 955   LIR_Code code()      const                  { return (LIR_Code)_code;   }
 956   LIR_Opr result_opr() const                  { return _result; }
 957   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
 958 
 959 #ifdef ASSERT
 960   void set_file_and_line(const char * file, int line) {
 961     _file = file;
 962     _line = line;
 963   }
 964 #endif
 965 
 966   virtual const char * name() const PRODUCT_RETURN0;
 967 
 968   int id()             const                  { return _id;     }
 969   void set_id(int id)                         { _id = id; }
 970 
 971   // FPU stack simulation helpers -- only used on Intel
 972   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
 973   int  fpu_pop_count() const                  { return _fpu_pop_count; }
 974   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
 975 
 976   Instruction* source() const                 { return _source; }
 977   void set_source(Instruction* ins)           { _source = ins; }
 978 
 979   virtual void emit_code(LIR_Assembler* masm) = 0;
 980   virtual void print_instr(outputStream* out) const   = 0;
 981   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
 982 
 983   virtual LIR_OpCall* as_OpCall() { return NULL; }
 984   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
 985   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
 986   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
 987   virtual LIR_OpLock* as_OpLock() { return NULL; }
 988   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
 989   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
 990   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
 991   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
 992   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
 993   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
 994   virtual LIR_Op0* as_Op0() { return NULL; }
 995   virtual LIR_Op1* as_Op1() { return NULL; }
 996   virtual LIR_Op2* as_Op2() { return NULL; }
 997   virtual LIR_Op3* as_Op3() { return NULL; }
 998   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
 999   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1000   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1001   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1002 
1003   virtual void verify() const {}
1004 };
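
// Illustrative downcast pattern (the variable names are made up): the as_OpXxx()
// hooks return NULL unless the op really is of that kind, so code walking a LIR
// list can write
//
//   if (LIR_OpBranch* branch = op->as_OpBranch()) {
//     // branch-specific handling, e.g. branch->block() or branch->label()
//   }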
1005 
1006 // for calls
1007 class LIR_OpCall: public LIR_Op {
1008  friend class LIR_OpVisitState;
1009 
1010  protected:
1011   address      _addr;
1012   LIR_OprList* _arguments;
1013  protected:
1014   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1015              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1016     : LIR_Op(code, result, info)
1017     , _arguments(arguments)
1018     , _addr(addr) {}
1019 
1020  public:
1021   address addr() const                           { return _addr; }
1022   const LIR_OprList* arguments() const           { return _arguments; }
1023   virtual LIR_OpCall* as_OpCall()                { return this; }
1024 };
1025 
1026 
1027 // --------------------------------------------------
1028 // LIR_OpJavaCall
1029 // --------------------------------------------------
1030 class LIR_OpJavaCall: public LIR_OpCall {
1031  friend class LIR_OpVisitState;
1032 
1033  private:
1034   ciMethod*       _method;
1035   LIR_Opr         _receiver;
1036 
1037  public:
1038   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1039                  LIR_Opr receiver, LIR_Opr result,
1040                  address addr, LIR_OprList* arguments,
1041                  CodeEmitInfo* info)
1042   : LIR_OpCall(code, addr, result, arguments, info)
1043   , _receiver(receiver)
1044   , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1045 
1046   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1047                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1048                  LIR_OprList* arguments, CodeEmitInfo* info)
1049   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1050   , _receiver(receiver)
1051   , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1052 
1053   LIR_Opr receiver() const                       { return _receiver; }
1054   ciMethod* method() const                       { return _method;   }
1055 
1056   // JSR 292 support.
1057   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1058   bool is_method_handle_invoke() const {
1059     return
1060       is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
1061       ||
1062       (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
1063        method()->name()           == ciSymbol::invoke_name()); 
1064   }
1065 
1066   intptr_t vtable_offset() const {
1067     assert(_code == lir_virtual_call, "only have vtable for real vcall");
1068     return (intptr_t) addr();
1069   }
1070 
1071   virtual void emit_code(LIR_Assembler* masm);
1072   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1073   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1074 };
1075 
1076 // --------------------------------------------------
1077 // LIR_OpLabel
1078 // --------------------------------------------------
1079 // Location where a branch can continue
1080 class LIR_OpLabel: public LIR_Op {
1081  friend class LIR_OpVisitState;
1082 
1083  private:
1084   Label* _label;
1085  public:
1086   LIR_OpLabel(Label* lbl)
1087    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1088    , _label(lbl)                                 {}
1089   Label* label() const                           { return _label; }
1090 
1091   virtual void emit_code(LIR_Assembler* masm);
1092   virtual LIR_OpLabel* as_OpLabel() { return this; }
1093   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1094 };
1095 
1096 // LIR_OpArrayCopy
1097 class LIR_OpArrayCopy: public LIR_Op {
1098  friend class LIR_OpVisitState;
1099 
1100  private:
1101   ArrayCopyStub*  _stub;
1102   LIR_Opr   _src;
1103   LIR_Opr   _src_pos;
1104   LIR_Opr   _dst;
1105   LIR_Opr   _dst_pos;
1106   LIR_Opr   _length;
1107   LIR_Opr   _tmp;
1108   ciArrayKlass* _expected_type;
1109   int       _flags;
1110 
1111 public:
1112   enum Flags {
1113     src_null_check         = 1 << 0,
1114     dst_null_check         = 1 << 1,
1115     src_pos_positive_check = 1 << 2,
1116     dst_pos_positive_check = 1 << 3,
1117     length_positive_check  = 1 << 4,
1118     src_range_check        = 1 << 5,
1119     dst_range_check        = 1 << 6,
1120     type_check             = 1 << 7,
1121     all_flags              = (1 << 8) - 1
1122   };
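
  // Illustrative only: a caller that has already proven the source and
  // destination arrays non-null could clear those bits, e.g. pass
  // all_flags & ~(src_null_check | dst_null_check), so that only the
  // remaining checks are emitted.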
1123 
1124   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1125                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1126 
1127   LIR_Opr src() const                            { return _src; }
1128   LIR_Opr src_pos() const                        { return _src_pos; }
1129   LIR_Opr dst() const                            { return _dst; }
1130   LIR_Opr dst_pos() const                        { return _dst_pos; }
1131   LIR_Opr length() const                         { return _length; }
1132   LIR_Opr tmp() const                            { return _tmp; }
1133   int flags() const                              { return _flags; }
1134   ciArrayKlass* expected_type() const            { return _expected_type; }
1135   ArrayCopyStub* stub() const                    { return _stub; }
1136 
1137   virtual void emit_code(LIR_Assembler* masm);
1138   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1139   void print_instr(outputStream* out) const PRODUCT_RETURN;
1140 };
1141 
1142 
1143 // --------------------------------------------------
1144 // LIR_Op0
1145 // --------------------------------------------------
1146 class LIR_Op0: public LIR_Op {
1147  friend class LIR_OpVisitState;
1148 
1149  public:
1150   LIR_Op0(LIR_Code code)
1151    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1152   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1153    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1154 
1155   virtual void emit_code(LIR_Assembler* masm);
1156   virtual LIR_Op0* as_Op0() { return this; }
1157   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1158 };
1159 
1160 
1161 // --------------------------------------------------
1162 // LIR_Op1
1163 // --------------------------------------------------
1164 
1165 class LIR_Op1: public LIR_Op {
1166  friend class LIR_OpVisitState;
1167 
1168  protected:
1169   LIR_Opr         _opr;   // input operand
1170   BasicType       _type;  // Operand types
  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1172 
1173   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1174 
1175   void set_kind(LIR_MoveKind kind) {
1176     assert(code() == lir_move, "must be");
1177     _flags = kind;
1178   }
1179 
1180  public:
1181   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1182     : LIR_Op(code, result, info)
1183     , _opr(opr)
1184     , _patch(patch)
1185     , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1186 
1187   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1188     : LIR_Op(code, result, info)
1189     , _opr(opr)
1190     , _patch(patch)
1191     , _type(type)                      {
1192     assert(code == lir_move, "must be");
1193     set_kind(kind);
1194   }
1195 
1196   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1197     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1198     , _opr(opr)
1199     , _patch(lir_patch_none)
1200     , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1201 
1202   LIR_Opr in_opr()           const               { return _opr;   }
1203   LIR_PatchCode patch_code() const               { return _patch; }
1204   BasicType type()           const               { return _type;  }
1205 
1206   LIR_MoveKind move_kind() const {
1207     assert(code() == lir_move, "must be");
1208     return (LIR_MoveKind)_flags;
1209   }
1210 
1211   virtual void emit_code(LIR_Assembler* masm);
1212   virtual LIR_Op1* as_Op1() { return this; }
1213   virtual const char * name() const PRODUCT_RETURN0;
1214 
1215   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1216 
1217   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1218   virtual void verify() const;
1219 };
1220 
1221 
1222 // for runtime calls
1223 class LIR_OpRTCall: public LIR_OpCall {
1224  friend class LIR_OpVisitState;
1225 
1226  private:
1227   LIR_Opr _tmp;
1228  public:
1229   LIR_OpRTCall(address addr, LIR_Opr tmp,
1230                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1231     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1232     , _tmp(tmp) {}
1233 
1234   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1235   virtual void emit_code(LIR_Assembler* masm);
1236   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1237 
1238   LIR_Opr tmp() const                            { return _tmp; }
1239 
1240   virtual void verify() const;
1241 };
1242 
1243 
1244 class LIR_OpBranch: public LIR_Op {
1245  friend class LIR_OpVisitState;
1246 
1247  private:
1248   LIR_Condition _cond;
1249   BasicType     _type;
1250   Label*        _label;
1251   BlockBegin*   _block;  // if this is a branch to a block, this is the block
  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1253   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1254 
1255  public:
1256   LIR_OpBranch(LIR_Condition cond, Label* lbl)
1257     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1258     , _cond(cond)
1259     , _label(lbl)
1260     , _block(NULL)
1261     , _ublock(NULL)
1262     , _stub(NULL) { }
1263 
1264   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1265   LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1266 
1267   // for unordered comparisons
1268   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1269 
1270   LIR_Condition cond()        const              { return _cond;        }
1271   BasicType     type()        const              { return _type;        }
1272   Label*        label()       const              { return _label;       }
1273   BlockBegin*   block()       const              { return _block;       }
1274   BlockBegin*   ublock()      const              { return _ublock;      }
1275   CodeStub*     stub()        const              { return _stub;       }
1276 
1277   void          change_block(BlockBegin* b);
1278   void          change_ublock(BlockBegin* b);
1279   void          negate_cond();
1280 
1281   virtual void emit_code(LIR_Assembler* masm);
1282   virtual LIR_OpBranch* as_OpBranch() { return this; }
1283   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1284 };
1285 
1286 
1287 class ConversionStub;
1288 
1289 class LIR_OpConvert: public LIR_Op1 {
1290  friend class LIR_OpVisitState;
1291 
1292  private:
1293    Bytecodes::Code _bytecode;
1294    ConversionStub* _stub;
1295 
1296  public:
1297    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1298      : LIR_Op1(lir_convert, opr, result)
1299      , _stub(stub)
1300      , _bytecode(code)                           {}
1301 
1302   Bytecodes::Code bytecode() const               { return _bytecode; }
1303   ConversionStub* stub() const                   { return _stub; }
1304 
1305   virtual void emit_code(LIR_Assembler* masm);
1306   virtual LIR_OpConvert* as_OpConvert() { return this; }
1307   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1308 
1309   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1310 };
1311 
1312 
1313 // LIR_OpAllocObj
1314 class LIR_OpAllocObj : public LIR_Op1 {
1315  friend class LIR_OpVisitState;
1316 
1317  private:
1318   LIR_Opr _tmp1;
1319   LIR_Opr _tmp2;
1320   LIR_Opr _tmp3;
1321   LIR_Opr _tmp4;
1322   int     _hdr_size;
1323   int     _obj_size;
1324   CodeStub* _stub;
1325   bool    _init_check;
1326 
1327  public:
1328   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1329                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1330                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1331     : LIR_Op1(lir_alloc_object, klass, result)
1332     , _tmp1(t1)
1333     , _tmp2(t2)
1334     , _tmp3(t3)
1335     , _tmp4(t4)
1336     , _hdr_size(hdr_size)
1337     , _obj_size(obj_size)
1338     , _init_check(init_check)
1339     , _stub(stub)                                { }
1340 
1341   LIR_Opr klass()        const                   { return in_opr();     }
1342   LIR_Opr obj()          const                   { return result_opr(); }
1343   LIR_Opr tmp1()         const                   { return _tmp1;        }
1344   LIR_Opr tmp2()         const                   { return _tmp2;        }
1345   LIR_Opr tmp3()         const                   { return _tmp3;        }
1346   LIR_Opr tmp4()         const                   { return _tmp4;        }
1347   int     header_size()  const                   { return _hdr_size;    }
1348   int     object_size()  const                   { return _obj_size;    }
1349   bool    init_check()   const                   { return _init_check;  }
1350   CodeStub* stub()       const                   { return _stub;        }
1351 
1352   virtual void emit_code(LIR_Assembler* masm);
1353   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1354   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1355 };
1356 
1357 
1358 // LIR_OpRoundFP
1359 class LIR_OpRoundFP : public LIR_Op1 {
1360  friend class LIR_OpVisitState;
1361 
1362  private:
1363   LIR_Opr _tmp;
1364 
1365  public:
1366   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1367     : LIR_Op1(lir_roundfp, reg, result)
1368     , _tmp(stack_loc_temp) {}
1369 
1370   LIR_Opr tmp() const                            { return _tmp; }
1371   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1372   void print_instr(outputStream* out) const PRODUCT_RETURN;
1373 };
1374 
1375 // LIR_OpTypeCheck
1376 class LIR_OpTypeCheck: public LIR_Op {
1377  friend class LIR_OpVisitState;
1378 
1379  private:
1380   LIR_Opr       _object;
1381   LIR_Opr       _array;
1382   ciKlass*      _klass;
1383   LIR_Opr       _tmp1;
1384   LIR_Opr       _tmp2;
1385   LIR_Opr       _tmp3;
1386   bool          _fast_check;
1387   CodeEmitInfo* _info_for_patch;
1388   CodeEmitInfo* _info_for_exception;
1389   CodeStub*     _stub;
1390   // Helpers for Tier1UpdateMethodData
1391   ciMethod*     _profiled_method;
1392   int           _profiled_bci;
1393 
1394 public:
1395   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1396                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1397                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1398                   ciMethod* profiled_method, int profiled_bci);
1399   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1400                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
1401                   ciMethod* profiled_method, int profiled_bci);
1402 
1403   LIR_Opr object() const                         { return _object;         }
1404   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1405   LIR_Opr tmp1() const                           { return _tmp1;           }
1406   LIR_Opr tmp2() const                           { return _tmp2;           }
1407   LIR_Opr tmp3() const                           { return _tmp3;           }
1408   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1409   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1410   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1411   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1412   CodeStub* stub() const                         { return _stub;           }
1413 
1414   // methodDataOop profiling
1415   ciMethod* profiled_method()                    { return _profiled_method; }
1416   int       profiled_bci()                       { return _profiled_bci; }
1417 
1418   virtual void emit_code(LIR_Assembler* masm);
1419   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1420   void print_instr(outputStream* out) const PRODUCT_RETURN;
1421 };
1422 
1423 // LIR_Op2
1424 class LIR_Op2: public LIR_Op {
1425  friend class LIR_OpVisitState;
1426 
1427   int  _fpu_stack_size; // for sin/cos implementation on Intel
1428 
1429  protected:
1430   LIR_Opr   _opr1;
1431   LIR_Opr   _opr2;
1432   BasicType _type;
1433   LIR_Opr   _tmp;
1434   LIR_Condition _condition;
1435 
1436   void verify() const;
1437 
1438  public:
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmp, "code check");
  }

  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmove, "code check");
  }

  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
    : LIR_Op(code, result, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(tmp)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }
1483 
1484   LIR_Opr in_opr1() const                        { return _opr1; }
1485   LIR_Opr in_opr2() const                        { return _opr2; }
1486   BasicType type()  const                        { return _type; }
1487   LIR_Opr tmp_opr() const                        { return _tmp; }
1488   LIR_Condition condition() const  {
1489     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1490   }
1491 
1492   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1493   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1494 
1495   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1496   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1497 
1498   virtual void emit_code(LIR_Assembler* masm);
1499   virtual LIR_Op2* as_Op2() { return this; }
1500   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1501 };
1502 
1503 class LIR_OpAllocArray : public LIR_Op {
1504  friend class LIR_OpVisitState;
1505 
1506  private:
1507   LIR_Opr   _klass;
1508   LIR_Opr   _len;
1509   LIR_Opr   _tmp1;
1510   LIR_Opr   _tmp2;
1511   LIR_Opr   _tmp3;
1512   LIR_Opr   _tmp4;
1513   BasicType _type;
1514   CodeStub* _stub;
1515 
1516  public:
1517   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1518     : LIR_Op(lir_alloc_array, result, NULL)
1519     , _klass(klass)
1520     , _len(len)
1521     , _tmp1(t1)
1522     , _tmp2(t2)
1523     , _tmp3(t3)
1524     , _tmp4(t4)
1525     , _type(type)
1526     , _stub(stub) {}
1527 
1528   LIR_Opr   klass()   const                      { return _klass;       }
1529   LIR_Opr   len()     const                      { return _len;         }
1530   LIR_Opr   obj()     const                      { return result_opr(); }
1531   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1532   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1533   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1534   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1535   BasicType type()    const                      { return _type;        }
1536   CodeStub* stub()    const                      { return _stub;        }
1537 
1538   virtual void emit_code(LIR_Assembler* masm);
1539   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1540   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1541 };
1542 
1543 
1544 class LIR_Op3: public LIR_Op {
1545  friend class LIR_OpVisitState;
1546 
1547  private:
1548   LIR_Opr _opr1;
1549   LIR_Opr _opr2;
1550   LIR_Opr _opr3;
1551  public:
1552   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1553     : LIR_Op(code, result, info)
1554     , _opr1(opr1)
1555     , _opr2(opr2)
1556     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1557   LIR_Opr in_opr1() const                        { return _opr1; }
1558   LIR_Opr in_opr2() const                        { return _opr2; }
1559   LIR_Opr in_opr3() const                        { return _opr3; }
1560 
1561   virtual void emit_code(LIR_Assembler* masm);
1562   virtual LIR_Op3* as_Op3() { return this; }
1563   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1564 };
1565 
1566 
1567 //--------------------------------
1568 class LabelObj: public CompilationResourceObj {
1569  private:
1570   Label _label;
1571  public:
1572   LabelObj()                                     {}
1573   Label* label()                                 { return &_label; }
1574 };
1575 
1576 
1577 class LIR_OpLock: public LIR_Op {
1578  friend class LIR_OpVisitState;
1579 
1580  private:
1581   LIR_Opr _hdr;
1582   LIR_Opr _obj;
1583   LIR_Opr _lock;
1584   LIR_Opr _scratch;
1585   CodeStub* _stub;
1586  public:
1587   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1588     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1589     , _hdr(hdr)
1590     , _obj(obj)
1591     , _lock(lock)
1592     , _scratch(scratch)
1593     , _stub(stub)                      {}
1594 
1595   LIR_Opr hdr_opr() const                        { return _hdr; }
1596   LIR_Opr obj_opr() const                        { return _obj; }
1597   LIR_Opr lock_opr() const                       { return _lock; }
1598   LIR_Opr scratch_opr() const                    { return _scratch; }
1599   CodeStub* stub() const                         { return _stub; }
1600 
1601   virtual void emit_code(LIR_Assembler* masm);
1602   virtual LIR_OpLock* as_OpLock() { return this; }
1603   void print_instr(outputStream* out) const PRODUCT_RETURN;
1604 };
1605 
1606 
1607 class LIR_OpDelay: public LIR_Op {
1608  friend class LIR_OpVisitState;
1609 
1610  private:
1611   LIR_Op* _op;
1612 
1613  public:
1614   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1615     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1616     _op(op) {
1617     assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1618   }
1619   virtual void emit_code(LIR_Assembler* masm);
1620   virtual LIR_OpDelay* as_OpDelay() { return this; }
1621   void print_instr(outputStream* out) const PRODUCT_RETURN;
1622   LIR_Op* delay_op() const { return _op; }
1623   CodeEmitInfo* call_info() const { return info(); }
1624 };
1625 
1626 
1627 // LIR_OpCompareAndSwap
1628 class LIR_OpCompareAndSwap : public LIR_Op {
1629  friend class LIR_OpVisitState;
1630 
1631  private:
1632   LIR_Opr _addr;
1633   LIR_Opr _cmp_value;
1634   LIR_Opr _new_value;
1635   LIR_Opr _tmp1;
1636   LIR_Opr _tmp2;
1637 
1638  public:
1639   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2)
1640     : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1641     , _addr(addr)
1642     , _cmp_value(cmp_value)
1643     , _new_value(new_value)
1644     , _tmp1(t1)
1645     , _tmp2(t2)                                  { }
1646 
1647   LIR_Opr addr()        const                    { return _addr;  }
1648   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1649   LIR_Opr new_value()   const                    { return _new_value; }
1650   LIR_Opr tmp1()        const                    { return _tmp1;      }
1651   LIR_Opr tmp2()        const                    { return _tmp2;      }
1652 
1653   virtual void emit_code(LIR_Assembler* masm);
1654   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1655   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1656 };
1657 
1658 // LIR_OpProfileCall
1659 class LIR_OpProfileCall : public LIR_Op {
1660  friend class LIR_OpVisitState;
1661 
1662  private:
1663   ciMethod* _profiled_method;
1664   int _profiled_bci;
1665   LIR_Opr _mdo;
1666   LIR_Opr _recv;
1667   LIR_Opr _tmp1;
1668   ciKlass* _known_holder;
1669 
1670  public:
1671   // Destroys recv
1672   LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1673     : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1674     , _profiled_method(profiled_method)
1675     , _profiled_bci(profiled_bci)
1676     , _mdo(mdo)
1677     , _recv(recv)
1678     , _tmp1(t1)
1679     , _known_holder(known_holder)                { }
1680 
1681   ciMethod* profiled_method() const              { return _profiled_method;  }
1682   int       profiled_bci()    const              { return _profiled_bci;     }
1683   LIR_Opr   mdo()             const              { return _mdo;              }
1684   LIR_Opr   recv()            const              { return _recv;             }
1685   LIR_Opr   tmp1()            const              { return _tmp1;             }
1686   ciKlass*  known_holder()    const              { return _known_holder;     }
1687 
1688   virtual void emit_code(LIR_Assembler* masm);
1689   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1690   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1691 };
1692 
1693 
1694 class LIR_InsertionBuffer;
1695 
1696 //--------------------------------LIR_List---------------------------------------------------
// Maintains a list of LIR instructions (one instance of LIR_List per basic block).
// The LIR instructions are appended by the LIR_List itself.
//
// Notes:
// - all offsets are (should be) in bytes
1702 // - local positions are specified with an offset, with offset 0 being local 0
1703 
1704 class LIR_List: public CompilationResourceObj {
1705  private:
1706   LIR_OpList  _operations;
1707 
1708   Compilation*  _compilation;
1709 #ifndef PRODUCT
1710   BlockBegin*   _block;
1711 #endif
1712 #ifdef ASSERT
1713   const char *  _file;
1714   int           _line;
1715 #endif
1716 
1717   void append(LIR_Op* op) {
1718     if (op->source() == NULL)
1719       op->set_source(_compilation->current_instruction());
1720 #ifndef PRODUCT
1721     if (PrintIRWithLIR) {
1722       _compilation->maybe_print_current_instruction();
1723       op->print(); tty->cr();
1724     }
1725 #endif // PRODUCT
1726 
1727     _operations.append(op);
1728 
1729 #ifdef ASSERT
1730     op->verify();
1731     op->set_file_and_line(_file, _line);
1732     _file = NULL;
1733     _line = 0;
1734 #endif
1735   }
1736 
1737  public:
1738   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1739 
1740 #ifdef ASSERT
1741   void set_file_and_line(const char * file, int line);
1742 #endif
1743 
1744   //---------- accessors ---------------
1745   LIR_OpList* instructions_list()                { return &_operations; }
1746   int         length() const                     { return _operations.length(); }
1747   LIR_Op*     at(int i) const                    { return _operations.at(i); }
1748 
1749   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1750 
  // insert the LIR_Ops of an insertion buffer at the right places in this LIR_List
1752   void append(LIR_InsertionBuffer* buffer);
1753 
1754   //---------- mutators ---------------
1755   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1756   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1757 
1758   //---------- printing -------------
1759   void print_instructions() PRODUCT_RETURN;
1760 
1761 
1762   //---------- instructions -------------
1763   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1764                         address dest, LIR_OprList* arguments,
1765                         CodeEmitInfo* info) {
1766     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1767   }
1768   void call_static(ciMethod* method, LIR_Opr result,
1769                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1770     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1771   }
1772   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1773                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1774     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1775   }
1776   void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1777                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1778     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1779   }
1780   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1781                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1782     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
1783   }
1784 
1785   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1786   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1787   void membar()                                  { append(new LIR_Op0(lir_membar)); }
1788   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1789   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1790 
1791   void nop()                                     { append(new LIR_Op0(lir_nop)); }
1792   void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1793 
1794   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1795   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1796 
1797   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1798 
1799   void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1800   void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1801 
  // result is a stack location for the old backend and a vreg for UseLinearScan
  // stack_loc_temp is an illegal register for the old backend
1804   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1805   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1806   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1807   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1808   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1809   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1810   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1811 
1812   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
1813 
1814   void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
1815   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
1816 
1817   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
1818 
1819   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
1820 
1821   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
1822 
1823   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
1824   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
1825   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
1826 
1827   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
1828   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
1829   void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
1830 
1831   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1832     append(new LIR_Op2(lir_compare_to,  left, right, dst));
1833   }
1834 
1835   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
1836   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
1837 
1838   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
1839     append(new LIR_Op2(lir_cmp, condition, left, right, info));
1840   }
1841   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
1842     cmp(condition, left, LIR_OprFact::intConst(right), info);
1843   }
1844 
1845   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
1846   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
1847 
1848   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
1849     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
1850   }
1851 
1852   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1853   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1854   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1855 
1856   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
1857   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
1858   void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
1859   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
1860   void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
1861   void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
1862   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
1863 
1864   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
1865   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
1866   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
1867   void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
1868   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
1869   void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
1870   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
1871 
1872   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1873   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1874 
1875   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1876 
1877   void prefetch(LIR_Address* addr, bool is_store);
1878 
1879   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1880   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1881   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1882   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1883   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1884 
1885   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1886   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1887   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1888   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1889 
1890   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
1892 
1893   // jump is an unconditional branch
1894   void jump(BlockBegin* block) {
1895     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
1896   }
1897   void jump(CodeStub* stub) {
1898     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
1899   }
1900   void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
1901   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
1902     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1903     append(new LIR_OpBranch(cond, type, block));
1904   }
1905   void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
1906     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1907     append(new LIR_OpBranch(cond, type, stub));
1908   }
1909   void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
1910     assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
1911     append(new LIR_OpBranch(cond, type, block, unordered));
1912   }
1913 
1914   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1915   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1916   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1917 
1918   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1919   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1920   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1921 
1922   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
1923   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
1924 
1925   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
1926     append(new LIR_OpRTCall(routine, tmp, result, arguments));
1927   }
1928 
1929   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
1930                     LIR_OprList* arguments, CodeEmitInfo* info) {
1931     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
1932   }
1933 
1934   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
1935   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub);
1936   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
1937 
1938   void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
1939   void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
1940   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
1941 
1942   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
1943 
1944   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
1945 
1946   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
1947                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1948                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1949                   ciMethod* profiled_method, int profiled_bci);
1950   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
1951   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1952 
1953   // methodDataOop profiling
1954   void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
1955 };
1956 
1957 void print_LIR(BlockList* blocks);
1958 
1959 class LIR_InsertionBuffer : public CompilationResourceObj {
1960  private:
1961   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
1962 
1963   // list of insertion points. index and count are stored alternately:
1964   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
1965   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
1966   intStack    _index_and_count;
1967 
1968   // the LIR_Ops to be inserted
1969   LIR_OpList  _ops;
1970 
1971   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
1972   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
1973   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
1974 
1975 #ifdef ASSERT
1976   void verify();
1977 #endif
1978  public:
1979   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
1980 
1981   // must be called before using the insertion buffer
1982   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
1983   bool initialized() const  { return _lir != NULL; }
1984   // called automatically when the buffer is appended to the LIR_List
1985   void finish()             { _lir = NULL; }
1986 
1987   // accessors
1988   LIR_List*  lir_list() const             { return _lir; }
1989   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
1990   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
1991   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
1992 
1993   int number_of_ops() const               { return _ops.length(); }
1994   LIR_Op* op_at(int i) const              { return _ops.at(i); }
1995 
1996   // append an instruction to the buffer
1997   void append(int index, LIR_Op* op);
1998 
1999   // instruction
2000   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2001 };
2002 
2003 
2004 //
// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
// Calling a LIR_Op's visit function with a LIR_OpVisitState records
// information about the input, output and temporary operands used by the
// op, whether the op has call semantics, and all the CodeEmitInfos used
// by the op.
2010 //
2011 
2012 
2013 class LIR_OpVisitState: public StackObj {
2014  public:
2015   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2016 
2017   enum {
2018     maxNumberOfOperands = 16,
2019     maxNumberOfInfos = 4
2020   };
2021 
2022  private:
2023   LIR_Op*          _op;
2024 
  // optimization: the operands and infos are not stored in a variable-length
  //               list, but in a fixed-size array to avoid the cost of size checks and resizing
2027   int              _oprs_len[numModes];
2028   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2029   int _info_len;
2030   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2031 
2032   bool             _has_call;
2033   bool             _has_slow_case;
2034 
2035 
2036   // only include register operands
2037   // addresses are decomposed to the base and index registers
2038   // constants and stack operands are ignored
2039   void append(LIR_Opr& opr, OprMode mode) {
2040     assert(opr->is_valid(), "should not call this otherwise");
2041     assert(mode >= 0 && mode < numModes, "bad mode");
2042 
2043     if (opr->is_register()) {
      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2045       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2046 
2047     } else if (opr->is_pointer()) {
2048       LIR_Address* address = opr->as_address_ptr();
2049       if (address != NULL) {
2050         // special handling for addresses: add base and index register of the address
2051         // both are always input operands!
2052         if (address->_base->is_valid()) {
2053           assert(address->_base->is_register(), "must be");
2054           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2055           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
2056         }
2057         if (address->_index->is_valid()) {
2058           assert(address->_index->is_register(), "must be");
2059           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2060           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
2061         }
2062 
2063       } else {
2064         assert(opr->is_constant(), "constant operands are not processed");
2065       }
2066     } else {
2067       assert(opr->is_stack(), "stack operands are not processed");
2068     }
2069   }
2070 
2071   void append(CodeEmitInfo* info) {
2072     assert(info != NULL, "should not call this otherwise");
2073     assert(_info_len < maxNumberOfInfos, "array overflow");
2074     _info_new[_info_len++] = info;
2075   }
2076 
2077  public:
2078   LIR_OpVisitState()         { reset(); }
2079 
2080   LIR_Op* op() const         { return _op; }
2081   void set_op(LIR_Op* op)    { reset(); _op = op; }
2082 
2083   bool has_call() const      { return _has_call; }
2084   bool has_slow_case() const { return _has_slow_case; }
2085 
2086   void reset() {
2087     _op = NULL;
2088     _has_call = false;
2089     _has_slow_case = false;
2090 
2091     _oprs_len[inputMode] = 0;
2092     _oprs_len[tempMode] = 0;
2093     _oprs_len[outputMode] = 0;
2094     _info_len = 0;
2095   }
2096 
2097 
2098   int opr_count(OprMode mode) const {
2099     assert(mode >= 0 && mode < numModes, "bad mode");
2100     return _oprs_len[mode];
2101   }
2102 
2103   LIR_Opr opr_at(OprMode mode, int index) const {
2104     assert(mode >= 0 && mode < numModes, "bad mode");
2105     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2106     return *_oprs_new[mode][index];
2107   }
2108 
2109   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2110     assert(mode >= 0 && mode < numModes, "bad mode");
2111     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2112     *_oprs_new[mode][index] = opr;
2113   }
2114 
2115   int info_count() const {
2116     return _info_len;
2117   }
2118 
2119   CodeEmitInfo* info_at(int index) const {
2120     assert(index < _info_len, "index out of bounds");
2121     return _info_new[index];
2122   }
2123 
2124   XHandlers* all_xhandler();
2125 
2126   // collects all register operands of the instruction
2127   void visit(LIR_Op* op);
2128 
#ifdef ASSERT
2130   // check that an operation has no operands
2131   bool no_operands(LIR_Op* op);
2132 #endif
2133 
2134   // LIR_Op visitor functions use these to fill in the state
2135   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2136   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2137   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2138   void do_info(CodeEmitInfo* info)        { append(info); }
2139 
2140   void do_stub(CodeStub* stub);
2141   void do_call()                          { _has_call = true; }
2142   void do_slow_case()                     { _has_slow_case = true; }
2143   void do_slow_case(CodeEmitInfo* info) {
2144     _has_slow_case = true;
2145     append(info);
2146   }
2147 };
2148 
2149 
inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }