1 /*
   2  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 class BlockBegin;
  26 class BlockList;
  27 class LIR_Assembler;
  28 class CodeEmitInfo;
  29 class CodeStub;
  30 class CodeStubList;
  31 class ArrayCopyStub;
  32 class LIR_Op;
  33 class ciType;
  34 class ValueType;
  35 class LIR_OpVisitState;
  36 class FpuStackSim;
  37 
  38 //---------------------------------------------------------------------
  39 //                 LIR Operands
  40 //  LIR_OprDesc
  41 //    LIR_OprPtr
  42 //      LIR_Const
  43 //      LIR_Address
  44 //---------------------------------------------------------------------
  45 class LIR_OprDesc;
  46 class LIR_OprPtr;
  47 class LIR_Const;
  48 class LIR_Address;
  49 class LIR_OprVisitor;
  50 
  51 
  52 typedef LIR_OprDesc* LIR_Opr;
  53 typedef int          RegNr;
  54 
  55 define_array(LIR_OprArray, LIR_Opr)
  56 define_stack(LIR_OprList, LIR_OprArray)
  57 
  58 define_array(LIR_OprRefArray, LIR_Opr*)
  59 define_stack(LIR_OprRefList, LIR_OprRefArray)
  60 
  61 define_array(CodeEmitInfoArray, CodeEmitInfo*)
  62 define_stack(CodeEmitInfoList, CodeEmitInfoArray)
  63 
  64 define_array(LIR_OpArray, LIR_Op*)
  65 define_stack(LIR_OpList, LIR_OpArray)
  66 
  67 // define LIR_OprPtr early so LIR_OprDesc can refer to it
  68 class LIR_OprPtr: public CompilationResourceObj {
  69  public:
  70   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  71   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  72 
  73   virtual LIR_Const*  as_constant()              { return NULL; }
  74   virtual LIR_Address* as_address()              { return NULL; }
  75   virtual BasicType type() const                 = 0;
  76   virtual void print_value_on(outputStream* out) const = 0;
  77 };
  78 
  79 
  80 
  81 // LIR constants
  82 class LIR_Const: public LIR_OprPtr {
  83  private:
  84   JavaValue _value;
  85 
  86   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  87   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  88   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  89 
  90  public:
  91   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  92   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  93   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  94   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  95   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  96   LIR_Const(void* p) {
  97 #ifdef _LP64
  98     assert(sizeof(jlong) >= sizeof(p), "too small");
  99     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
 100 #else
 101     assert(sizeof(jint) >= sizeof(p), "too small");
 102     _value.set_type(T_INT);     _value.set_jint((jint)p);
 103 #endif
 104   }
 105 
 106   virtual BasicType type()       const { return _value.get_type(); }
 107   virtual LIR_Const* as_constant()     { return this; }
 108 
 109   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 110   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 111   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 112   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 113   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 114   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 115   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 116 
 117 #ifdef _LP64
 118   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 119 #else
 120   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 121 #endif
 122 
 123 
 124   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 125   jint      as_jint_lo_bits() const    {
 126     if (type() == T_DOUBLE) {
 127       return low(jlong_cast(_value.get_jdouble()));
 128     } else {
 129       return as_jint_lo();
 130     }
 131   }
 132   jint      as_jint_hi_bits() const    {
 133     if (type() == T_DOUBLE) {
 134       return high(jlong_cast(_value.get_jdouble()));
 135     } else {
 136       return as_jint_hi();
 137     }
 138   }
 139   jlong      as_jlong_bits() const    {
 140     if (type() == T_DOUBLE) {
 141       return jlong_cast(_value.get_jdouble());
 142     } else {
 143       return as_jlong();
 144     }
 145   }
 146 
 147   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 148 
 149 
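       // The zero tests below compare raw bit patterns, so -0.0f / -0.0 do not count
       // as zero (an ordinary floating-point == comparison would accept them).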
 150   bool is_zero_float() {
 151     jfloat f = as_jfloat();
 152     jfloat ok = 0.0f;
 153     return jint_cast(f) == jint_cast(ok);
 154   }
 155 
 156   bool is_one_float() {
 157     jfloat f = as_jfloat();
 158     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 159   }
 160 
 161   bool is_zero_double() {
 162     jdouble d = as_jdouble();
 163     jdouble ok = 0.0;
 164     return jlong_cast(d) == jlong_cast(ok);
 165   }
 166 
 167   bool is_one_double() {
 168     jdouble d = as_jdouble();
 169     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 170   }
 171 };
 172 
 173 
 174 //---------------------LIR Operand descriptor------------------------------------
 175 //
 176 // The class LIR_OprDesc represents a LIR instruction operand;
 177 // it can be a register (ALU/FPU), a stack location, or a constant.
 178 // Constants and addresses are represented as resource-area-allocated
 179 // structures (see LIR_Const and LIR_Address above).
 180 // Registers and stack locations are encoded directly in the "this" pointer
 181 // value itself (see the value() function below).
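     //
     // Illustrative sketch (not part of the original header): one operand of each
     // flavor, built with the LIR_OprFact factory defined further below:
     //
     //   LIR_Opr r = LIR_OprFact::single_cpu(3);   // register: bits packed into the pointer value
     //   LIR_Opr c = LIR_OprFact::intConst(42);    // constant: a resource-allocated LIR_Const
     //   assert(!r->is_pointer(), "encoded inline");
     //   assert(c->is_constant() && c->as_jint() == 42, "goes through the LIR_OprPtr side");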
 182 
 183 class LIR_OprDesc: public CompilationResourceObj {
 184  public:
 185   // value structure:
 186   //     data       opr-type opr-kind
 187   // +--------------+-------+-------+
 188   // [max...........|7 6 5 4|3 2 1 0]
 189   //                             ^
 190   //                    is_pointer bit
 191   //
 192   // If the lowest bit is cleared, the value is a pointer to a structure
 193   // (a LIR_Const or LIR_Address). Four bits are needed to represent the operand type.
 194 
 195  private:
 196   friend class LIR_OprFact;
 197 
 198   // Conversion
 199   intptr_t value() const                         { return (intptr_t) this; }
 200 
 201   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 202     return (value() & mask) == masked_value;
 203   }
 204 
 205   enum OprKind {
 206       pointer_value      = 0
 207     , stack_value        = 1
 208     , cpu_register       = 3
 209     , fpu_register       = 5
 210     , illegal_value      = 7
 211   };
 212 
 213   enum OprBits {
 214       pointer_bits   = 1
 215     , kind_bits      = 3
 216     , type_bits      = 4
 217     , size_bits      = 2
 218     , destroys_bits  = 1
 219     , virtual_bits   = 1
 220     , is_xmm_bits    = 1
 221     , last_use_bits  = 1
 222     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 223     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
 224                        is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
 225     , data_bits      = BitsPerInt - non_data_bits
 226     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 227   };
 228 
 229   enum OprShift {
 230       kind_shift     = 0
 231     , type_shift     = kind_shift     + kind_bits
 232     , size_shift     = type_shift     + type_bits
 233     , destroys_shift = size_shift     + size_bits
 234     , last_use_shift = destroys_shift + destroys_bits
 235     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 236     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 237     , is_xmm_shift   = virtual_shift + virtual_bits
 238     , data_shift     = is_xmm_shift + is_xmm_bits
 239     , reg1_shift = data_shift
 240     , reg2_shift = data_shift + reg_bits
 241 
 242   };
 243 
 244   enum OprSize {
 245       single_size = 0 << size_shift
 246     , double_size = 1 << size_shift
 247   };
 248 
 249   enum OprMask {
 250       kind_mask      = right_n_bits(kind_bits)
 251     , type_mask      = right_n_bits(type_bits) << type_shift
 252     , size_mask      = right_n_bits(size_bits) << size_shift
 253     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 254     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 255     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 256     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 257     , pointer_mask   = right_n_bits(pointer_bits)
 258     , lower_reg_mask = right_n_bits(reg_bits)
 259     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 260   };
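
       // Worked example (illustrative, not in the original header): the factory call
       // LIR_OprFact::single_cpu(3), defined further below, builds the value
       //   (3 << reg1_shift) | int_type | cpu_register | single_size
       // so kind_field() == cpu_register, type_field() == int_type,
       // size_field() == single_size and cpu_regnr() == 3 can all be decoded with
       // the masks and shifts above.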
 261 
 262   uintptr_t data() const                         { return value() >> data_shift; }
 263   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 264   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 265   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 266   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 267 
 268   static char type_char(BasicType t);
 269 
 270  public:
 271   enum {
 272     vreg_base = ConcreteRegisterImpl::number_of_registers,
 273     vreg_max = (1 << data_bits) - 1
 274   };
 275 
 276   static inline LIR_Opr illegalOpr();
 277 
 278   enum OprType {
 279       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 280     , int_type      = 1 << type_shift
 281     , long_type     = 2 << type_shift
 282     , object_type   = 3 << type_shift
 283     , address_type  = 4 << type_shift
 284     , float_type    = 5 << type_shift
 285     , double_type   = 6 << type_shift
 286   };
 287   friend OprType as_OprType(BasicType t);
 288   friend BasicType as_BasicType(OprType t);
 289 
 290   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 291   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 292 
 293   static OprSize size_for(BasicType t) {
 294     switch (t) {
 295       case T_LONG:
 296       case T_DOUBLE:
 297         return double_size;
 298         break;
 299 
 300       case T_FLOAT:
 301       case T_BOOLEAN:
 302       case T_CHAR:
 303       case T_BYTE:
 304       case T_SHORT:
 305       case T_INT:
 306       case T_ADDRESS:
 307       case T_OBJECT:
 308       case T_ARRAY:
 309         return single_size;
 310         break;
 311 
 312       default:
 313         ShouldNotReachHere();
 314         return single_size;
 315       }
 316   }
 317 
 318 
 319   void validate_type() const PRODUCT_RETURN;
 320 
 321   BasicType type() const {
 322     if (is_pointer()) {
 323       return pointer()->type();
 324     }
 325     return as_BasicType(type_field());
 326   }
 327 
 328 
 329   ValueType* value_type() const                  { return as_ValueType(type()); }
 330 
 331   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 332 
 333   bool is_equal(LIR_Opr opr) const         { return this == opr; }
 334   // checks whether the types are the same
 335   bool is_same_type(LIR_Opr opr) const     {
 336     assert(type_field() != unknown_type &&
 337            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 338     return type_field() == opr->type_field();
 339   }
 340   bool is_same_register(LIR_Opr opr) {
 341     return (is_register() && opr->is_register() &&
 342             kind_field() == opr->kind_field() &&
 343             (value() & no_type_mask) == (opr->value() & no_type_mask));
 344   }
 345 
 346   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 347   bool is_illegal() const      { return kind_field() == illegal_value; }
 348   bool is_valid() const        { return kind_field() != illegal_value; }
 349 
 350   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 351   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 352 
 353   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 354   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 355 
 356   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 357   bool is_oop() const;
 358 
 359   // Semantics for fpu- and xmm-registers:
 360   // * xmm registers are encoded with the fpu_register kind plus the is_xmm bit,
 361   //   so is_single_fpu()/is_double_fpu() also return true for xmm registers.
 362   // * Therefore you must always check is_???_xmm() before is_???_fpu() to
 363   //   distinguish between fpu- and xmm-registers.
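       //
       //   A minimal sketch of the required check order (not in the original header):
       //     if (opr->is_single_xmm())      { /* XMM register: xmm_regnr() */ }
       //     else if (opr->is_single_fpu()) { /* x87 stack slot: fpu_regnr() */ }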
 364 
 365   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 366   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 367   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 368 
 369   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 370   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 371   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 372   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 373   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 374 
 375   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 376   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 377   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 378   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 379   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 380 
 381   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 382   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 383   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 384 
 385   // fast accessor functions for special bits that do not work for pointers
 386   // (in these functions, the check for is_pointer() is omitted)
 387   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 388   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 389   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 390   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 391   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 392 
 393   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 394   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 395   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 396   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 397 
 398 
 399   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 400   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 401   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 402   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 403   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 404   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 405   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 406   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 407   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 408   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 409   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 410   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 411 
 412   LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
 413   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 414   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 415 
 416   Register as_register()    const;
 417   Register as_register_lo() const;
 418   Register as_register_hi() const;
 419 
 420   Register as_pointer_register() {
 421 #ifdef _LP64
 422     if (is_double_cpu()) {
 423       assert(as_register_lo() == as_register_hi(), "should be a single register");
 424       return as_register_lo();
 425     }
 426 #endif
 427     return as_register();
 428   }
 429 
 430 #ifdef X86
 431   XMMRegister as_xmm_float_reg() const;
 432   XMMRegister as_xmm_double_reg() const;
 433   // for compatibility with RInfo
 434   int fpu () const                                  { return lo_reg_half(); }
 435 #endif // X86
 436 #if defined(SPARC) || defined(ARM) || defined(PPC)
 437   FloatRegister as_float_reg   () const;
 438   FloatRegister as_double_reg  () const;
 439 #endif
 440 
 441   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 442   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 443   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 444   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 445   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 446 
 447   void print() const PRODUCT_RETURN;
 448   void print(outputStream* out) const PRODUCT_RETURN;
 449 };
 450 
 451 
 452 inline LIR_OprDesc::OprType as_OprType(BasicType type) {
 453   switch (type) {
 454   case T_INT:      return LIR_OprDesc::int_type;
 455   case T_LONG:     return LIR_OprDesc::long_type;
 456   case T_FLOAT:    return LIR_OprDesc::float_type;
 457   case T_DOUBLE:   return LIR_OprDesc::double_type;
 458   case T_OBJECT:
 459   case T_ARRAY:    return LIR_OprDesc::object_type;
 460   case T_ADDRESS:  return LIR_OprDesc::address_type;
 461   case T_ILLEGAL:  // fall through
 462   default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
 463   }
 464 }
 465 
 466 inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
 467   switch (t) {
 468   case LIR_OprDesc::int_type:     return T_INT;
 469   case LIR_OprDesc::long_type:    return T_LONG;
 470   case LIR_OprDesc::float_type:   return T_FLOAT;
 471   case LIR_OprDesc::double_type:  return T_DOUBLE;
 472   case LIR_OprDesc::object_type:  return T_OBJECT;
 473   case LIR_OprDesc::address_type: return T_ADDRESS;
 474   case LIR_OprDesc::unknown_type: // fall through
 475   default: ShouldNotReachHere();  return T_ILLEGAL;
 476   }
 477 }
 478 
 479 
 480 // LIR_Address
 481 class LIR_Address: public LIR_OprPtr {
 482  friend class LIR_OpVisitState;
 483 
 484  public:
 485   // NOTE: currently these must be the log2 of the scale factor (and
 486   // must also be equivalent to the ScaleFactor enum in
 487   // assembler_i486.hpp)
 488   enum Scale {
 489     times_1  =  0,
 490     times_2  =  1,
 491     times_4  =  2,
 492     times_8  =  3
 493   };
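
       // Illustrative sketch (not part of the original header; assumes the X86/ARM
       // constructor below and hypothetical operands array_opr / index_opr):
       // addressing an int-array element at base + index*4 + disp uses times_4,
       // i.e. log2(sizeof(jint)):
       //
       //   new LIR_Address(array_opr, index_opr, LIR_Address::times_4,
       //                   arrayOopDesc::base_offset_in_bytes(T_INT), T_INT);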
 494 
 495  private:
 496   LIR_Opr   _base;
 497   LIR_Opr   _index;
 498   Scale     _scale;
 499   intx      _disp;
 500   BasicType _type;
 501 
 502  public:
 503   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 504        _base(base)
 505      , _index(index)
 506      , _scale(times_1)
 507      , _type(type)
 508      , _disp(0) { verify(); }
 509 
 510   LIR_Address(LIR_Opr base, intx disp, BasicType type):
 511        _base(base)
 512      , _index(LIR_OprDesc::illegalOpr())
 513      , _scale(times_1)
 514      , _type(type)
 515      , _disp(disp) { verify(); }
 516 
 517   LIR_Address(LIR_Opr base, BasicType type):
 518        _base(base)
 519      , _index(LIR_OprDesc::illegalOpr())
 520      , _scale(times_1)
 521      , _type(type)
 522      , _disp(0) { verify(); }
 523 
 524 #if defined(X86) || defined(ARM)
 525   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
 526        _base(base)
 527      , _index(index)
 528      , _scale(scale)
 529      , _type(type)
 530      , _disp(disp) { verify(); }
 531 #endif // X86 || ARM
 532 
 533   LIR_Opr base()  const                          { return _base;  }
 534   LIR_Opr index() const                          { return _index; }
 535   Scale   scale() const                          { return _scale; }
 536   intx    disp()  const                          { return _disp;  }
 537 
 538   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 539 
 540   virtual LIR_Address* as_address()              { return this;   }
 541   virtual BasicType type() const                 { return _type; }
 542   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 543 
 544   void verify() const PRODUCT_RETURN;
 545 
 546   static Scale scale(BasicType type);
 547 };
 548 
 549 
 550 // operand factory
 551 class LIR_OprFact: public AllStatic {
 552  public:
 553 
 554   static LIR_Opr illegalOpr;
 555 
 556   static LIR_Opr single_cpu(int reg) {
 557     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) | 
 558                                LIR_OprDesc::int_type             |
 559                                LIR_OprDesc::cpu_register         |
 560                                LIR_OprDesc::single_size);
 561   }
 562   static LIR_Opr single_cpu_oop(int reg) {
 563     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 564                                LIR_OprDesc::object_type          |
 565                                LIR_OprDesc::cpu_register         |
 566                                LIR_OprDesc::single_size);
 567   }
 568   static LIR_Opr single_cpu_address(int reg) {
 569     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 570                                LIR_OprDesc::address_type         |
 571                                LIR_OprDesc::cpu_register         |
 572                                LIR_OprDesc::single_size);
 573   }
 574   static LIR_Opr double_cpu(int reg1, int reg2) {
 575     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 576     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 577                                (reg2 << LIR_OprDesc::reg2_shift) |
 578                                LIR_OprDesc::long_type            |
 579                                LIR_OprDesc::cpu_register         |
 580                                LIR_OprDesc::double_size);
 581   }
 582 
 583   static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 584                                                                              LIR_OprDesc::float_type           |
 585                                                                              LIR_OprDesc::fpu_register         |
 586                                                                              LIR_OprDesc::single_size); }
 587 #if defined(ARM)
 588   static LIR_Opr double_fpu(int reg1, int reg2)    { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
 589   static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
 590   static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
 591 #endif
 592 #ifdef SPARC
 593   static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 594                                                                              (reg2 << LIR_OprDesc::reg2_shift) |
 595                                                                              LIR_OprDesc::double_type          |
 596                                                                              LIR_OprDesc::fpu_register         |
 597                                                                              LIR_OprDesc::double_size); }
 598 #endif
 599 #ifdef X86
 600   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 601                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 602                                                                              LIR_OprDesc::double_type          |
 603                                                                              LIR_OprDesc::fpu_register         |
 604                                                                              LIR_OprDesc::double_size); }
 605 
 606   static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 607                                                                              LIR_OprDesc::float_type           |
 608                                                                              LIR_OprDesc::fpu_register         |
 609                                                                              LIR_OprDesc::single_size          |
 610                                                                              LIR_OprDesc::is_xmm_mask); }
 611   static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 612                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 613                                                                              LIR_OprDesc::double_type          |
 614                                                                              LIR_OprDesc::fpu_register         |
 615                                                                              LIR_OprDesc::double_size          |
 616                                                                              LIR_OprDesc::is_xmm_mask); }
 617 #endif // X86
 618 #ifdef PPC
 619   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 620                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 621                                                                              LIR_OprDesc::double_type          |
 622                                                                              LIR_OprDesc::fpu_register         |
 623                                                                              LIR_OprDesc::double_size); }
 624   static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift)        |
 625                                                                              LIR_OprDesc::float_type           |
 626                                                                              LIR_OprDesc::cpu_register         |
 627                                                                              LIR_OprDesc::single_size); }
 628   static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift)        |
 629                                                                              (reg1 << LIR_OprDesc::reg2_shift) |
 630                                                                              LIR_OprDesc::double_type          |
 631                                                                              LIR_OprDesc::cpu_register         |
 632                                                                              LIR_OprDesc::double_size); }
 633 #endif // PPC
 634 
 635   static LIR_Opr virtual_register(int index, BasicType type) {
 636     LIR_Opr res;
 637     switch (type) {
 638       case T_OBJECT: // fall through
 639       case T_ARRAY:
 640         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 641                                             LIR_OprDesc::object_type  |
 642                                             LIR_OprDesc::cpu_register |
 643                                             LIR_OprDesc::single_size  |
 644                                             LIR_OprDesc::virtual_mask);
 645         break;
 646 
 647       case T_INT:
 648         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 649                                   LIR_OprDesc::int_type              |
 650                                   LIR_OprDesc::cpu_register          |
 651                                   LIR_OprDesc::single_size           |
 652                                   LIR_OprDesc::virtual_mask);
 653         break;
 654 
 655       case T_ADDRESS:
 656         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 657                                   LIR_OprDesc::address_type          |
 658                                   LIR_OprDesc::cpu_register          |
 659                                   LIR_OprDesc::single_size           |
 660                                   LIR_OprDesc::virtual_mask);
 661         break;
 662 
 663       case T_LONG:
 664         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 665                                   LIR_OprDesc::long_type             |
 666                                   LIR_OprDesc::cpu_register          |
 667                                   LIR_OprDesc::double_size           |
 668                                   LIR_OprDesc::virtual_mask);
 669         break;
 670 
 671 #ifdef __SOFTFP__
 672       case T_FLOAT:
 673         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 674                                   LIR_OprDesc::float_type  |
 675                                   LIR_OprDesc::cpu_register |
 676                                   LIR_OprDesc::single_size |
 677                                   LIR_OprDesc::virtual_mask);
 678         break;
 679       case T_DOUBLE:
 680         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 681                                   LIR_OprDesc::double_type |
 682                                   LIR_OprDesc::cpu_register |
 683                                   LIR_OprDesc::double_size |
 684                                   LIR_OprDesc::virtual_mask);
 685         break;
 686 #else // __SOFTFP__
 687       case T_FLOAT:
 688         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 689                                   LIR_OprDesc::float_type           |
 690                                   LIR_OprDesc::fpu_register         |
 691                                   LIR_OprDesc::single_size          |
 692                                   LIR_OprDesc::virtual_mask);
 693         break;
 694 
 695       case T_DOUBLE:
 696         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 697                                   LIR_OprDesc::double_type          |
 698                                   LIR_OprDesc::fpu_register         |
 699                                   LIR_OprDesc::double_size          |
 700                                   LIR_OprDesc::virtual_mask);
 701         break;
 702 #endif // __SOFTFP__
 703       default:       ShouldNotReachHere(); res = illegalOpr;
 704     }
 705 
 706 #ifdef ASSERT
 707     res->validate_type();
 708     assert(res->vreg_number() == index, "conversion check");
 709     assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
 710     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 711 
 712     // old-style calculation; check if old and new method are equal
 713     LIR_OprDesc::OprType t = as_OprType(type);
 714 #ifdef __SOFTFP__
 715     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 716                                t |
 717                                LIR_OprDesc::cpu_register |
 718                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 719 #else // __SOFTFP__
 720     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
 721                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
 722                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 723     assert(res == old_res, "old and new method not equal");
 724 #endif // __SOFTFP__
 725 #endif // ASSERT
 726 
 727     return res;
 728   }
 729 
 730   // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters,
 731   // as the index is platform independent; a double-word stack slot using indices 2 and 3
 732   // always has index 2 (see the usage sketch after this function).
 733   static LIR_Opr stack(int index, BasicType type) {
 734     LIR_Opr res;
 735     switch (type) {
 736       case T_OBJECT: // fall through
 737       case T_ARRAY:
 738         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 739                                   LIR_OprDesc::object_type           |
 740                                   LIR_OprDesc::stack_value           |
 741                                   LIR_OprDesc::single_size);
 742         break;
 743 
 744       case T_INT:
 745         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 746                                   LIR_OprDesc::int_type              |
 747                                   LIR_OprDesc::stack_value           |
 748                                   LIR_OprDesc::single_size);
 749         break;
 750 
 751       case T_ADDRESS:
 752         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 753                                   LIR_OprDesc::address_type          |
 754                                   LIR_OprDesc::stack_value           |
 755                                   LIR_OprDesc::single_size);
 756         break;
 757 
 758       case T_LONG:
 759         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 760                                   LIR_OprDesc::long_type             |
 761                                   LIR_OprDesc::stack_value           |
 762                                   LIR_OprDesc::double_size);
 763         break;
 764 
 765       case T_FLOAT:
 766         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 767                                   LIR_OprDesc::float_type            |
 768                                   LIR_OprDesc::stack_value           |
 769                                   LIR_OprDesc::single_size);
 770         break;
 771       case T_DOUBLE:
 772         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 773                                   LIR_OprDesc::double_type           |
 774                                   LIR_OprDesc::stack_value           |
 775                                   LIR_OprDesc::double_size);
 776         break;
 777 
 778       default:       ShouldNotReachHere(); res = illegalOpr;
 779     }
 780 
 781 #ifdef ASSERT
 782     assert(index >= 0, "index must be positive");
 783     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 784 
 785     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 786                                           LIR_OprDesc::stack_value           |
 787                                           as_OprType(type)                   |
 788                                           LIR_OprDesc::size_for(type));
 789     assert(res == old_res, "old and new method not equal");
 790 #endif
 791 
 792     return res;
 793   }
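
       // Usage sketch (illustrative, not in the original header): a double-word local
       // occupying stack indices 2 and 3 is created once, with index 2:
       //
       //   LIR_Opr d = LIR_OprFact::stack(2, T_DOUBLE);
       //   assert(d->is_double_stack() && d->double_stack_ix() == 2, "platform-independent index");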
 794 
 795   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 796   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 797   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 798   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 799   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 800   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 801   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 802   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 803   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 804   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 805 
 806   static LIR_Opr value_type(ValueType* type);
 807   static LIR_Opr dummy_value_type(ValueType* type);
 808 };
 809 
 810 
 811 //-------------------------------------------------------------------------------
 812 //                   LIR Instructions
 813 //-------------------------------------------------------------------------------
 814 //
 815 // Note:
 816 //  - every instruction has a result operand
 817 //  - every instruction has a CodeEmitInfo operand (can be revisited later)
 818 //  - every instruction has a LIR_OpCode operand
 819 //  - LIR_OpN means an instruction with N input operands (see the example after the class list below)
 820 //
 821 // class hierarchy:
 822 //
 823 class  LIR_Op;
 824 class    LIR_Op0;
 825 class      LIR_OpLabel;
 826 class    LIR_Op1;
 827 class      LIR_OpBranch;
 828 class      LIR_OpConvert;
 829 class      LIR_OpAllocObj;
 830 class      LIR_OpRoundFP;
 831 class    LIR_Op2;
 832 class    LIR_OpDelay;
 833 class    LIR_Op3;
 834 class      LIR_OpAllocArray;
 835 class    LIR_OpCall;
 836 class      LIR_OpJavaCall;
 837 class      LIR_OpRTCall;
 838 class    LIR_OpArrayCopy;
 839 class    LIR_OpLock;
 840 class    LIR_OpTypeCheck;
 841 class    LIR_OpCompareAndSwap;
 842 class    LIR_OpProfileCall;
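
     // For example (illustrative, operand names hypothetical): a plain move has one
     // input and one result, so it is modelled as a LIR_Op1:
     //
     //   LIR_Op1* mv = new LIR_Op1(lir_move, src_opr, dst_opr, T_INT);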
 843 
 844 
 845 // LIR operation codes
 846 enum LIR_Code {
 847     lir_none
 848   , begin_op0
 849       , lir_word_align
 850       , lir_label
 851       , lir_nop
 852       , lir_backwardbranch_target
 853       , lir_std_entry
 854       , lir_osr_entry
 855       , lir_build_frame
 856       , lir_fpop_raw
 857       , lir_24bit_FPU
 858       , lir_reset_FPU
 859       , lir_breakpoint
 860       , lir_rtcall
 861       , lir_membar
 862       , lir_membar_acquire
 863       , lir_membar_release
 864       , lir_get_thread
 865   , end_op0
 866   , begin_op1
 867       , lir_fxch
 868       , lir_fld
 869       , lir_ffree
 870       , lir_push
 871       , lir_pop
 872       , lir_null_check
 873       , lir_return
 874       , lir_leal
 875       , lir_neg
 876       , lir_branch
 877       , lir_cond_float_branch
 878       , lir_move
 879       , lir_prefetchr
 880       , lir_prefetchw
 881       , lir_convert
 882       , lir_alloc_object
 883       , lir_monaddr
 884       , lir_roundfp
 885       , lir_safepoint
 886       , lir_unwind
 887   , end_op1
 888   , begin_op2
 889       , lir_cmp
 890       , lir_cmp_l2i
 891       , lir_ucmp_fd2i
 892       , lir_cmp_fd2i
 893       , lir_cmove
 894       , lir_add
 895       , lir_sub
 896       , lir_mul
 897       , lir_mul_strictfp
 898       , lir_div
 899       , lir_div_strictfp
 900       , lir_rem
 901       , lir_sqrt
 902       , lir_abs
 903       , lir_sin
 904       , lir_cos
 905       , lir_tan
 906       , lir_log
 907       , lir_log10
 908       , lir_logic_and
 909       , lir_logic_or
 910       , lir_logic_xor
 911       , lir_shl
 912       , lir_shr
 913       , lir_ushr
 914       , lir_alloc_array
 915       , lir_throw
 916       , lir_compare_to
 917   , end_op2
 918   , begin_op3
 919       , lir_idiv
 920       , lir_irem
 921   , end_op3
 922   , begin_opJavaCall
 923       , lir_static_call
 924       , lir_optvirtual_call
 925       , lir_icvirtual_call
 926       , lir_virtual_call
 927       , lir_dynamic_call
 928   , end_opJavaCall
 929   , begin_opArrayCopy
 930       , lir_arraycopy
 931   , end_opArrayCopy
 932   , begin_opLock
 933     , lir_lock
 934     , lir_unlock
 935   , end_opLock
 936   , begin_delay_slot
 937     , lir_delay_slot
 938   , end_delay_slot
 939   , begin_opTypeCheck
 940     , lir_instanceof
 941     , lir_checkcast
 942     , lir_store_check
 943   , end_opTypeCheck
 944   , begin_opCompareAndSwap
 945     , lir_cas_long
 946     , lir_cas_obj
 947     , lir_cas_int
 948   , end_opCompareAndSwap
 949   , begin_opMDOProfile
 950     , lir_profile_call
 951   , end_opMDOProfile
 952 };
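
     // The begin_*/end_* markers bracket each opcode group so that LIR_Op::is_in_range(code, begin_X, end_X)
     // (defined below) can classify an opcode; e.g. is_in_range(lir_add, begin_op2, end_op2) is true.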
 953 
 954 
 955 enum LIR_Condition {
 956     lir_cond_equal
 957   , lir_cond_notEqual
 958   , lir_cond_less
 959   , lir_cond_lessEqual
 960   , lir_cond_greaterEqual
 961   , lir_cond_greater
 962   , lir_cond_belowEqual
 963   , lir_cond_aboveEqual
 964   , lir_cond_always
 965   , lir_cond_unknown = -1
 966 };
 967 
 968 
 969 enum LIR_PatchCode {
 970   lir_patch_none,
 971   lir_patch_low,
 972   lir_patch_high,
 973   lir_patch_normal
 974 };
 975 
 976 
 977 enum LIR_MoveKind {
 978   lir_move_normal,
 979   lir_move_volatile,
 980   lir_move_unaligned,
 981   lir_move_max_flag
 982 };
 983 
 984 
 985 // --------------------------------------------------
 986 // LIR_Op
 987 // --------------------------------------------------
 988 class LIR_Op: public CompilationResourceObj {
 989  friend class LIR_OpVisitState;
 990 
 991 #ifdef ASSERT
 992  private:
 993   const char *  _file;
 994   int           _line;
 995 #endif
 996 
 997  protected:
 998   LIR_Opr       _result;
 999   unsigned short _code;
1000   unsigned short _flags;
1001   CodeEmitInfo* _info;
1002   int           _id;     // value id for register allocation
1003   int           _fpu_pop_count;
1004   Instruction*  _source; // for debugging
1005 
1006   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1007 
1008  protected:
1009   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1010 
1011  public:
1012   LIR_Op()
1013     : _result(LIR_OprFact::illegalOpr)
1014     , _code(lir_none)
1015     , _flags(0)
1016     , _info(NULL)
1017 #ifdef ASSERT
1018     , _file(NULL)
1019     , _line(0)
1020 #endif
1021     , _fpu_pop_count(0)
1022     , _source(NULL)
1023     , _id(-1)                             {}
1024 
1025   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1026     : _result(result)
1027     , _code(code)
1028     , _flags(0)
1029     , _info(info)
1030 #ifdef ASSERT
1031     , _file(NULL)
1032     , _line(0)
1033 #endif
1034     , _fpu_pop_count(0)
1035     , _source(NULL)
1036     , _id(-1)                             {}
1037 
1038   CodeEmitInfo* info() const                  { return _info;   }
1039   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1040   LIR_Opr result_opr() const                  { return _result; }
1041   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1042 
1043 #ifdef ASSERT
1044   void set_file_and_line(const char * file, int line) {
1045     _file = file;
1046     _line = line;
1047   }
1048 #endif
1049 
1050   virtual const char * name() const PRODUCT_RETURN0;
1051 
1052   int id()             const                  { return _id;     }
1053   void set_id(int id)                         { _id = id; }
1054 
1055   // FPU stack simulation helpers -- only used on Intel
1056   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1057   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1058   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1059 
1060   Instruction* source() const                 { return _source; }
1061   void set_source(Instruction* ins)           { _source = ins; }
1062 
1063   virtual void emit_code(LIR_Assembler* masm) = 0;
1064   virtual void print_instr(outputStream* out) const   = 0;
1065   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1066 
1067   virtual LIR_OpCall* as_OpCall() { return NULL; }
1068   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1069   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1070   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1071   virtual LIR_OpLock* as_OpLock() { return NULL; }
1072   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1073   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1074   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1075   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1076   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1077   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1078   virtual LIR_Op0* as_Op0() { return NULL; }
1079   virtual LIR_Op1* as_Op1() { return NULL; }
1080   virtual LIR_Op2* as_Op2() { return NULL; }
1081   virtual LIR_Op3* as_Op3() { return NULL; }
1082   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1083   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1084   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1085   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1086 
1087   virtual void verify() const {}
1088 };
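
     // Illustrative sketch (not in the original header; op being any LIR_Op*): the
     // as_*() virtuals above act as checked downcasts, e.g.
     //
     //   LIR_OpBranch* branch = op->as_OpBranch();
     //   if (branch != NULL) { BlockBegin* target = branch->block(); }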
1089 
1090 // for calls
1091 class LIR_OpCall: public LIR_Op {
1092  friend class LIR_OpVisitState;
1093 
1094  protected:
1095   address      _addr;
1096   LIR_OprList* _arguments;
1097  protected:
1098   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1099              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1100     : LIR_Op(code, result, info)
1101     , _arguments(arguments)
1102     , _addr(addr) {}
1103 
1104  public:
1105   address addr() const                           { return _addr; }
1106   const LIR_OprList* arguments() const           { return _arguments; }
1107   virtual LIR_OpCall* as_OpCall()                { return this; }
1108 };
1109 
1110 
1111 // --------------------------------------------------
1112 // LIR_OpJavaCall
1113 // --------------------------------------------------
1114 class LIR_OpJavaCall: public LIR_OpCall {
1115  friend class LIR_OpVisitState;
1116 
1117  private:
1118   ciMethod* _method;
1119   LIR_Opr   _receiver;
1120   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1121 
1122  public:
1123   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1124                  LIR_Opr receiver, LIR_Opr result,
1125                  address addr, LIR_OprList* arguments,
1126                  CodeEmitInfo* info)
1127   : LIR_OpCall(code, addr, result, arguments, info)
1128   , _receiver(receiver)
1129   , _method(method)
1130   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1131   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1132 
1133   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1134                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1135                  LIR_OprList* arguments, CodeEmitInfo* info)
1136   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1137   , _receiver(receiver)
1138   , _method(method)
1139   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1140   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1141 
1142   LIR_Opr receiver() const                       { return _receiver; }
1143   ciMethod* method() const                       { return _method;   }
1144 
1145   // JSR 292 support.
1146   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1147   bool is_method_handle_invoke() const {
1148     return
1149       is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
1150       ||
1151       (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
1152        methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
1153   }
1154 
1155   intptr_t vtable_offset() const {
1156     assert(_code == lir_virtual_call, "only have vtable for real vcall");
1157     return (intptr_t) addr();
1158   }
1159 
1160   virtual void emit_code(LIR_Assembler* masm);
1161   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1162   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1163 };
1164 
1165 // --------------------------------------------------
1166 // LIR_OpLabel
1167 // --------------------------------------------------
1168 // Location where a branch can continue
1169 class LIR_OpLabel: public LIR_Op {
1170  friend class LIR_OpVisitState;
1171 
1172  private:
1173   Label* _label;
1174  public:
1175   LIR_OpLabel(Label* lbl)
1176    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1177    , _label(lbl)                                 {}
1178   Label* label() const                           { return _label; }
1179 
1180   virtual void emit_code(LIR_Assembler* masm);
1181   virtual LIR_OpLabel* as_OpLabel() { return this; }
1182   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1183 };
1184 
1185 // LIR_OpArrayCopy
1186 class LIR_OpArrayCopy: public LIR_Op {
1187  friend class LIR_OpVisitState;
1188 
1189  private:
1190   ArrayCopyStub*  _stub;
1191   LIR_Opr   _src;
1192   LIR_Opr   _src_pos;
1193   LIR_Opr   _dst;
1194   LIR_Opr   _dst_pos;
1195   LIR_Opr   _length;
1196   LIR_Opr   _tmp;
1197   ciArrayKlass* _expected_type;
1198   int       _flags;
1199 
1200 public:
1201   enum Flags {
1202     src_null_check         = 1 << 0,
1203     dst_null_check         = 1 << 1,
1204     src_pos_positive_check = 1 << 2,
1205     dst_pos_positive_check = 1 << 3,
1206     length_positive_check  = 1 << 4,
1207     src_range_check        = 1 << 5,
1208     dst_range_check        = 1 << 6,
1209     type_check             = 1 << 7,
1210     all_flags              = (1 << 8) - 1
1211   };
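
       // e.g. (illustrative): a copy whose operands are already known to be non-null but
       // which still needs bounds checking would pass
       //   src_range_check | dst_range_check | src_pos_positive_check |
       //   dst_pos_positive_check | length_positive_check
       // as the 'flags' constructor argument.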
1212 
1213   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1214                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1215 
1216   LIR_Opr src() const                            { return _src; }
1217   LIR_Opr src_pos() const                        { return _src_pos; }
1218   LIR_Opr dst() const                            { return _dst; }
1219   LIR_Opr dst_pos() const                        { return _dst_pos; }
1220   LIR_Opr length() const                         { return _length; }
1221   LIR_Opr tmp() const                            { return _tmp; }
1222   int flags() const                              { return _flags; }
1223   ciArrayKlass* expected_type() const            { return _expected_type; }
1224   ArrayCopyStub* stub() const                    { return _stub; }
1225 
1226   virtual void emit_code(LIR_Assembler* masm);
1227   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1228   void print_instr(outputStream* out) const PRODUCT_RETURN;
1229 };
1230 
1231 
1232 // --------------------------------------------------
1233 // LIR_Op0
1234 // --------------------------------------------------
1235 class LIR_Op0: public LIR_Op {
1236  friend class LIR_OpVisitState;
1237 
1238  public:
1239   LIR_Op0(LIR_Code code)
1240    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1241   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1242    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1243 
1244   virtual void emit_code(LIR_Assembler* masm);
1245   virtual LIR_Op0* as_Op0() { return this; }
1246   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1247 };
1248 
1249 
1250 // --------------------------------------------------
1251 // LIR_Op1
1252 // --------------------------------------------------
1253 
1254 class LIR_Op1: public LIR_Op {
1255  friend class LIR_OpVisitState;
1256 
1257  protected:
1258   LIR_Opr         _opr;   // input operand
1259   BasicType       _type;  // operand type
1260   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1261 
1262   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1263 
1264   void set_kind(LIR_MoveKind kind) {
1265     assert(code() == lir_move, "must be");
1266     _flags = kind;
1267   }
1268 
1269  public:
1270   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1271     : LIR_Op(code, result, info)
1272     , _opr(opr)
1273     , _patch(patch)
1274     , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1275 
1276   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1277     : LIR_Op(code, result, info)
1278     , _opr(opr)
1279     , _patch(patch)
1280     , _type(type)                      {
1281     assert(code == lir_move, "must be");
1282     set_kind(kind);
1283   }
1284 
1285   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1286     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1287     , _opr(opr)
1288     , _patch(lir_patch_none)
1289     , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1290 
1291   LIR_Opr in_opr()           const               { return _opr;   }
1292   LIR_PatchCode patch_code() const               { return _patch; }
1293   BasicType type()           const               { return _type;  }
1294 
1295   LIR_MoveKind move_kind() const {
1296     assert(code() == lir_move, "must be");
1297     return (LIR_MoveKind)_flags;
1298   }
1299 
1300   virtual void emit_code(LIR_Assembler* masm);
1301   virtual LIR_Op1* as_Op1() { return this; }
1302   virtual const char * name() const PRODUCT_RETURN0;
1303 
1304   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1305 
1306   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1307   virtual void verify() const;
1308 };
1309 
1310 
1311 // for runtime calls
1312 class LIR_OpRTCall: public LIR_OpCall {
1313  friend class LIR_OpVisitState;
1314 
1315  private:
1316   LIR_Opr _tmp;
1317  public:
1318   LIR_OpRTCall(address addr, LIR_Opr tmp,
1319                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1320     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1321     , _tmp(tmp) {}
1322 
1323   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1324   virtual void emit_code(LIR_Assembler* masm);
1325   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1326 
1327   LIR_Opr tmp() const                            { return _tmp; }
1328 
1329   virtual void verify() const;
1330 };
1331 
1332 
1333 class LIR_OpBranch: public LIR_Op {
1334  friend class LIR_OpVisitState;
1335 
1336  private:
1337   LIR_Condition _cond;
1338   BasicType     _type;
1339   Label*        _label;
1340   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1341   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1342   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1343 
1344  public:
1345   LIR_OpBranch(LIR_Condition cond, Label* lbl)
1346     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1347     , _cond(cond)
1348     , _label(lbl)
1349     , _block(NULL)
1350     , _ublock(NULL)
1351     , _stub(NULL) { }
1352 
1353   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1354   LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1355 
1356   // for unordered comparisons
1357   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1358 
1359   LIR_Condition cond()        const              { return _cond;        }
1360   BasicType     type()        const              { return _type;        }
1361   Label*        label()       const              { return _label;       }
1362   BlockBegin*   block()       const              { return _block;       }
1363   BlockBegin*   ublock()      const              { return _ublock;      }
1364   CodeStub*     stub()        const              { return _stub;       }
1365 
1366   void          change_block(BlockBegin* b);
1367   void          change_ublock(BlockBegin* b);
1368   void          negate_cond();
1369 
1370   virtual void emit_code(LIR_Assembler* masm);
1371   virtual LIR_OpBranch* as_OpBranch() { return this; }
1372   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1373 };
1374 
1375 
1376 class ConversionStub;
1377 
1378 class LIR_OpConvert: public LIR_Op1 {
1379  friend class LIR_OpVisitState;
1380 
1381  private:
1382    Bytecodes::Code _bytecode;
1383    ConversionStub* _stub;
1384 #ifdef PPC
1385   LIR_Opr _tmp1;
1386   LIR_Opr _tmp2;
1387 #endif
1388 
1389  public:
1390    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1391      : LIR_Op1(lir_convert, opr, result)
1392      , _stub(stub)
1393 #ifdef PPC
1394      , _tmp1(LIR_OprDesc::illegalOpr())
1395      , _tmp2(LIR_OprDesc::illegalOpr())
1396 #endif
1397      , _bytecode(code)                           {}
1398 
1399 #ifdef PPC
1400    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
1401                  ,LIR_Opr tmp1, LIR_Opr tmp2)
1402      : LIR_Op1(lir_convert, opr, result)
1403      , _stub(stub)
1404      , _tmp1(tmp1)
1405      , _tmp2(tmp2)
1406      , _bytecode(code)                           {}
1407 #endif
1408 
1409   Bytecodes::Code bytecode() const               { return _bytecode; }
1410   ConversionStub* stub() const                   { return _stub; }
1411 #ifdef PPC
1412   LIR_Opr tmp1() const                           { return _tmp1; }
1413   LIR_Opr tmp2() const                           { return _tmp2; }
1414 #endif
1415 
1416   virtual void emit_code(LIR_Assembler* masm);
1417   virtual LIR_OpConvert* as_OpConvert() { return this; }
1418   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1419 
1420   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1421 };
1422 
1423 
1424 // LIR_OpAllocObj
1425 class LIR_OpAllocObj : public LIR_Op1 {
1426  friend class LIR_OpVisitState;
1427 
1428  private:
1429   LIR_Opr _tmp1;
1430   LIR_Opr _tmp2;
1431   LIR_Opr _tmp3;
1432   LIR_Opr _tmp4;
1433   int     _hdr_size;
1434   int     _obj_size;
1435   CodeStub* _stub;
1436   bool    _init_check;
1437 
1438  public:
1439   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1440                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1441                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1442     : LIR_Op1(lir_alloc_object, klass, result)
1443     , _tmp1(t1)
1444     , _tmp2(t2)
1445     , _tmp3(t3)
1446     , _tmp4(t4)
1447     , _hdr_size(hdr_size)
1448     , _obj_size(obj_size)
1449     , _init_check(init_check)
1450     , _stub(stub)                                { }
1451 
1452   LIR_Opr klass()        const                   { return in_opr();     }
1453   LIR_Opr obj()          const                   { return result_opr(); }
1454   LIR_Opr tmp1()         const                   { return _tmp1;        }
1455   LIR_Opr tmp2()         const                   { return _tmp2;        }
1456   LIR_Opr tmp3()         const                   { return _tmp3;        }
1457   LIR_Opr tmp4()         const                   { return _tmp4;        }
1458   int     header_size()  const                   { return _hdr_size;    }
1459   int     object_size()  const                   { return _obj_size;    }
1460   bool    init_check()   const                   { return _init_check;  }
1461   CodeStub* stub()       const                   { return _stub;        }
1462 
1463   virtual void emit_code(LIR_Assembler* masm);
1464   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1465   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1466 };
1467 
1468 
1469 // LIR_OpRoundFP
1470 class LIR_OpRoundFP : public LIR_Op1 {
1471  friend class LIR_OpVisitState;
1472 
1473  private:
1474   LIR_Opr _tmp;
1475 
1476  public:
1477   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1478     : LIR_Op1(lir_roundfp, reg, result)
1479     , _tmp(stack_loc_temp) {}
1480 
1481   LIR_Opr tmp() const                            { return _tmp; }
1482   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1483   void print_instr(outputStream* out) const PRODUCT_RETURN;
1484 };
1485 
1486 // LIR_OpTypeCheck
1487 class LIR_OpTypeCheck: public LIR_Op {
1488  friend class LIR_OpVisitState;
1489 
1490  private:
1491   LIR_Opr       _object;
1492   LIR_Opr       _array;
1493   ciKlass*      _klass;
1494   LIR_Opr       _tmp1;
1495   LIR_Opr       _tmp2;
1496   LIR_Opr       _tmp3;
1497   bool          _fast_check;
1498   CodeEmitInfo* _info_for_patch;
1499   CodeEmitInfo* _info_for_exception;
1500   CodeStub*     _stub;
1501   // Helpers for Tier1UpdateMethodData
1502   ciMethod*     _profiled_method;
1503   int           _profiled_bci;
1504 
1505  public:
1506   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1507                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1508                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1509                   ciMethod* profiled_method, int profiled_bci);
1510   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1511                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
1512                   ciMethod* profiled_method, int profiled_bci);
1513 
1514   LIR_Opr object() const                         { return _object;         }
1515   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1516   LIR_Opr tmp1() const                           { return _tmp1;           }
1517   LIR_Opr tmp2() const                           { return _tmp2;           }
1518   LIR_Opr tmp3() const                           { return _tmp3;           }
1519   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1520   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1521   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1522   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1523   CodeStub* stub() const                         { return _stub;           }
1524 
1525   // methodDataOop profiling
1526   ciMethod* profiled_method()                    { return _profiled_method; }
1527   int       profiled_bci()                       { return _profiled_bci; }
1528 
1529   virtual void emit_code(LIR_Assembler* masm);
1530   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1531   void print_instr(outputStream* out) const PRODUCT_RETURN;
1532 };
1533 
1534 // LIR_Op2
1535 class LIR_Op2: public LIR_Op {
1536  friend class LIR_OpVisitState;
1537 
1538   int  _fpu_stack_size; // for sin/cos implementation on Intel
1539 
1540  protected:
1541   LIR_Opr   _opr1;
1542   LIR_Opr   _opr2;
1543   BasicType _type;
1544   LIR_Opr   _tmp;
1545   LIR_Condition _condition;
1546 
1547   void verify() const;
1548 
1549  public:
1550   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1551     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1552     , _opr1(opr1)
1553     , _opr2(opr2)
1554     , _type(T_ILLEGAL)
1555     , _condition(condition)
1556     , _fpu_stack_size(0)
1557     , _tmp(LIR_OprFact::illegalOpr) {
1558     assert(code == lir_cmp, "code check");
1559   }
1560 
1561   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
1562     : LIR_Op(code, result, NULL)
1563     , _opr1(opr1)
1564     , _opr2(opr2)
1565     , _type(T_ILLEGAL)
1566     , _condition(condition)
1567     , _fpu_stack_size(0)
1568     , _tmp(LIR_OprFact::illegalOpr) {
1569     assert(code == lir_cmove, "code check");
1570   }
1571 
1572   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1573           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1574     : LIR_Op(code, result, info)
1575     , _opr1(opr1)
1576     , _opr2(opr2)
1577     , _type(type)
1578     , _condition(lir_cond_unknown)
1579     , _fpu_stack_size(0)
1580     , _tmp(LIR_OprFact::illegalOpr) {
1581     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1582   }
1583 
1584   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp)
1585     : LIR_Op(code, result, NULL)
1586     , _opr1(opr1)
1587     , _opr2(opr2)
1588     , _type(T_ILLEGAL)
1589     , _condition(lir_cond_unknown)
1590     , _fpu_stack_size(0)
1591     , _tmp(tmp) {
1592     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1593   }
1594 
1595   LIR_Opr in_opr1() const                        { return _opr1; }
1596   LIR_Opr in_opr2() const                        { return _opr2; }
1597   BasicType type()  const                        { return _type; }
1598   LIR_Opr tmp_opr() const                        { return _tmp; }
1599   LIR_Condition condition() const  {
1600     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1601   }
1602   void set_condition(LIR_Condition condition) {
1603     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1604   }
1605 
1606   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1607   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1608 
1609   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1610   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1611 
1612   virtual void emit_code(LIR_Assembler* masm);
1613   virtual LIR_Op2* as_Op2() { return this; }
1614   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1615 };
1616 
1617 class LIR_OpAllocArray : public LIR_Op {
1618  friend class LIR_OpVisitState;
1619 
1620  private:
1621   LIR_Opr   _klass;
1622   LIR_Opr   _len;
1623   LIR_Opr   _tmp1;
1624   LIR_Opr   _tmp2;
1625   LIR_Opr   _tmp3;
1626   LIR_Opr   _tmp4;
1627   BasicType _type;
1628   CodeStub* _stub;
1629 
1630  public:
1631   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1632     : LIR_Op(lir_alloc_array, result, NULL)
1633     , _klass(klass)
1634     , _len(len)
1635     , _tmp1(t1)
1636     , _tmp2(t2)
1637     , _tmp3(t3)
1638     , _tmp4(t4)
1639     , _type(type)
1640     , _stub(stub) {}
1641 
1642   LIR_Opr   klass()   const                      { return _klass;       }
1643   LIR_Opr   len()     const                      { return _len;         }
1644   LIR_Opr   obj()     const                      { return result_opr(); }
1645   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1646   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1647   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1648   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1649   BasicType type()    const                      { return _type;        }
1650   CodeStub* stub()    const                      { return _stub;        }
1651 
1652   virtual void emit_code(LIR_Assembler* masm);
1653   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1654   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1655 };
1656 
1657 
1658 class LIR_Op3: public LIR_Op {
1659  friend class LIR_OpVisitState;
1660 
1661  private:
1662   LIR_Opr _opr1;
1663   LIR_Opr _opr2;
1664   LIR_Opr _opr3;
1665  public:
1666   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1667     : LIR_Op(code, result, info)
1668     , _opr1(opr1)
1669     , _opr2(opr2)
1670     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1671   LIR_Opr in_opr1() const                        { return _opr1; }
1672   LIR_Opr in_opr2() const                        { return _opr2; }
1673   LIR_Opr in_opr3() const                        { return _opr3; }
1674 
1675   virtual void emit_code(LIR_Assembler* masm);
1676   virtual LIR_Op3* as_Op3() { return this; }
1677   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1678 };
1679 
1680 
1681 //--------------------------------
1682 class LabelObj: public CompilationResourceObj {
1683  private:
1684   Label _label;
1685  public:
1686   LabelObj()                                     {}
1687   Label* label()                                 { return &_label; }
1688 };
1689 
1690 
1691 class LIR_OpLock: public LIR_Op {
1692  friend class LIR_OpVisitState;
1693 
1694  private:
1695   LIR_Opr _hdr;
1696   LIR_Opr _obj;
1697   LIR_Opr _lock;
1698   LIR_Opr _scratch;
1699   CodeStub* _stub;
1700  public:
1701   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1702     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1703     , _hdr(hdr)
1704     , _obj(obj)
1705     , _lock(lock)
1706     , _scratch(scratch)
1707     , _stub(stub)                      {}
1708 
1709   LIR_Opr hdr_opr() const                        { return _hdr; }
1710   LIR_Opr obj_opr() const                        { return _obj; }
1711   LIR_Opr lock_opr() const                       { return _lock; }
1712   LIR_Opr scratch_opr() const                    { return _scratch; }
1713   CodeStub* stub() const                         { return _stub; }
1714 
1715   virtual void emit_code(LIR_Assembler* masm);
1716   virtual LIR_OpLock* as_OpLock() { return this; }
1717   void print_instr(outputStream* out) const PRODUCT_RETURN;
1718 };
1719 
1720 
1721 class LIR_OpDelay: public LIR_Op {
1722  friend class LIR_OpVisitState;
1723 
1724  private:
1725   LIR_Op* _op;
1726 
1727  public:
1728   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1729     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1730     _op(op) {
1731     assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1732   }
1733   virtual void emit_code(LIR_Assembler* masm);
1734   virtual LIR_OpDelay* as_OpDelay() { return this; }
1735   void print_instr(outputStream* out) const PRODUCT_RETURN;
1736   LIR_Op* delay_op() const { return _op; }
1737   CodeEmitInfo* call_info() const { return info(); }
1738 };
1739 
1740 
1741 // LIR_OpCompareAndSwap
1742 class LIR_OpCompareAndSwap : public LIR_Op {
1743  friend class LIR_OpVisitState;
1744 
1745  private:
1746   LIR_Opr _addr;
1747   LIR_Opr _cmp_value;
1748   LIR_Opr _new_value;
1749   LIR_Opr _tmp1;
1750   LIR_Opr _tmp2;
1751 
1752  public:
1753   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1754                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1755     : LIR_Op(code, result, NULL)  // no info
1756     , _addr(addr)
1757     , _cmp_value(cmp_value)
1758     , _new_value(new_value)
1759     , _tmp1(t1)
1760     , _tmp2(t2)                                  { }
1761 
1762   LIR_Opr addr()        const                    { return _addr;  }
1763   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1764   LIR_Opr new_value()   const                    { return _new_value; }
1765   LIR_Opr tmp1()        const                    { return _tmp1;      }
1766   LIR_Opr tmp2()        const                    { return _tmp2;      }
1767 
1768   virtual void emit_code(LIR_Assembler* masm);
1769   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1770   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1771 };
1772 
1773 // LIR_OpProfileCall
1774 class LIR_OpProfileCall : public LIR_Op {
1775  friend class LIR_OpVisitState;
1776 
1777  private:
1778   ciMethod* _profiled_method;
1779   int _profiled_bci;
1780   LIR_Opr _mdo;
1781   LIR_Opr _recv;
1782   LIR_Opr _tmp1;
1783   ciKlass* _known_holder;
1784 
1785  public:
1786   // Destroys recv
1787   LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1788     : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1789     , _profiled_method(profiled_method)
1790     , _profiled_bci(profiled_bci)
1791     , _mdo(mdo)
1792     , _recv(recv)
1793     , _tmp1(t1)
1794     , _known_holder(known_holder)                { }
1795 
1796   ciMethod* profiled_method() const              { return _profiled_method;  }
1797   int       profiled_bci()    const              { return _profiled_bci;     }
1798   LIR_Opr   mdo()             const              { return _mdo;              }
1799   LIR_Opr   recv()            const              { return _recv;             }
1800   LIR_Opr   tmp1()            const              { return _tmp1;             }
1801   ciKlass*  known_holder()    const              { return _known_holder;     }
1802 
1803   virtual void emit_code(LIR_Assembler* masm);
1804   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1805   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1806 };
1807 
1808 
1809 class LIR_InsertionBuffer;
1810 
1811 //--------------------------------LIR_List---------------------------------------------------
1812 // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1813 // LIR instructions are appended through the factory methods of LIR_List itself.
1814 //
1815 // Notes:
1816 // - all offsets are (should be) in bytes
1817 // - local positions are specified with an offset, with offset 0 being local 0
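//
// A minimal usage sketch (hypothetical names: 'comp' is the current Compilation,
// 'blk' the block being emitted, r1/r2/r3 already-allocated virtual registers,
// 'target' a BlockBegin*):
//
//   LIR_List* lir = new LIR_List(comp, blk);
//   lir->move(r1, r2);                                   // r2 := r1
//   lir->add(r2, LIR_OprFact::intConst(1), r3);          // r3 := r2 + 1
//   lir->cmp(lir_cond_equal, r3, 0);                     // compare against constant 0
//   lir->branch(lir_cond_equal, T_INT, target);          // conditional branch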
1818 
1819 class LIR_List: public CompilationResourceObj {
1820  private:
1821   LIR_OpList  _operations;
1822 
1823   Compilation*  _compilation;
1824 #ifndef PRODUCT
1825   BlockBegin*   _block;
1826 #endif
1827 #ifdef ASSERT
1828   const char *  _file;
1829   int           _line;
1830 #endif
1831 
1832   void append(LIR_Op* op) {
1833     if (op->source() == NULL)
1834       op->set_source(_compilation->current_instruction());
1835 #ifndef PRODUCT
1836     if (PrintIRWithLIR) {
1837       _compilation->maybe_print_current_instruction();
1838       op->print(); tty->cr();
1839     }
1840 #endif // PRODUCT
1841 
1842     _operations.append(op);
1843 
1844 #ifdef ASSERT
1845     op->verify();
1846     op->set_file_and_line(_file, _line);
1847     _file = NULL;
1848     _line = 0;
1849 #endif
1850   }
1851 
1852  public:
1853   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1854 
1855 #ifdef ASSERT
1856   void set_file_and_line(const char * file, int line);
1857 #endif
1858 
1859   //---------- accessors ---------------
1860   LIR_OpList* instructions_list()                { return &_operations; }
1861   int         length() const                     { return _operations.length(); }
1862   LIR_Op*     at(int i) const                    { return _operations.at(i); }
1863 
1864   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1865 
1866   // insert LIR_Ops in buffer to right places in LIR_List
1867   void append(LIR_InsertionBuffer* buffer);
1868 
1869   //---------- mutators ---------------
1870   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1871   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1872 
1873   //---------- printing -------------
1874   void print_instructions() PRODUCT_RETURN;
1875 
1876 
1877   //---------- instructions -------------
1878   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1879                         address dest, LIR_OprList* arguments,
1880                         CodeEmitInfo* info) {
1881     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1882   }
1883   void call_static(ciMethod* method, LIR_Opr result,
1884                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1885     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1886   }
1887   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1888                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1889     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1890   }
1891   void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1892                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1893     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1894   }
1895   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1896                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1897     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
1898   }
1899 
1900   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1901   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1902   void membar()                                  { append(new LIR_Op0(lir_membar)); }
1903   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1904   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1905 
1906   void nop()                                     { append(new LIR_Op0(lir_nop)); }
1907   void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1908 
1909   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1910   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1911 
1912   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1913 
1914   void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1915   void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1916 
1917   // result is a stack location for the old backend and a virtual register when UseLinearScan is enabled
1918   // stack_loc_temp is an illegal register for the old backend
1919   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1920   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1921   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1922   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1923   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1924   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1925   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1926 
1927   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
1928 
1929   void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
1930   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
1931 
1932   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
1933 
1934   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
1935 
1936 #ifdef PPC
1937   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
1938 #endif
1939   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
1940 
1941   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
1942   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
1943   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
1944 
1945   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
1946   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1947     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
1948   }
1949   void unwind_exception(LIR_Opr exceptionOop) {
1950     append(new LIR_Op1(lir_unwind, exceptionOop));
1951   }
1952 
1953   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1954     append(new LIR_Op2(lir_compare_to,  left, right, dst));
1955   }
1956 
1957   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
1958   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
1959 
1960   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
1961     append(new LIR_Op2(lir_cmp, condition, left, right, info));
1962   }
1963   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
1964     cmp(condition, left, LIR_OprFact::intConst(right), info);
1965   }
1966 
1967   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
1968   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
1969 
1970   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
1971     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
1972   }
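  // cmove pairs with a preceding cmp; a sketch selecting the smaller of two
  // values (hypothetical operands a, b and result r):
  //
  //   lir->cmp(lir_cond_less, a, b);
  //   lir->cmove(lir_cond_less, a, b, r);   // r := (a < b) ? a : b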
1973 
1974   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1975                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1976   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1977                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1978   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1979                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1980 
1981   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
1982   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
1983   void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
1984   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
1985   void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
1986   void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
1987   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
1988 
1989   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
1990   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
1991   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
1992   void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
1993   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
1994   void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
1995   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
1996 
1997   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1998   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1999 
2000   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2001 
2002   void prefetch(LIR_Address* addr, bool is_store);
2003 
2004   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2005   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2006   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2007   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2008   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2009 
2010   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2011   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2012   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2013   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2014 
2015   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2016   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2017 
2018   // jump is an unconditional branch
2019   void jump(BlockBegin* block) {
2020     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
2021   }
2022   void jump(CodeStub* stub) {
2023     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
2024   }
2025   void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
2026   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
2027     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2028     append(new LIR_OpBranch(cond, type, block));
2029   }
2030   void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
2031     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2032     append(new LIR_OpBranch(cond, type, stub));
2033   }
2034   void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2035     assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2036     append(new LIR_OpBranch(cond, type, block, unordered));
2037   }
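  // Floating-point compares may be unordered when an operand is NaN, so the
  // fp variant carries a second target.  A sketch (hypothetical operands f1/f2
  // and blocks less_block/unordered_block):
  //
  //   lir->cmp(lir_cond_less, f1, f2);
  //   lir->branch(lir_cond_less, T_FLOAT, less_block, unordered_block);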
2038 
2039   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2040   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2041   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2042 
2043   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2044   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2045   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2046 
2047   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2048   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2049 
2050   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2051     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2052   }
2053 
2054   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2055                     LIR_OprList* arguments, CodeEmitInfo* info) {
2056     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2057   }
2058 
2059   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2060   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2061   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2062 
2063   void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
2064   void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
2065   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2066 
2067   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2068 
2069   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
2070 
2071   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2072                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2073                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2074                   ciMethod* profiled_method, int profiled_bci);
2075   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
2076   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
2077 
2078   // methodDataOop profiling
2079   void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
2080 };
2081 
2082 void print_LIR(BlockList* blocks);
2083 
2084 class LIR_InsertionBuffer : public CompilationResourceObj {
2085  private:
2086   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2087 
2088   // list of insertion points. index and count are stored alternately:
2089   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2090   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
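  // For illustration (assuming consecutive appends at the same index are merged
  // into one insertion point): after append(3, a), append(3, b), append(7, c)
  // the buffer holds _index_and_count = { 3, 2, 7, 1 } and _ops = { a, b, c }.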
2091   intStack    _index_and_count;
2092 
2093   // the LIR_Ops to be inserted
2094   LIR_OpList  _ops;
2095 
2096   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2097   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2098   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2099 
2100 #ifdef ASSERT
2101   void verify();
2102 #endif
2103  public:
2104   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2105 
2106   // must be called before using the insertion buffer
2107   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2108   bool initialized() const  { return _lir != NULL; }
2109   // called automatically when the buffer is appended to the LIR_List
2110   void finish()             { _lir = NULL; }
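  // Typical lifecycle, as a sketch (hypothetical 'lir', 'src', 'dst'):
  //
  //   LIR_InsertionBuffer buffer;
  //   buffer.init(lir);            // bind to the target list
  //   buffer.move(2, src, dst);    // record a move to be inserted at index 2
  //   lir->append(&buffer);        // inserts the ops and calls finish()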
2111 
2112   // accessors
2113   LIR_List*  lir_list() const             { return _lir; }
2114   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2115   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2116   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2117 
2118   int number_of_ops() const               { return _ops.length(); }
2119   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2120 
2121   // append an instruction to the buffer
2122   void append(int index, LIR_Op* op);
2123 
2124   // instruction
2125   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2126 };
2127 
2128 
2129 //
2130 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2131 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2132 // information about the input, output and temporaries used by the
2133 // op to be recorded.  It also records whether the op has call semantics
2134 // and collects all the CodeEmitInfos used by the op.
2135 //
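// A short sketch of the visiting pattern (hypothetical LIR_Op* op):
//
//   LIR_OpVisitState state;
//   state.visit(op);   // collect operands and infos of 'op'
//   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
//     LIR_Opr in = state.opr_at(LIR_OpVisitState::inputMode, i);
//     // inspect 'in', or replace it via state.set_opr_at(...)
//   }
//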
2136 
2137 
2138 class LIR_OpVisitState: public StackObj {
2139  public:
2140   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2141 
2142   enum {
2143     maxNumberOfOperands = 16,
2144     maxNumberOfInfos = 4
2145   };
2146 
2147  private:
2148   LIR_Op*          _op;
2149 
2150   // optimization: the operands and infos are not stored in a variable-length
2151   //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
2152   int              _oprs_len[numModes];
2153   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2154   int _info_len;
2155   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2156 
2157   bool             _has_call;
2158   bool             _has_slow_case;
2159 
2160 
2161   // only include register operands
2162   // addresses are decomposed to the base and index registers
2163   // constants and stack operands are ignored
2164   void append(LIR_Opr& opr, OprMode mode) {
2165     assert(opr->is_valid(), "should not call this otherwise");
2166     assert(mode >= 0 && mode < numModes, "bad mode");
2167 
2168     if (opr->is_register()) {
2169        assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2170       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2171 
2172     } else if (opr->is_pointer()) {
2173       LIR_Address* address = opr->as_address_ptr();
2174       if (address != NULL) {
2175         // special handling for addresses: add base and index register of the address
2176         // both are always input operands!
2177         if (address->_base->is_valid()) {
2178           assert(address->_base->is_register(), "must be");
2179           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2180           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
2181         }
2182         if (address->_index->is_valid()) {
2183           assert(address->_index->is_register(), "must be");
2184           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2185           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
2186         }
2187 
2188       } else {
2189         assert(opr->is_constant(), "constant operands are not processed");
2190       }
2191     } else {
2192       assert(opr->is_stack(), "stack operands are not processed");
2193     }
2194   }
2195 
2196   void append(CodeEmitInfo* info) {
2197     assert(info != NULL, "should not call this otherwise");
2198     assert(_info_len < maxNumberOfInfos, "array overflow");
2199     _info_new[_info_len++] = info;
2200   }
2201 
2202  public:
2203   LIR_OpVisitState()         { reset(); }
2204 
2205   LIR_Op* op() const         { return _op; }
2206   void set_op(LIR_Op* op)    { reset(); _op = op; }
2207 
2208   bool has_call() const      { return _has_call; }
2209   bool has_slow_case() const { return _has_slow_case; }
2210 
2211   void reset() {
2212     _op = NULL;
2213     _has_call = false;
2214     _has_slow_case = false;
2215 
2216     _oprs_len[inputMode] = 0;
2217     _oprs_len[tempMode] = 0;
2218     _oprs_len[outputMode] = 0;
2219     _info_len = 0;
2220   }
2221 
2222 
2223   int opr_count(OprMode mode) const {
2224     assert(mode >= 0 && mode < numModes, "bad mode");
2225     return _oprs_len[mode];
2226   }
2227 
2228   LIR_Opr opr_at(OprMode mode, int index) const {
2229     assert(mode >= 0 && mode < numModes, "bad mode");
2230     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2231     return *_oprs_new[mode][index];
2232   }
2233 
2234   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2235     assert(mode >= 0 && mode < numModes, "bad mode");
2236     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2237     *_oprs_new[mode][index] = opr;
2238   }
2239 
2240   int info_count() const {
2241     return _info_len;
2242   }
2243 
2244   CodeEmitInfo* info_at(int index) const {
2245     assert(index < _info_len, "index out of bounds");
2246     return _info_new[index];
2247   }
2248 
2249   XHandlers* all_xhandler();
2250 
2251   // collects all register operands of the instruction
2252   void visit(LIR_Op* op);
2253 
2254 #ifdef ASSERT
2255   // check that an operation has no operands
2256   bool no_operands(LIR_Op* op);
2257 #endif
2258 
2259   // LIR_Op visitor functions use these to fill in the state
2260   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2261   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2262   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2263   void do_info(CodeEmitInfo* info)        { append(info); }
2264 
2265   void do_stub(CodeStub* stub);
2266   void do_call()                          { _has_call = true; }
2267   void do_slow_case()                     { _has_slow_case = true; }
2268   void do_slow_case(CodeEmitInfo* info) {
2269     _has_slow_case = true;
2270     append(info);
2271   }
2272 };
2273 
2274 
2275 inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }