/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_LIR_HPP
#define SHARE_VM_C1_C1_LIR_HPP

#include "c1/c1_ValueType.hpp"

class BlockBegin;
class BlockList;
class LIR_Assembler;
class CodeEmitInfo;
class CodeStub;
class CodeStubList;
class ArrayCopyStub;
class LIR_Op;
class ciType;
class ValueType;
class LIR_OpVisitState;
class FpuStackSim;

//---------------------------------------------------------------------
//                 LIR Operands
//  LIR_OprDesc
//    LIR_OprPtr
//      LIR_Const
//      LIR_Address
//---------------------------------------------------------------------
class LIR_OprDesc;
class LIR_OprPtr;
class LIR_Const;
class LIR_Address;
class LIR_OprVisitor;


typedef LIR_OprDesc* LIR_Opr;
typedef int          RegNr;

define_array(LIR_OprArray, LIR_Opr)
define_stack(LIR_OprList, LIR_OprArray)

define_array(LIR_OprRefArray, LIR_Opr*)
define_stack(LIR_OprRefList, LIR_OprRefArray)

define_array(CodeEmitInfoArray, CodeEmitInfo*)
define_stack(CodeEmitInfoList, CodeEmitInfoArray)

define_array(LIR_OpArray, LIR_Op*)
define_stack(LIR_OpList, LIR_OpArray)

// define LIR_OprPtr early so LIR_OprDesc can refer to it
class LIR_OprPtr: public CompilationResourceObj {
 public:
  bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }

  virtual LIR_Const*  as_constant()              { return NULL; }
  virtual LIR_Address* as_address()              { return NULL; }
  virtual BasicType type() const                 = 0;
  virtual void print_value_on(outputStream* out) const = 0;
};



// LIR constants
class LIR_Const: public LIR_OprPtr {
 private:
  JavaValue _value;

  void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }

 public:
  LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  LIR_Const(void* p) {
#ifdef _LP64
    assert(sizeof(jlong) >= sizeof(p), "too small");
    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
#else
    assert(sizeof(jint) >= sizeof(p), "too small");
    _value.set_type(T_INT);     _value.set_jint((jint)p);
#endif
  }

  virtual BasicType type()       const { return _value.get_type(); }
  virtual LIR_Const* as_constant()     { return this; }

  jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
  jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
  jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
  jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
  jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
  jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
  jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }

#ifdef _LP64
  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
#else
  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
#endif


  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
  jint      as_jint_lo_bits() const    {
    if (type() == T_DOUBLE) {
      return low(jlong_cast(_value.get_jdouble()));
    } else {
      return as_jint_lo();
    }
  }
  jint      as_jint_hi_bits() const    {
    if (type() == T_DOUBLE) {
      return high(jlong_cast(_value.get_jdouble()));
    } else {
      return as_jint_hi();
    }
  }
  jlong      as_jlong_bits() const    {
    if (type() == T_DOUBLE) {
      return jlong_cast(_value.get_jdouble());
    } else {
      return as_jlong();
    }
  }

  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;


  bool is_zero_float() {
    jfloat f = as_jfloat();
    jfloat ok = 0.0f;
    return jint_cast(f) == jint_cast(ok);
  }

  bool is_one_float() {
    jfloat f = as_jfloat();
    return !g_isnan(f) && g_isfinite(f) && f == 1.0;
  }

  bool is_zero_double() {
    jdouble d = as_jdouble();
    jdouble ok = 0.0;
    return jlong_cast(d) == jlong_cast(ok);
  }

  bool is_one_double() {
    jdouble d = as_jdouble();
    return !g_isnan(d) && g_isfinite(d) && d == 1.0;
  }
};


//---------------------LIR Operand descriptor------------------------------------
//
// The class LIR_OprDesc represents a LIR instruction operand. It can be a
// register (ALU/FPU), a stack location or a constant. Constants and addresses
// are represented as resource-area allocated structures (see above), while
// registers and stack locations are encoded directly in the 'this' pointer
// (see the value() function).

class LIR_OprDesc: public CompilationResourceObj {
 public:
  // value structure:
  //     data       opr-type opr-kind
  // +--------------+-------+-------+
  // [max...........|7 6 5 4|3 2 1 0]
  //                             ^
  //                    is_pointer bit
  //
  // If the lowest bit is cleared, the value is a pointer to a resource-area
  // allocated structure; 4 bits are needed to represent the operand types.
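  //
  // Worked example (for illustration only; the values follow from the
  // OprKind/OprBits/OprShift enums below):
  //
  //   LIR_Opr r = LIR_OprFact::single_cpu(5);
  //   // value() == (5 << data_shift) | int_type | cpu_register | single_size
  //   //         == 0x1400B (data_shift is 14)
  //   // kind_field() == cpu_register, type_field() == int_type, data() == 5
  //
  // Because cpu_register (3) sets the lowest bit, is_pointer() is false and
  // the operand is decoded from the bits of the pointer value itself instead
  // of being dereferenced as a LIR_OprPtr.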

 private:
  friend class LIR_OprFact;

  // Conversion
  intptr_t value() const                         { return (intptr_t) this; }

  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
    return (value() & mask) == masked_value;
  }

  enum OprKind {
      pointer_value      = 0
    , stack_value        = 1
    , cpu_register       = 3
    , fpu_register       = 5
    , illegal_value      = 7
  };

  enum OprBits {
      pointer_bits   = 1
    , kind_bits      = 3
    , type_bits      = 4
    , size_bits      = 2
    , destroys_bits  = 1
    , virtual_bits   = 1
    , is_xmm_bits    = 1
    , last_use_bits  = 1
    , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
    , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
                       is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
    , data_bits      = BitsPerInt - non_data_bits
    , reg_bits       = data_bits / 2      // for two registers in one value encoding
  };

  enum OprShift {
      kind_shift     = 0
    , type_shift     = kind_shift     + kind_bits
    , size_shift     = type_shift     + type_bits
    , destroys_shift = size_shift     + size_bits
    , last_use_shift = destroys_shift + destroys_bits
    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
    , is_xmm_shift   = virtual_shift + virtual_bits
    , data_shift     = is_xmm_shift + is_xmm_bits
    , reg1_shift = data_shift
    , reg2_shift = data_shift + reg_bits

  };

  enum OprSize {
      single_size = 0 << size_shift
    , double_size = 1 << size_shift
  };

  enum OprMask {
      kind_mask      = right_n_bits(kind_bits)
    , type_mask      = right_n_bits(type_bits) << type_shift
    , size_mask      = right_n_bits(size_bits) << size_shift
    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
    , pointer_mask   = right_n_bits(pointer_bits)
    , lower_reg_mask = right_n_bits(reg_bits)
    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
  };

  uintptr_t data() const                         { return value() >> data_shift; }
  int lo_reg_half() const                        { return data() & lower_reg_mask; }
  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }

  static char type_char(BasicType t);

 public:
  enum {
    vreg_base = ConcreteRegisterImpl::number_of_registers,
    vreg_max = (1 << data_bits) - 1
  };

  static inline LIR_Opr illegalOpr();

  enum OprType {
      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
    , int_type      = 1 << type_shift
    , long_type     = 2 << type_shift
    , object_type   = 3 << type_shift
    , address_type  = 4 << type_shift
    , float_type    = 5 << type_shift
    , double_type   = 6 << type_shift
  };
  friend OprType as_OprType(BasicType t);
  friend BasicType as_BasicType(OprType t);

  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }

  static OprSize size_for(BasicType t) {
    switch (t) {
      case T_LONG:
      case T_DOUBLE:
        return double_size;
        break;

      case T_FLOAT:
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_ADDRESS:
      case T_OBJECT:
      case T_ARRAY:
        return single_size;
        break;

      default:
        ShouldNotReachHere();
        return single_size;
      }
  }


  void validate_type() const PRODUCT_RETURN;

  BasicType type() const {
    if (is_pointer()) {
      return pointer()->type();
    }
    return as_BasicType(type_field());
  }


  ValueType* value_type() const                  { return as_ValueType(type()); }

  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }

  bool is_equal(LIR_Opr opr) const         { return this == opr; }
  // checks whether types are same
  bool is_same_type(LIR_Opr opr) const     {
    assert(type_field() != unknown_type &&
           opr->type_field() != unknown_type, "shouldn't see unknown_type");
    return type_field() == opr->type_field();
  }
  bool is_same_register(LIR_Opr opr) {
    return (is_register() && opr->is_register() &&
            kind_field() == opr->kind_field() &&
            (value() & no_type_mask) == (opr->value() & no_type_mask));
  }

  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
  bool is_illegal() const      { return kind_field() == illegal_value; }
  bool is_valid() const        { return kind_field() != illegal_value; }

  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }

  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }

  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
  bool is_oop() const;

  // semantics for fpu- and xmm-registers:
  // * the fpu predicates also return true for xmm registers, i.e. an operand
  //   that is_single_xmm() is also is_single_fpu(), because xmm registers are
  //   encoded with the fpu_register kind plus the is_xmm bit
  // * so you must always check for is_???_xmm prior to is_???_fpu to
  //   distinguish between fpu- and xmm-registers
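  //
  // For illustration, a sketch of the required check order (the 'opr'
  // variable is hypothetical and not part of this interface):
  //
  //   if (opr->is_single_xmm())      { /* xmm register */ }
  //   else if (opr->is_single_fpu()) { /* fpu stack register */ }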

  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }

  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }

  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }

  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }

  // fast accessor functions for special bits; these do not work for pointers
  // (in these functions the check for is_pointer() is omitted)
  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }

  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }


  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }

  LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
  LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
  LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }

  Register as_register()    const;
  Register as_register_lo() const;
  Register as_register_hi() const;

  Register as_pointer_register() {
#ifdef _LP64
    if (is_double_cpu()) {
      assert(as_register_lo() == as_register_hi(), "should be a single register");
      return as_register_lo();
    }
#endif
    return as_register();
  }

#ifdef X86
  XMMRegister as_xmm_float_reg() const;
  XMMRegister as_xmm_double_reg() const;
  // for compatibility with RInfo
  int fpu () const                                  { return lo_reg_half(); }
#endif // X86
#if defined(SPARC) || defined(ARM) || defined(PPC)
  FloatRegister as_float_reg   () const;
  FloatRegister as_double_reg  () const;
#endif

  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }

  void print() const PRODUCT_RETURN;
  void print(outputStream* out) const PRODUCT_RETURN;
};


inline LIR_OprDesc::OprType as_OprType(BasicType type) {
  switch (type) {
  case T_INT:      return LIR_OprDesc::int_type;
  case T_LONG:     return LIR_OprDesc::long_type;
  case T_FLOAT:    return LIR_OprDesc::float_type;
  case T_DOUBLE:   return LIR_OprDesc::double_type;
  case T_OBJECT:
  case T_ARRAY:    return LIR_OprDesc::object_type;
  case T_ADDRESS:  return LIR_OprDesc::address_type;
  case T_ILLEGAL:  // fall through
  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
  }
}

inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
  switch (t) {
  case LIR_OprDesc::int_type:     return T_INT;
  case LIR_OprDesc::long_type:    return T_LONG;
  case LIR_OprDesc::float_type:   return T_FLOAT;
  case LIR_OprDesc::double_type:  return T_DOUBLE;
  case LIR_OprDesc::object_type:  return T_OBJECT;
  case LIR_OprDesc::address_type: return T_ADDRESS;
  case LIR_OprDesc::unknown_type: // fall through
  default: ShouldNotReachHere();  return T_ILLEGAL;
  }
}


// LIR_Address
class LIR_Address: public LIR_OprPtr {
 friend class LIR_OpVisitState;

 public:
  // NOTE: currently these must be the log2 of the scale factor (and
  // must also be equivalent to the ScaleFactor enum in
  // assembler_i486.hpp)
  enum Scale {
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3
  };
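
  // For illustration: an address computes base + (index << scale) + disp.
  // A 4-byte jint array element would typically be addressed with times_4,
  // e.g. (operand names are hypothetical; the scaled constructor below is
  // only available on X86 and ARM):
  //
  //   new LIR_Address(array_opr, index_opr, LIR_Address::times_4,
  //                   arrayOopDesc::base_offset_in_bytes(T_INT), T_INT);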

 private:
  LIR_Opr   _base;
  LIR_Opr   _index;
  Scale     _scale;
  intx      _disp;
  BasicType _type;

 public:
  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
       _base(base)
     , _index(index)
     , _scale(times_1)
     , _type(type)
     , _disp(0) { verify(); }

  LIR_Address(LIR_Opr base, intx disp, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _type(type)
     , _disp(disp) { verify(); }

  LIR_Address(LIR_Opr base, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _type(type)
     , _disp(0) { verify(); }

#if defined(X86) || defined(ARM)
  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
       _base(base)
     , _index(index)
     , _scale(scale)
     , _type(type)
     , _disp(disp) { verify(); }
#endif // X86 || ARM

  LIR_Opr base()  const                          { return _base;  }
  LIR_Opr index() const                          { return _index; }
  Scale   scale() const                          { return _scale; }
  intx    disp()  const                          { return _disp;  }

  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }

  virtual LIR_Address* as_address()              { return this;   }
  virtual BasicType type() const                 { return _type; }
  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;

  void verify() const PRODUCT_RETURN;

  static Scale scale(BasicType type);
};


// operand factory
class LIR_OprFact: public AllStatic {
 public:

  static LIR_Opr illegalOpr;

  static LIR_Opr single_cpu(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::int_type             |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  static LIR_Opr single_cpu_oop(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::object_type          |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  static LIR_Opr single_cpu_address(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::address_type         |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  static LIR_Opr double_cpu(int reg1, int reg2) {
    LP64_ONLY(assert(reg1 == reg2, "must be identical"));
    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                               (reg2 << LIR_OprDesc::reg2_shift) |
                               LIR_OprDesc::long_type            |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::double_size);
  }

  static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             LIR_OprDesc::float_type           |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::single_size); }
#if defined(ARM)
  static LIR_Opr double_fpu(int reg1, int reg2)    { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
#endif
#ifdef SPARC
  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                                                                             (reg2 << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }
#endif
#ifdef X86
  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }

  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             LIR_OprDesc::float_type           |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::single_size          |
                                                                             LIR_OprDesc::is_xmm_mask); }
  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size          |
                                                                             LIR_OprDesc::is_xmm_mask); }
#endif // X86
#ifdef PPC
  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }
  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift)        |
                                                                             LIR_OprDesc::float_type           |
                                                                             LIR_OprDesc::cpu_register         |
                                                                             LIR_OprDesc::single_size); }
  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift)        |
                                                                             (reg1 << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::cpu_register         |
                                                                             LIR_OprDesc::double_size); }
#endif // PPC

  static LIR_Opr virtual_register(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::object_type  |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

#ifdef __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type  |
                                  LIR_OprDesc::cpu_register |
                                  LIR_OprDesc::single_size |
                                  LIR_OprDesc::virtual_mask);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type |
                                  LIR_OprDesc::cpu_register |
                                  LIR_OprDesc::double_size |
                                  LIR_OprDesc::virtual_mask);
        break;
#else // __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type           |
                                  LIR_OprDesc::fpu_register         |
                                  LIR_OprDesc::single_size          |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
#endif // __SOFTFP__
      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    res->validate_type();
    assert(res->vreg_number() == index, "conversion check");
    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // old-style calculation; check if old and new method are equal
    LIR_OprDesc::OprType t = as_OprType(type);
#ifdef __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                               t |
                               LIR_OprDesc::cpu_register |
                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
#else // __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
                                          ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
    assert(res == old_res, "old and new method not equal");
#endif // __SOFTFP__
#endif // ASSERT

    return res;
  }

  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters,
  // as the index is platform independent; a double stack slot using indices 2 and 3 always
  // has index 2.
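  // For illustration: a double-word value in stack slots 2 and 3 is created
  // as stack(2, T_DOUBLE) (or stack(2, T_LONG)); the operand records only
  // index 2 together with double_size.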
  static LIR_Opr stack(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::object_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    assert(index >= 0, "index must be positive");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          LIR_OprDesc::stack_value           |
                                          as_OprType(type)                   |
                                          LIR_OprDesc::size_for(type));
    assert(res == old_res, "old and new method not equal");
#endif

    return res;
  }

  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }

  static LIR_Opr value_type(ValueType* type);
  static LIR_Opr dummy_value_type(ValueType* type);
};
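
// Usage sketch for the operand factory (for illustration only; the concrete
// register numbers and stack index are made up):
//
//   LIR_Opr vreg = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base, T_INT);
//   LIR_Opr slot = LIR_OprFact::stack(0, T_INT);
//   LIR_Opr c42  = LIR_OprFact::intConst(42);
//   LIR_Opr addr = LIR_OprFact::address(new LIR_Address(vreg, 0, T_INT));
//
// Constants and addresses are resource-allocated LIR_OprPtr structures, while
// register and stack operands are encoded directly in the returned pointer.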


//-------------------------------------------------------------------------------
//                   LIR Instructions
//-------------------------------------------------------------------------------
//
// Note:
//  - every instruction has a result operand
//  - every instruction has a CodeEmitInfo operand (can be revisited later)
//  - every instruction has a LIR_OpCode operand
//  - LIR_OpN means an instruction that has N input operands
//
// class hierarchy:
//
class  LIR_Op;
class    LIR_Op0;
class      LIR_OpLabel;
class    LIR_Op1;
class      LIR_OpBranch;
class      LIR_OpConvert;
class      LIR_OpAllocObj;
class      LIR_OpRoundFP;
class    LIR_Op2;
class    LIR_OpDelay;
class    LIR_Op3;
class      LIR_OpAllocArray;
class    LIR_OpCall;
class      LIR_OpJavaCall;
class      LIR_OpRTCall;
class    LIR_OpArrayCopy;
class    LIR_OpLock;
class    LIR_OpTypeCheck;
class    LIR_OpCompareAndSwap;
class    LIR_OpProfileCall;


// LIR operation codes
enum LIR_Code {
    lir_none
  , begin_op0
      , lir_word_align
      , lir_label
      , lir_nop
      , lir_backwardbranch_target
      , lir_std_entry
      , lir_osr_entry
      , lir_build_frame
      , lir_fpop_raw
      , lir_24bit_FPU
      , lir_reset_FPU
      , lir_breakpoint
      , lir_rtcall
      , lir_membar
      , lir_membar_acquire
      , lir_membar_release
      , lir_membar_loadload
      , lir_membar_storestore
      , lir_membar_loadstore
      , lir_membar_storeload
      , lir_get_thread
  , end_op0
  , begin_op1
      , lir_fxch
      , lir_fld
      , lir_ffree
      , lir_push
      , lir_pop
      , lir_null_check
      , lir_return
      , lir_leal
      , lir_neg
      , lir_branch
      , lir_cond_float_branch
      , lir_move
      , lir_prefetchr
      , lir_prefetchw
      , lir_convert
      , lir_alloc_object
      , lir_monaddr
      , lir_roundfp
      , lir_safepoint
      , lir_pack64
      , lir_unpack64
      , lir_unwind
  , end_op1
  , begin_op2
      , lir_cmp
      , lir_cmp_l2i
      , lir_ucmp_fd2i
      , lir_cmp_fd2i
      , lir_cmove
      , lir_add
      , lir_sub
      , lir_mul
      , lir_mul_strictfp
      , lir_div
      , lir_div_strictfp
      , lir_rem
      , lir_sqrt
      , lir_abs
      , lir_sin
      , lir_cos
      , lir_tan
      , lir_log
      , lir_log10
      , lir_exp
      , lir_pow
      , lir_logic_and
      , lir_logic_or
      , lir_logic_xor
      , lir_shl
      , lir_shr
      , lir_ushr
      , lir_alloc_array
      , lir_throw
      , lir_compare_to
  , end_op2
  , begin_op3
      , lir_idiv
      , lir_irem
  , end_op3
  , begin_opJavaCall
      , lir_static_call
      , lir_optvirtual_call
      , lir_icvirtual_call
      , lir_virtual_call
      , lir_dynamic_call
  , end_opJavaCall
  , begin_opArrayCopy
      , lir_arraycopy
  , end_opArrayCopy
  , begin_opLock
    , lir_lock
    , lir_unlock
  , end_opLock
  , begin_delay_slot
    , lir_delay_slot
  , end_delay_slot
  , begin_opTypeCheck
    , lir_instanceof
    , lir_checkcast
    , lir_store_check
  , end_opTypeCheck
  , begin_opCompareAndSwap
    , lir_cas_long
    , lir_cas_obj
    , lir_cas_int
  , end_opCompareAndSwap
  , begin_opMDOProfile
    , lir_profile_call
  , end_opMDOProfile
};


enum LIR_Condition {
    lir_cond_equal
  , lir_cond_notEqual
  , lir_cond_less
  , lir_cond_lessEqual
  , lir_cond_greaterEqual
  , lir_cond_greater
  , lir_cond_belowEqual
  , lir_cond_aboveEqual
  , lir_cond_always
  , lir_cond_unknown = -1
};


enum LIR_PatchCode {
  lir_patch_none,
  lir_patch_low,
  lir_patch_high,
  lir_patch_normal
};


enum LIR_MoveKind {
  lir_move_normal,
  lir_move_volatile,
  lir_move_unaligned,
  lir_move_wide,
  lir_move_max_flag
};


// --------------------------------------------------
// LIR_Op
// --------------------------------------------------
class LIR_Op: public CompilationResourceObj {
 friend class LIR_OpVisitState;

#ifdef ASSERT
 private:
  const char *  _file;
  int           _line;
#endif

 protected:
  LIR_Opr       _result;
  unsigned short _code;
  unsigned short _flags;
  CodeEmitInfo* _info;
  int           _id;     // value id for register allocation
  int           _fpu_pop_count;
  Instruction*  _source; // for debugging

  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;

 protected:
  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }

 public:
  LIR_Op()
    : _result(LIR_OprFact::illegalOpr)
    , _code(lir_none)
    , _flags(0)
    , _info(NULL)
#ifdef ASSERT
    , _file(NULL)
    , _line(0)
#endif
    , _fpu_pop_count(0)
    , _source(NULL)
    , _id(-1)                             {}

  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
    : _result(result)
    , _code(code)
    , _flags(0)
    , _info(info)
#ifdef ASSERT
    , _file(NULL)
    , _line(0)
#endif
    , _fpu_pop_count(0)
    , _source(NULL)
    , _id(-1)                             {}

  CodeEmitInfo* info() const                  { return _info;   }
  LIR_Code code()      const                  { return (LIR_Code)_code;   }
  LIR_Opr result_opr() const                  { return _result; }
  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }

#ifdef ASSERT
  void set_file_and_line(const char * file, int line) {
    _file = file;
    _line = line;
  }
#endif

  virtual const char * name() const PRODUCT_RETURN0;

  int id()             const                  { return _id;     }
  void set_id(int id)                         { _id = id; }

  // FPU stack simulation helpers -- only used on Intel
  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
  int  fpu_pop_count() const                  { return _fpu_pop_count; }
  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }

  Instruction* source() const                 { return _source; }
  void set_source(Instruction* ins)           { _source = ins; }

  virtual void emit_code(LIR_Assembler* masm) = 0;
  virtual void print_instr(outputStream* out) const   = 0;
  virtual void print_on(outputStream* st) const PRODUCT_RETURN;

  virtual LIR_OpCall* as_OpCall() { return NULL; }
  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
  virtual LIR_OpLock* as_OpLock() { return NULL; }
  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
  virtual LIR_Op0* as_Op0() { return NULL; }
  virtual LIR_Op1* as_Op1() { return NULL; }
  virtual LIR_Op2* as_Op2() { return NULL; }
  virtual LIR_Op3* as_Op3() { return NULL; }
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }

  virtual void verify() const {}
};
1103 
1104 // for calls
1105 class LIR_OpCall: public LIR_Op {
1106  friend class LIR_OpVisitState;
1107 
1108  protected:
1109   address      _addr;
1110   LIR_OprList* _arguments;
1111  protected:
1112   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1113              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1114     : LIR_Op(code, result, info)
1115     , _arguments(arguments)
1116     , _addr(addr) {}
1117 
1118  public:
1119   address addr() const                           { return _addr; }
1120   const LIR_OprList* arguments() const           { return _arguments; }
1121   virtual LIR_OpCall* as_OpCall()                { return this; }
1122 };
1123 
1124 
1125 // --------------------------------------------------
1126 // LIR_OpJavaCall
1127 // --------------------------------------------------
1128 class LIR_OpJavaCall: public LIR_OpCall {
1129  friend class LIR_OpVisitState;
1130 
1131  private:
1132   ciMethod* _method;
1133   LIR_Opr   _receiver;
1134   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1135 
1136  public:
1137   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1138                  LIR_Opr receiver, LIR_Opr result,
1139                  address addr, LIR_OprList* arguments,
1140                  CodeEmitInfo* info)
1141   : LIR_OpCall(code, addr, result, arguments, info)
1142   , _receiver(receiver)
1143   , _method(method)
1144   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1145   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1146 
1147   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1148                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1149                  LIR_OprList* arguments, CodeEmitInfo* info)
1150   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1151   , _receiver(receiver)
1152   , _method(method)
1153   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1154   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1155 
1156   LIR_Opr receiver() const                       { return _receiver; }
1157   ciMethod* method() const                       { return _method;   }
1158 
1159   // JSR 292 support.
1160   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1161   bool is_method_handle_invoke() const {
1162     return
1163       is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
1164       ||
1165       (method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() &&
1166        methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
1167   }
1168 
1169   intptr_t vtable_offset() const {
1170     assert(_code == lir_virtual_call, "only have vtable for real vcall");
1171     return (intptr_t) addr();
1172   }
1173 
1174   virtual void emit_code(LIR_Assembler* masm);
1175   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1176   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1177 };
1178 
1179 // --------------------------------------------------
1180 // LIR_OpLabel
1181 // --------------------------------------------------
1182 // Location where a branch can continue
1183 class LIR_OpLabel: public LIR_Op {
1184  friend class LIR_OpVisitState;
1185 
1186  private:
1187   Label* _label;
1188  public:
1189   LIR_OpLabel(Label* lbl)
1190    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1191    , _label(lbl)                                 {}
1192   Label* label() const                           { return _label; }
1193 
1194   virtual void emit_code(LIR_Assembler* masm);
1195   virtual LIR_OpLabel* as_OpLabel() { return this; }
1196   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1197 };
1198 
1199 // LIR_OpArrayCopy
1200 class LIR_OpArrayCopy: public LIR_Op {
1201  friend class LIR_OpVisitState;
1202 
1203  private:
1204   ArrayCopyStub*  _stub;
1205   LIR_Opr   _src;
1206   LIR_Opr   _src_pos;
1207   LIR_Opr   _dst;
1208   LIR_Opr   _dst_pos;
1209   LIR_Opr   _length;
1210   LIR_Opr   _tmp;
1211   ciArrayKlass* _expected_type;
1212   int       _flags;
1213 
1214 public:
1215   enum Flags {
1216     src_null_check         = 1 << 0,
1217     dst_null_check         = 1 << 1,
1218     src_pos_positive_check = 1 << 2,
1219     dst_pos_positive_check = 1 << 3,
1220     length_positive_check  = 1 << 4,
1221     src_range_check        = 1 << 5,
1222     dst_range_check        = 1 << 6,
1223     type_check             = 1 << 7,
1224     overlapping            = 1 << 8,
1225     unaligned              = 1 << 9,
1226     src_objarray           = 1 << 10,
1227     dst_objarray           = 1 << 11,
1228     all_flags              = (1 << 12) - 1
1229   };
1230 
1231   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1232                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1233 
1234   LIR_Opr src() const                            { return _src; }
1235   LIR_Opr src_pos() const                        { return _src_pos; }
1236   LIR_Opr dst() const                            { return _dst; }
1237   LIR_Opr dst_pos() const                        { return _dst_pos; }
1238   LIR_Opr length() const                         { return _length; }
1239   LIR_Opr tmp() const                            { return _tmp; }
1240   int flags() const                              { return _flags; }
1241   ciArrayKlass* expected_type() const            { return _expected_type; }
1242   ArrayCopyStub* stub() const                    { return _stub; }
1243 
1244   virtual void emit_code(LIR_Assembler* masm);
1245   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1246   void print_instr(outputStream* out) const PRODUCT_RETURN;
1247 };
1248 
1249 
1250 // --------------------------------------------------
1251 // LIR_Op0
1252 // --------------------------------------------------
1253 class LIR_Op0: public LIR_Op {
1254  friend class LIR_OpVisitState;
1255 
1256  public:
1257   LIR_Op0(LIR_Code code)
1258    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1259   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1260    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1261 
1262   virtual void emit_code(LIR_Assembler* masm);
1263   virtual LIR_Op0* as_Op0() { return this; }
1264   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1265 };
1266 
1267 
1268 // --------------------------------------------------
1269 // LIR_Op1
1270 // --------------------------------------------------
1271 
1272 class LIR_Op1: public LIR_Op {
1273  friend class LIR_OpVisitState;
1274 
1275  protected:
1276   LIR_Opr         _opr;   // input operand
1277   BasicType       _type;  // operand type
1278   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1279 
1280   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1281 
1282   void set_kind(LIR_MoveKind kind) {
1283     assert(code() == lir_move, "must be");
1284     _flags = kind;
1285   }
1286 
1287  public:
1288   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1289     : LIR_Op(code, result, info)
1290     , _opr(opr)
1291     , _patch(patch)
1292     , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1293 
1294   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1295     : LIR_Op(code, result, info)
1296     , _opr(opr)
1297     , _patch(patch)
1298     , _type(type)                      {
1299     assert(code == lir_move, "must be");
1300     set_kind(kind);
1301   }
1302 
1303   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1304     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1305     , _opr(opr)
1306     , _patch(lir_patch_none)
1307     , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1308 
1309   LIR_Opr in_opr()           const               { return _opr;   }
1310   LIR_PatchCode patch_code() const               { return _patch; }
1311   BasicType type()           const               { return _type;  }
1312 
1313   LIR_MoveKind move_kind() const {
1314     assert(code() == lir_move, "must be");
1315     return (LIR_MoveKind)_flags;
1316   }
1317 
1318   virtual void emit_code(LIR_Assembler* masm);
1319   virtual LIR_Op1* as_Op1() { return this; }
1320   virtual const char * name() const PRODUCT_RETURN0;
1321 
1322   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1323 
1324   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1325   virtual void verify() const;
1326 };
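// Note (added illustration, not in the original header): for lir_move the
// inherited _flags field doubles as a LIR_MoveKind, set through the
// kind-taking constructor above.  For example, LIR_List::volatile_move()
// further down in this file appends
//
//   new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)
//
// and the backend can later distinguish the move variants via op->move_kind().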
1327 
1328 
1329 // for runtime calls
1330 class LIR_OpRTCall: public LIR_OpCall {
1331  friend class LIR_OpVisitState;
1332 
1333  private:
1334   LIR_Opr _tmp;
1335  public:
1336   LIR_OpRTCall(address addr, LIR_Opr tmp,
1337                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1338     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1339     , _tmp(tmp) {}
1340 
1341   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1342   virtual void emit_code(LIR_Assembler* masm);
1343   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1344 
1345   LIR_Opr tmp() const                            { return _tmp; }
1346 
1347   virtual void verify() const;
1348 };
1349 
1350 
1351 class LIR_OpBranch: public LIR_Op {
1352  friend class LIR_OpVisitState;
1353 
1354  private:
1355   LIR_Condition _cond;
1356   BasicType     _type;
1357   Label*        _label;
1358   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1359   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1360   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1361 
1362  public:
1363   LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
1364     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1365     , _cond(cond)
1366     , _type(type)
1367     , _label(lbl)
1368     , _block(NULL)
1369     , _ublock(NULL)
1370     , _stub(NULL) { }
1371 
1372   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1373   LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1374 
1375   // for unordered comparisons
1376   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1377 
1378   LIR_Condition cond()        const              { return _cond;        }
1379   BasicType     type()        const              { return _type;        }
1380   Label*        label()       const              { return _label;       }
1381   BlockBegin*   block()       const              { return _block;       }
1382   BlockBegin*   ublock()      const              { return _ublock;      }
1383   CodeStub*     stub()        const              { return _stub;       }
1384 
1385   void          change_block(BlockBegin* b);
1386   void          change_ublock(BlockBegin* b);
1387   void          negate_cond();
1388 
1389   virtual void emit_code(LIR_Assembler* masm);
1390   virtual LIR_OpBranch* as_OpBranch() { return this; }
1391   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1392 };
1393 
1394 
1395 class ConversionStub;
1396 
1397 class LIR_OpConvert: public LIR_Op1 {
1398  friend class LIR_OpVisitState;
1399 
1400  private:
1401    Bytecodes::Code _bytecode;
1402    ConversionStub* _stub;
1403 #ifdef PPC
1404   LIR_Opr _tmp1;
1405   LIR_Opr _tmp2;
1406 #endif
1407 
1408  public:
1409    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1410      : LIR_Op1(lir_convert, opr, result)
1411      , _stub(stub)
1412 #ifdef PPC
1413      , _tmp1(LIR_OprDesc::illegalOpr())
1414      , _tmp2(LIR_OprDesc::illegalOpr())
1415 #endif
1416      , _bytecode(code)                           {}
1417 
1418 #ifdef PPC
1419    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
1420                  ,LIR_Opr tmp1, LIR_Opr tmp2)
1421      : LIR_Op1(lir_convert, opr, result)
1422      , _stub(stub)
1423      , _tmp1(tmp1)
1424      , _tmp2(tmp2)
1425      , _bytecode(code)                           {}
1426 #endif
1427 
1428   Bytecodes::Code bytecode() const               { return _bytecode; }
1429   ConversionStub* stub() const                   { return _stub; }
1430 #ifdef PPC
1431   LIR_Opr tmp1() const                           { return _tmp1; }
1432   LIR_Opr tmp2() const                           { return _tmp2; }
1433 #endif
1434 
1435   virtual void emit_code(LIR_Assembler* masm);
1436   virtual LIR_OpConvert* as_OpConvert() { return this; }
1437   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1438 
1439   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1440 };
1441 
1442 
1443 // LIR_OpAllocObj
1444 class LIR_OpAllocObj : public LIR_Op1 {
1445  friend class LIR_OpVisitState;
1446 
1447  private:
1448   LIR_Opr _tmp1;
1449   LIR_Opr _tmp2;
1450   LIR_Opr _tmp3;
1451   LIR_Opr _tmp4;
1452   int     _hdr_size;
1453   int     _obj_size;
1454   CodeStub* _stub;
1455   bool    _init_check;
1456 
1457  public:
1458   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1459                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1460                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1461     : LIR_Op1(lir_alloc_object, klass, result)
1462     , _tmp1(t1)
1463     , _tmp2(t2)
1464     , _tmp3(t3)
1465     , _tmp4(t4)
1466     , _hdr_size(hdr_size)
1467     , _obj_size(obj_size)
1468     , _init_check(init_check)
1469     , _stub(stub)                                { }
1470 
1471   LIR_Opr klass()        const                   { return in_opr();     }
1472   LIR_Opr obj()          const                   { return result_opr(); }
1473   LIR_Opr tmp1()         const                   { return _tmp1;        }
1474   LIR_Opr tmp2()         const                   { return _tmp2;        }
1475   LIR_Opr tmp3()         const                   { return _tmp3;        }
1476   LIR_Opr tmp4()         const                   { return _tmp4;        }
1477   int     header_size()  const                   { return _hdr_size;    }
1478   int     object_size()  const                   { return _obj_size;    }
1479   bool    init_check()   const                   { return _init_check;  }
1480   CodeStub* stub()       const                   { return _stub;        }
1481 
1482   virtual void emit_code(LIR_Assembler* masm);
1483   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1484   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1485 };
1486 
1487 
1488 // LIR_OpRoundFP
1489 class LIR_OpRoundFP : public LIR_Op1 {
1490  friend class LIR_OpVisitState;
1491 
1492  private:
1493   LIR_Opr _tmp;
1494 
1495  public:
1496   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1497     : LIR_Op1(lir_roundfp, reg, result)
1498     , _tmp(stack_loc_temp) {}
1499 
1500   LIR_Opr tmp() const                            { return _tmp; }
1501   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1502   void print_instr(outputStream* out) const PRODUCT_RETURN;
1503 };
1504 
1505 // LIR_OpTypeCheck
1506 class LIR_OpTypeCheck: public LIR_Op {
1507  friend class LIR_OpVisitState;
1508 
1509  private:
1510   LIR_Opr       _object;
1511   LIR_Opr       _array;
1512   ciKlass*      _klass;
1513   LIR_Opr       _tmp1;
1514   LIR_Opr       _tmp2;
1515   LIR_Opr       _tmp3;
1516   bool          _fast_check;
1517   CodeEmitInfo* _info_for_patch;
1518   CodeEmitInfo* _info_for_exception;
1519   CodeStub*     _stub;
1520   ciMethod*     _profiled_method;
1521   int           _profiled_bci;
1522   bool          _should_profile;
1523 
1524 public:
1525   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1526                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1527                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1528   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1529                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1530 
1531   LIR_Opr object() const                         { return _object;         }
1532   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1533   LIR_Opr tmp1() const                           { return _tmp1;           }
1534   LIR_Opr tmp2() const                           { return _tmp2;           }
1535   LIR_Opr tmp3() const                           { return _tmp3;           }
1536   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1537   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1538   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1539   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1540   CodeStub* stub() const                         { return _stub;           }
1541 
1542   // methodDataOop profiling
1543   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1544   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1545   void set_should_profile(bool b)                { _should_profile = b;       }
1546   ciMethod* profiled_method() const              { return _profiled_method;   }
1547   int       profiled_bci() const                 { return _profiled_bci;      }
1548   bool      should_profile() const               { return _should_profile;    }
1549 
1550   virtual void emit_code(LIR_Assembler* masm);
1551   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1552   void print_instr(outputStream* out) const PRODUCT_RETURN;
1553 };
1554 
1555 // LIR_Op2
1556 class LIR_Op2: public LIR_Op {
1557  friend class LIR_OpVisitState;
1558 
1559   int  _fpu_stack_size; // for sin/cos implementation on Intel
1560 
1561  protected:
1562   LIR_Opr   _opr1;
1563   LIR_Opr   _opr2;
1564   BasicType _type;
1565   LIR_Opr   _tmp1;
1566   LIR_Opr   _tmp2;
1567   LIR_Opr   _tmp3;
1568   LIR_Opr   _tmp4;
1569   LIR_Opr   _tmp5;
1570   LIR_Condition _condition;
1571 
1572   void verify() const;
1573 
1574  public:
1575   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1576     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1577     , _opr1(opr1)
1578     , _opr2(opr2)
1579     , _type(T_ILLEGAL)
1580     , _condition(condition)
1581     , _fpu_stack_size(0)
1582     , _tmp1(LIR_OprFact::illegalOpr)
1583     , _tmp2(LIR_OprFact::illegalOpr)
1584     , _tmp3(LIR_OprFact::illegalOpr)
1585     , _tmp4(LIR_OprFact::illegalOpr)
1586     , _tmp5(LIR_OprFact::illegalOpr) {
1587     assert(code == lir_cmp, "code check");
1588   }
1589 
1590   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1591     : LIR_Op(code, result, NULL)
1592     , _opr1(opr1)
1593     , _opr2(opr2)
1594     , _type(type)
1595     , _condition(condition)
1596     , _fpu_stack_size(0)
1597     , _tmp1(LIR_OprFact::illegalOpr)
1598     , _tmp2(LIR_OprFact::illegalOpr)
1599     , _tmp3(LIR_OprFact::illegalOpr)
1600     , _tmp4(LIR_OprFact::illegalOpr)
1601     , _tmp5(LIR_OprFact::illegalOpr) {
1602     assert(code == lir_cmove, "code check");
1603     assert(type != T_ILLEGAL, "cmove should have type");
1604   }
1605 
1606   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1607           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1608     : LIR_Op(code, result, info)
1609     , _opr1(opr1)
1610     , _opr2(opr2)
1611     , _type(type)
1612     , _condition(lir_cond_unknown)
1613     , _fpu_stack_size(0)
1614     , _tmp1(LIR_OprFact::illegalOpr)
1615     , _tmp2(LIR_OprFact::illegalOpr)
1616     , _tmp3(LIR_OprFact::illegalOpr)
1617     , _tmp4(LIR_OprFact::illegalOpr)
1618     , _tmp5(LIR_OprFact::illegalOpr) {
1619     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1620   }
1621 
1622   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, 
1623           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1624     : LIR_Op(code, result, NULL)
1625     , _opr1(opr1)
1626     , _opr2(opr2)
1627     , _type(T_ILLEGAL)
1628     , _condition(lir_cond_unknown)
1629     , _fpu_stack_size(0)
1630     , _tmp1(tmp1)
1631     , _tmp2(tmp2)
1632     , _tmp3(tmp3)
1633     , _tmp4(tmp4)
1634     , _tmp5(tmp5) {
1635     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1636   }
1637 
1638   LIR_Opr in_opr1() const                        { return _opr1; }
1639   LIR_Opr in_opr2() const                        { return _opr2; }
1640   BasicType type()  const                        { return _type; }
1641   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1642   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1643   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1644   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1645   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1646   LIR_Condition condition() const  {
1647     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1648   }
1649   void set_condition(LIR_Condition condition) {
1650     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1651   }
1652 
1653   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1654   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1655 
1656   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1657   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1658 
1659   virtual void emit_code(LIR_Assembler* masm);
1660   virtual LIR_Op2* as_Op2() { return this; }
1661   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1662 };
1663 
1664 class LIR_OpAllocArray : public LIR_Op {
1665  friend class LIR_OpVisitState;
1666 
1667  private:
1668   LIR_Opr   _klass;
1669   LIR_Opr   _len;
1670   LIR_Opr   _tmp1;
1671   LIR_Opr   _tmp2;
1672   LIR_Opr   _tmp3;
1673   LIR_Opr   _tmp4;
1674   BasicType _type;
1675   CodeStub* _stub;
1676 
1677  public:
1678   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1679     : LIR_Op(lir_alloc_array, result, NULL)
1680     , _klass(klass)
1681     , _len(len)
1682     , _tmp1(t1)
1683     , _tmp2(t2)
1684     , _tmp3(t3)
1685     , _tmp4(t4)
1686     , _type(type)
1687     , _stub(stub) {}
1688 
1689   LIR_Opr   klass()   const                      { return _klass;       }
1690   LIR_Opr   len()     const                      { return _len;         }
1691   LIR_Opr   obj()     const                      { return result_opr(); }
1692   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1693   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1694   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1695   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1696   BasicType type()    const                      { return _type;        }
1697   CodeStub* stub()    const                      { return _stub;        }
1698 
1699   virtual void emit_code(LIR_Assembler* masm);
1700   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1701   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1702 };
1703 
1704 
1705 class LIR_Op3: public LIR_Op {
1706  friend class LIR_OpVisitState;
1707 
1708  private:
1709   LIR_Opr _opr1;
1710   LIR_Opr _opr2;
1711   LIR_Opr _opr3;
1712  public:
1713   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1714     : LIR_Op(code, result, info)
1715     , _opr1(opr1)
1716     , _opr2(opr2)
1717     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1718   LIR_Opr in_opr1() const                        { return _opr1; }
1719   LIR_Opr in_opr2() const                        { return _opr2; }
1720   LIR_Opr in_opr3() const                        { return _opr3; }
1721 
1722   virtual void emit_code(LIR_Assembler* masm);
1723   virtual LIR_Op3* as_Op3() { return this; }
1724   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1725 };
1726 
1727 
1728 //--------------------------------
1729 class LabelObj: public CompilationResourceObj {
1730  private:
1731   Label _label;
1732  public:
1733   LabelObj()                                     {}
1734   Label* label()                                 { return &_label; }
1735 };
1736 
1737 
1738 class LIR_OpLock: public LIR_Op {
1739  friend class LIR_OpVisitState;
1740 
1741  private:
1742   LIR_Opr _hdr;
1743   LIR_Opr _obj;
1744   LIR_Opr _lock;
1745   LIR_Opr _scratch;
1746   CodeStub* _stub;
1747  public:
1748   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1749     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1750     , _hdr(hdr)
1751     , _obj(obj)
1752     , _lock(lock)
1753     , _scratch(scratch)
1754     , _stub(stub)                      {}
1755 
1756   LIR_Opr hdr_opr() const                        { return _hdr; }
1757   LIR_Opr obj_opr() const                        { return _obj; }
1758   LIR_Opr lock_opr() const                       { return _lock; }
1759   LIR_Opr scratch_opr() const                    { return _scratch; }
1760   CodeStub* stub() const                         { return _stub; }
1761 
1762   virtual void emit_code(LIR_Assembler* masm);
1763   virtual LIR_OpLock* as_OpLock() { return this; }
1764   void print_instr(outputStream* out) const PRODUCT_RETURN;
1765 };
1766 
1767 
1768 class LIR_OpDelay: public LIR_Op {
1769  friend class LIR_OpVisitState;
1770 
1771  private:
1772   LIR_Op* _op;
1773 
1774  public:
1775   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1776     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1777     _op(op) {
1778     assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1779   }
1780   virtual void emit_code(LIR_Assembler* masm);
1781   virtual LIR_OpDelay* as_OpDelay() { return this; }
1782   void print_instr(outputStream* out) const PRODUCT_RETURN;
1783   LIR_Op* delay_op() const { return _op; }
1784   CodeEmitInfo* call_info() const { return info(); }
1785 };
1786 
1787 
1788 // LIR_OpCompareAndSwap
1789 class LIR_OpCompareAndSwap : public LIR_Op {
1790  friend class LIR_OpVisitState;
1791 
1792  private:
1793   LIR_Opr _addr;
1794   LIR_Opr _cmp_value;
1795   LIR_Opr _new_value;
1796   LIR_Opr _tmp1;
1797   LIR_Opr _tmp2;
1798 
1799  public:
1800   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1801                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1802     : LIR_Op(code, result, NULL)  // no info
1803     , _addr(addr)
1804     , _cmp_value(cmp_value)
1805     , _new_value(new_value)
1806     , _tmp1(t1)
1807     , _tmp2(t2)                                  { }
1808 
1809   LIR_Opr addr()        const                    { return _addr;  }
1810   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1811   LIR_Opr new_value()   const                    { return _new_value; }
1812   LIR_Opr tmp1()        const                    { return _tmp1;      }
1813   LIR_Opr tmp2()        const                    { return _tmp2;      }
1814 
1815   virtual void emit_code(LIR_Assembler* masm);
1816   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1817   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1818 };
1819 
1820 // LIR_OpProfileCall
1821 class LIR_OpProfileCall : public LIR_Op {
1822  friend class LIR_OpVisitState;
1823 
1824  private:
1825   ciMethod* _profiled_method;
1826   int _profiled_bci;
1827   LIR_Opr _mdo;
1828   LIR_Opr _recv;
1829   LIR_Opr _tmp1;
1830   ciKlass* _known_holder;
1831 
1832  public:
1833   // Destroys recv
1834   LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1835     : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1836     , _profiled_method(profiled_method)
1837     , _profiled_bci(profiled_bci)
1838     , _mdo(mdo)
1839     , _recv(recv)
1840     , _tmp1(t1)
1841     , _known_holder(known_holder)                { }
1842 
1843   ciMethod* profiled_method() const              { return _profiled_method;  }
1844   int       profiled_bci()    const              { return _profiled_bci;     }
1845   LIR_Opr   mdo()             const              { return _mdo;              }
1846   LIR_Opr   recv()            const              { return _recv;             }
1847   LIR_Opr   tmp1()            const              { return _tmp1;             }
1848   ciKlass*  known_holder()    const              { return _known_holder;     }
1849 
1850   virtual void emit_code(LIR_Assembler* masm);
1851   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1852   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1853 };
1854 
1855 class LIR_InsertionBuffer;
1856 
1857 //--------------------------------LIR_List---------------------------------------------------
1858 // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1859 // LIR instructions are appended through the factory methods of LIR_List itself.
1860 //
1861 // Notes:
1862 // - all offsets are (should be) in bytes
1863 // - local positions are specified with an offset, with offset 0 being local 0
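// Illustrative sketch (not part of the original header): a typical client such
// as the LIR generator owns one LIR_List per block and appends ops through the
// factory methods below.  The names `lir`, `L`, `left`, `src` and `dst` are
// hypothetical.
//
//   LIR_List* lir = ...;                               // list of the current block
//   LabelObj* L = new LabelObj();
//   lir->cmp(lir_cond_less, left, LIR_OprFact::intConst(0));
//   lir->branch(lir_cond_less, T_INT, L->label());     // conditional branch to a local label
//   lir->move(src, dst);                               // plain move
//   lir->branch_destination(L->label());               // bind the label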
1864 
1865 class LIR_List: public CompilationResourceObj {
1866  private:
1867   LIR_OpList  _operations;
1868 
1869   Compilation*  _compilation;
1870 #ifndef PRODUCT
1871   BlockBegin*   _block;
1872 #endif
1873 #ifdef ASSERT
1874   const char *  _file;
1875   int           _line;
1876 #endif
1877 
1878   void append(LIR_Op* op) {
1879     if (op->source() == NULL)
1880       op->set_source(_compilation->current_instruction());
1881 #ifndef PRODUCT
1882     if (PrintIRWithLIR) {
1883       _compilation->maybe_print_current_instruction();
1884       op->print(); tty->cr();
1885     }
1886 #endif // PRODUCT
1887 
1888     _operations.append(op);
1889 
1890 #ifdef ASSERT
1891     op->verify();
1892     op->set_file_and_line(_file, _line);
1893     _file = NULL;
1894     _line = 0;
1895 #endif
1896   }
1897 
1898  public:
1899   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1900 
1901 #ifdef ASSERT
1902   void set_file_and_line(const char * file, int line);
1903 #endif
1904 
1905   //---------- accessors ---------------
1906   LIR_OpList* instructions_list()                { return &_operations; }
1907   int         length() const                     { return _operations.length(); }
1908   LIR_Op*     at(int i) const                    { return _operations.at(i); }
1909 
1910   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1911 
1912   // insert LIR_Ops in buffer to right places in LIR_List
1913   void append(LIR_InsertionBuffer* buffer);
1914 
1915   //---------- mutators ---------------
1916   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1917   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1918   void remove_at(int i)                          { _operations.remove_at(i); }
1919 
1920   //---------- printing -------------
1921   void print_instructions() PRODUCT_RETURN;
1922 
1923 
1924   //---------- instructions -------------
1925   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1926                         address dest, LIR_OprList* arguments,
1927                         CodeEmitInfo* info) {
1928     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1929   }
1930   void call_static(ciMethod* method, LIR_Opr result,
1931                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1932     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1933   }
1934   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1935                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1936     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1937   }
1938   void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1939                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1940     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1941   }
1942   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1943                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1944     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
1945   }
1946 
1947   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1948   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1949   void membar()                                  { append(new LIR_Op0(lir_membar)); }
1950   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1951   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1952   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
1953   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
1954   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
1955   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
1956 
1957   void nop()                                     { append(new LIR_Op0(lir_nop)); }
1958   void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1959 
1960   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1961   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1962 
1963   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1964 
1965   void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1966   void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1967 
1968   // result is a stack location with the old backend and a virtual register when UseLinearScan is enabled
1969   // stack_loc_temp is an illegal register with the old backend
1970   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1971   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1972   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1973   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1974   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1975   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1976   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1977   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
1978     if (UseCompressedOops) {
1979       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
1980     } else {
1981       move(src, dst, info);
1982     }
1983   }
1984   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
1985     if (UseCompressedOops) {
1986       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
1987     } else {
1988       move(src, dst, info);
1989     }
1990   }
1991   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
1992 
1993   void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
1994   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
1995 
1996   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
1997 
1998   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
1999 
2000 #ifdef PPC
2001   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
2002 #endif
2003   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2004 
2005   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2006   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2007   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2008 
2009   void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
2010   void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
2011 
2012   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
2013   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2014     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2015   }
2016   void unwind_exception(LIR_Opr exceptionOop) {
2017     append(new LIR_Op1(lir_unwind, exceptionOop));
2018   }
2019 
2020   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2021     append(new LIR_Op2(lir_compare_to,  left, right, dst));
2022   }
2023 
2024   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2025   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2026 
2027   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2028     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2029   }
2030   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2031     cmp(condition, left, LIR_OprFact::intConst(right), info);
2032   }
2033 
2034   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2035   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2036 
2037   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2038     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2039   }
2040 
2041   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2042                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2043   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2044                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2045   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2046                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
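  // Sketch of a typical use of the cas_* factories above (assumption about the
  // surrounding code generator, not taken from this header): the cas op sets
  // the condition flags and a following cmove materializes the boolean result.
  //
  //   lir->cas_int(addr, cmp_value, new_value, t1, t2);
  //   lir->cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
  //              result, T_INT);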
2047 
2048   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2049   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2050   void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
2051   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2052   void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
2053   void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
2054   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2055   void exp (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5)                { append(new LIR_Op2(lir_exp , from, tmp1, to, tmp2, tmp3, tmp4, tmp5)); }
2056   void pow (LIR_Opr arg1, LIR_Opr arg2, LIR_Opr res, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5) { append(new LIR_Op2(lir_pow, arg1, arg2, res, tmp1, tmp2, tmp3, tmp4, tmp5)); }
2057 
2058   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2059   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2060   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2061   void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
2062   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2063   void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
2064   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2065 
2066   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2067   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2068 
2069   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2070 
2071   void prefetch(LIR_Address* addr, bool is_store);
2072 
2073   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2074   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2075   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2076   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2077   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2078 
2079   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2080   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2081   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2082   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2083 
2084   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2085   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2086 
2087   // jump is an unconditional branch
2088   void jump(BlockBegin* block) {
2089     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
2090   }
2091   void jump(CodeStub* stub) {
2092     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
2093   }
2094   void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
2095   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
2096     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2097     append(new LIR_OpBranch(cond, type, block));
2098   }
2099   void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
2100     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2101     append(new LIR_OpBranch(cond, type, stub));
2102   }
2103   void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2104     assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2105     append(new LIR_OpBranch(cond, type, block, unordered));
2106   }
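  // Hedged example (operand and block names are made up): a floating point
  // compare followed by the two-target branch above; `unordered_block` receives
  // control when the comparison is unordered, i.e. one input was NaN.
  //
  //   lir->cmp(lir_cond_less, f0, f1);
  //   lir->branch(lir_cond_less, T_FLOAT, less_block, unordered_block);
  //   lir->jump(fall_through_block);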
2107 
2108   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2109   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2110   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2111 
2112   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2113   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2114   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2115 
2116   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2117   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2118 
2119   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2120     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2121   }
2122 
2123   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2124                     LIR_OprList* arguments, CodeEmitInfo* info) {
2125     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2126   }
2127 
2128   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2129   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2130   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2131 
2132   void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
2133   void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
2134   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2135 
2136   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2137 
2138   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
2139 
2140   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2141   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2142 
2143   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2144                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2145                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2146                   ciMethod* profiled_method, int profiled_bci);
2147   // methodDataOop profiling
2148   void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2149     append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
2150   }
2151 };
2152 
2153 void print_LIR(BlockList* blocks);
2154 
2155 class LIR_InsertionBuffer : public CompilationResourceObj {
2156  private:
2157   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2158 
2159   // list of insertion points. index and count are stored alternately:
2160   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2161   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
2162   intStack    _index_and_count;
2163 
2164   // the LIR_Ops to be inserted
2165   LIR_OpList  _ops;
2166 
2167   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2168   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2169   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2170 
2171 #ifdef ASSERT
2172   void verify();
2173 #endif
2174  public:
2175   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2176 
2177   // must be called before using the insertion buffer
2178   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2179   bool initialized() const  { return _lir != NULL; }
2180   // called automatically when the buffer is appended to the LIR_List
2181   void finish()             { _lir = NULL; }
2182 
2183   // accessors
2184   LIR_List*  lir_list() const             { return _lir; }
2185   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2186   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2187   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2188 
2189   int number_of_ops() const               { return _ops.length(); }
2190   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2191 
2192   // append an instruction to the buffer
2193   void append(int index, LIR_Op* op);
2194 
2195   // instruction
2196   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2197 };
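// Usage sketch (derived from the comments above, not from the original
// header); `block_lir`, `tmp`, `reg`, `slot_a` and `slot_b` are hypothetical.
//
//   LIR_InsertionBuffer buf;
//   buf.init(block_lir);                 // attach to the LIR_List of one block
//   buf.move(3, tmp, slot_a);            // insert a move before op #3
//   buf.move(3, reg, slot_b);            // a second op at the same index
//   buf.move(7, tmp, slot_a);            // and one before op #7
//   // _index_and_count now holds the pairs (3, 2) and (7, 1)
//   block_lir->append(&buf);             // splice the ops in; calls finish()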
2198 
2199 
2200 //
2201 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2202 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2203 // information about the input, output and temporaries used by the
2204 // op to be recorded.  It also notes whether the op has call semantics
2205 // and collects all the CodeEmitInfos used by the op.
2206 //
2207 
2208 
2209 class LIR_OpVisitState: public StackObj {
2210  public:
2211   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2212 
2213   enum {
2214     maxNumberOfOperands = 16,
2215     maxNumberOfInfos = 4
2216   };
2217 
2218  private:
2219   LIR_Op*          _op;
2220 
2221   // optimization: the operands and infos are not stored in a variable-length
2222   //               list, but in fixed-size arrays to avoid the overhead of size checks and resizing
2223   int              _oprs_len[numModes];
2224   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2225   int _info_len;
2226   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2227 
2228   bool             _has_call;
2229   bool             _has_slow_case;
2230 
2231 
2232   // only register operands are recorded;
2233   // addresses are decomposed into their base and index registers,
2234   // constants and stack operands are ignored
2235   void append(LIR_Opr& opr, OprMode mode) {
2236     assert(opr->is_valid(), "should not call this otherwise");
2237     assert(mode >= 0 && mode < numModes, "bad mode");
2238 
2239     if (opr->is_register()) {
2240        assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2241       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2242 
2243     } else if (opr->is_pointer()) {
2244       LIR_Address* address = opr->as_address_ptr();
2245       if (address != NULL) {
2246         // special handling for addresses: add base and index register of the address
2247         // both are always input operands!
2248         if (address->_base->is_valid()) {
2249           assert(address->_base->is_register(), "must be");
2250           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2251           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
2252         }
2253         if (address->_index->is_valid()) {
2254           assert(address->_index->is_register(), "must be");
2255           assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2256           _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
2257         }
2258 
2259       } else {
2260         assert(opr->is_constant(), "constant operands are not processed");
2261       }
2262     } else {
2263       assert(opr->is_stack(), "stack operands are not processed");
2264     }
2265   }
2266 
2267   void append(CodeEmitInfo* info) {
2268     assert(info != NULL, "should not call this otherwise");
2269     assert(_info_len < maxNumberOfInfos, "array overflow");
2270     _info_new[_info_len++] = info;
2271   }
2272 
2273  public:
2274   LIR_OpVisitState()         { reset(); }
2275 
2276   LIR_Op* op() const         { return _op; }
2277   void set_op(LIR_Op* op)    { reset(); _op = op; }
2278 
2279   bool has_call() const      { return _has_call; }
2280   bool has_slow_case() const { return _has_slow_case; }
2281 
2282   void reset() {
2283     _op = NULL;
2284     _has_call = false;
2285     _has_slow_case = false;
2286 
2287     _oprs_len[inputMode] = 0;
2288     _oprs_len[tempMode] = 0;
2289     _oprs_len[outputMode] = 0;
2290     _info_len = 0;
2291   }
2292 
2293 
2294   int opr_count(OprMode mode) const {
2295     assert(mode >= 0 && mode < numModes, "bad mode");
2296     return _oprs_len[mode];
2297   }
2298 
2299   LIR_Opr opr_at(OprMode mode, int index) const {
2300     assert(mode >= 0 && mode < numModes, "bad mode");
2301     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2302     return *_oprs_new[mode][index];
2303   }
2304 
2305   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2306     assert(mode >= 0 && mode < numModes, "bad mode");
2307     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2308     *_oprs_new[mode][index] = opr;
2309   }
2310 
2311   int info_count() const {
2312     return _info_len;
2313   }
2314 
2315   CodeEmitInfo* info_at(int index) const {
2316     assert(index < _info_len, "index out of bounds");
2317     return _info_new[index];
2318   }
2319 
2320   XHandlers* all_xhandler();
2321 
2322   // collects all register operands of the instruction
2323   void visit(LIR_Op* op);
2324 
2325 #ifdef ASSERT
2326   // check that an operation has no operands
2327   bool no_operands(LIR_Op* op);
2328 #endif
2329 
2330   // LIR_Op visitor functions use these to fill in the state
2331   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2332   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2333   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2334   void do_info(CodeEmitInfo* info)        { append(info); }
2335 
2336   void do_stub(CodeStub* stub);
2337   void do_call()                          { _has_call = true; }
2338   void do_slow_case()                     { _has_slow_case = true; }
2339   void do_slow_case(CodeEmitInfo* info) {
2340     _has_slow_case = true;
2341     append(info);
2342   }
2343 };
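// Illustrative sketch (not in the original header): how a client, e.g. a
// register allocator, would walk the register operands of an arbitrary op.
//
//   LIR_OpVisitState state;
//   state.visit(op);                                           // collect operands and infos
//   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
//     LIR_Opr in = state.opr_at(LIR_OpVisitState::inputMode, i);
//     // ... record a use of `in` at the position of `op` ...
//   }
//   if (state.has_call()) {
//     // the op has call semantics, e.g. caller-saved registers are clobbered
//   }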
2344 
2345 
2346 inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2347 
2348 #endif // SHARE_VM_C1_C1_LIR_HPP