1 /*
   2  * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_C1_C1_LIR_HPP
  26 #define SHARE_VM_C1_C1_LIR_HPP
  27 
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_ValueType.hpp"
  30 #include "oops/method.hpp"
  31 
  32 class BlockBegin;
  33 class BlockList;
  34 class LIR_Assembler;
  35 class CodeEmitInfo;
  36 class CodeStub;
  37 class CodeStubList;
  38 class ArrayCopyStub;
  39 class LIR_Op;
  40 class ciType;
  41 class ValueType;
  42 class LIR_OpVisitState;
  43 class FpuStackSim;
  44 
  45 //---------------------------------------------------------------------
  46 //                 LIR Operands
  47 //  LIR_OprDesc
  48 //    LIR_OprPtr
  49 //      LIR_Const
  50 //      LIR_Address
  51 //---------------------------------------------------------------------
  52 class LIR_OprDesc;
  53 class LIR_OprPtr;
  54 class LIR_Const;
  55 class LIR_Address;
  56 class LIR_OprVisitor;
  57 
  58 
  59 typedef LIR_OprDesc* LIR_Opr;
  60 typedef int          RegNr;
  61 
  62 typedef GrowableArray<LIR_Opr> LIR_OprList;
  63 typedef GrowableArray<LIR_Op*> LIR_OpArray;
  64 typedef GrowableArray<LIR_Op*> LIR_OpList;
  65 
  66 // define LIR_OprPtr early so LIR_OprDesc can refer to it
  67 class LIR_OprPtr: public CompilationResourceObj {
  68  public:
  69   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  70   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  71 
  72   virtual LIR_Const*  as_constant()              { return NULL; }
  73   virtual LIR_Address* as_address()              { return NULL; }
  74   virtual BasicType type() const                 = 0;
  75   virtual void print_value_on(outputStream* out) const = 0;
  76 };
  77 
  78 
  79 
  80 // LIR constants
  81 class LIR_Const: public LIR_OprPtr {
  82  private:
  83   JavaValue _value;
  84 
  85   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  86   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  87   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  88 
  89  public:
  90   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  91   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  92   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  93   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  94   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  95   LIR_Const(void* p) {
  96 #ifdef _LP64
    assert(sizeof(jlong) >= sizeof(p), "too small");
  98     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
  99 #else
    assert(sizeof(jint) >= sizeof(p), "too small");
 101     _value.set_type(T_INT);     _value.set_jint((jint)p);
 102 #endif
 103   }
 104   LIR_Const(Metadata* m) {
 105     _value.set_type(T_METADATA);
 106 #ifdef _LP64
 107     _value.set_jlong((jlong)m);
 108 #else
 109     _value.set_jint((jint)m);
 110 #endif // _LP64
 111   }
 112 
 113   virtual BasicType type()       const { return _value.get_type(); }
 114   virtual LIR_Const* as_constant()     { return this; }
 115 
 116   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 117   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 118   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 119   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 120   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 121   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 122   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 123 
 124 #ifdef _LP64
 125   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 126   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
 127 #else
 128   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 129   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
 130 #endif
 131 
 132 
 133   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 134   jint      as_jint_lo_bits() const    {
 135     if (type() == T_DOUBLE) {
 136       return low(jlong_cast(_value.get_jdouble()));
 137     } else {
 138       return as_jint_lo();
 139     }
 140   }
 141   jint      as_jint_hi_bits() const    {
 142     if (type() == T_DOUBLE) {
 143       return high(jlong_cast(_value.get_jdouble()));
 144     } else {
 145       return as_jint_hi();
 146     }
 147   }
 148   jlong      as_jlong_bits() const    {
 149     if (type() == T_DOUBLE) {
 150       return jlong_cast(_value.get_jdouble());
 151     } else {
 152       return as_jlong();
 153     }
 154   }
 155 
 156   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 157 
 158 
 159   bool is_zero_float() {
 160     jfloat f = as_jfloat();
 161     jfloat ok = 0.0f;
 162     return jint_cast(f) == jint_cast(ok);
 163   }
 164 
 165   bool is_one_float() {
 166     jfloat f = as_jfloat();
 167     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 168   }
 169 
 170   bool is_zero_double() {
 171     jdouble d = as_jdouble();
 172     jdouble ok = 0.0;
 173     return jlong_cast(d) == jlong_cast(ok);
 174   }
 175 
 176   bool is_one_double() {
 177     jdouble d = as_jdouble();
 178     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 179   }
 180 };
 181 
 182 
 183 //---------------------LIR Operand descriptor------------------------------------
 184 //
 185 // The class LIR_OprDesc represents a LIR instruction operand;
 186 // it can be a register (ALU/FPU), stack location or a constant;
 187 // Constants and addresses are represented as resource area allocated
 188 // structures (see above).
 189 // Registers and stack locations are inlined into the this pointer
 190 // (see value function).
 191 
 192 class LIR_OprDesc: public CompilationResourceObj {
 193  public:
 194   // value structure:
 195   //     data       opr-type opr-kind
 196   // +--------------+-------+-------+
 197   // [max...........|7 6 5 4|3 2 1 0]
 198   //                             ^
 199   //                    is_pointer bit
 200   //
  // if the lowest bit is cleared, the value is a pointer to an out-of-line structure;
  // we need 4 bits to represent the operand type
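  //
  // Illustrative sketch of the encoding (not part of the VM sources; the
  // constant names are the enum values defined further down in this class,
  // and the register number 5 is just an example):
  //
  //   intptr_t v = (5 << reg1_shift)     // data bits: the register number
  //              | int_type              // opr-type bits
  //              | cpu_register          // opr-kind bits (lowest bit set => not a pointer)
  //              | single_size;
  //   LIR_Opr opr = (LIR_Opr)v;          // the operand *is* the encoded value;
  //                                      // no object is allocated
  //
  // A LIR_Const or LIR_Address, in contrast, is a resource-allocated object
  // whose address (lowest bit clear) is used directly as the LIR_Opr.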
 203 
 204  private:
 205   friend class LIR_OprFact;
 206 
 207   // Conversion
 208   intptr_t value() const                         { return (intptr_t) this; }
 209 
 210   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 211     return (value() & mask) == masked_value;
 212   }
 213 
 214   enum OprKind {
 215       pointer_value      = 0
 216     , stack_value        = 1
 217     , cpu_register       = 3
 218     , fpu_register       = 5
 219     , illegal_value      = 7
 220   };
 221 
 222   enum OprBits {
 223       pointer_bits   = 1
 224     , kind_bits      = 3
 225     , type_bits      = 4
 226     , size_bits      = 2
 227     , destroys_bits  = 1
 228     , virtual_bits   = 1
 229     , is_xmm_bits    = 1
 230     , last_use_bits  = 1
 231     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 232     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
 233                        is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
 234     , data_bits      = BitsPerInt - non_data_bits
 235     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 236   };
 237 
 238   enum OprShift {
 239       kind_shift     = 0
 240     , type_shift     = kind_shift     + kind_bits
 241     , size_shift     = type_shift     + type_bits
 242     , destroys_shift = size_shift     + size_bits
 243     , last_use_shift = destroys_shift + destroys_bits
 244     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 245     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 246     , is_xmm_shift   = virtual_shift + virtual_bits
 247     , data_shift     = is_xmm_shift + is_xmm_bits
 248     , reg1_shift = data_shift
 249     , reg2_shift = data_shift + reg_bits
 250 
 251   };
 252 
 253   enum OprSize {
 254       single_size = 0 << size_shift
 255     , double_size = 1 << size_shift
 256   };
 257 
 258   enum OprMask {
 259       kind_mask      = right_n_bits(kind_bits)
 260     , type_mask      = right_n_bits(type_bits) << type_shift
 261     , size_mask      = right_n_bits(size_bits) << size_shift
 262     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 263     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 264     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 265     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 266     , pointer_mask   = right_n_bits(pointer_bits)
 267     , lower_reg_mask = right_n_bits(reg_bits)
 268     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 269   };
 270 
 271   uintptr_t data() const                         { return value() >> data_shift; }
 272   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 273   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 274   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 275   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 276 
 277   static char type_char(BasicType t);
 278 
 279  public:
 280   enum {
 281     vreg_base = ConcreteRegisterImpl::number_of_registers,
 282     vreg_max = (1 << data_bits) - 1
 283   };
 284 
 285   static inline LIR_Opr illegalOpr();
 286 
 287   enum OprType {
 288       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 289     , int_type      = 1 << type_shift
 290     , long_type     = 2 << type_shift
 291     , object_type   = 3 << type_shift
 292     , address_type  = 4 << type_shift
 293     , float_type    = 5 << type_shift
 294     , double_type   = 6 << type_shift
 295     , metadata_type = 7 << type_shift
 296   };
 297   friend OprType as_OprType(BasicType t);
 298   friend BasicType as_BasicType(OprType t);
 299 
 300   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 301   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 302 
 303   static OprSize size_for(BasicType t) {
 304     switch (t) {
 305       case T_LONG:
 306       case T_DOUBLE:
 307         return double_size;
 308         break;
 309 
 310       case T_FLOAT:
 311       case T_BOOLEAN:
 312       case T_CHAR:
 313       case T_BYTE:
 314       case T_SHORT:
 315       case T_INT:
 316       case T_ADDRESS:
 317       case T_OBJECT:
 318       case T_ARRAY:
 319       case T_METADATA:
 320         return single_size;
 321         break;
 322 
 323       default:
 324         ShouldNotReachHere();
 325         return single_size;
 326       }
 327   }
 328 
 329 
 330   void validate_type() const PRODUCT_RETURN;
 331 
 332   BasicType type() const {
 333     if (is_pointer()) {
 334       return pointer()->type();
 335     }
 336     return as_BasicType(type_field());
 337   }
 338 
 339 
 340   ValueType* value_type() const                  { return as_ValueType(type()); }
 341 
 342   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 343 
 344   bool is_equal(LIR_Opr opr) const         { return this == opr; }
  // checks whether the types are the same
 346   bool is_same_type(LIR_Opr opr) const     {
 347     assert(type_field() != unknown_type &&
 348            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 349     return type_field() == opr->type_field();
 350   }
 351   bool is_same_register(LIR_Opr opr) {
 352     return (is_register() && opr->is_register() &&
 353             kind_field() == opr->kind_field() &&
 354             (value() & no_type_mask) == (opr->value() & no_type_mask));
 355   }
 356 
 357   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 358   bool is_illegal() const      { return kind_field() == illegal_value; }
 359   bool is_valid() const        { return kind_field() != illegal_value; }
 360 
 361   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 362   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 363 
 364   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 365   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 366 
 367   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 368   bool is_oop() const;
 369 
  // semantics for fpu- and xmm-registers:
  // * is_single_fpu and is_double_fpu also return true for xmm registers
  //   (so both is_single_fpu and is_single_xmm are true for a single xmm register)
  // * you must therefore always check for is_???_xmm prior to is_???_fpu to
  //   distinguish between fpu- and xmm-registers
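  //
  // A minimal sketch of the resulting check order (illustrative only, not
  // part of the VM sources; 'opr' stands for some register LIR_Opr):
  //
  //   if (opr->is_single_xmm()) {
  //     // XMM register: is_single_fpu() would also answer true here,
  //     // so the XMM test has to come first
  //   } else if (opr->is_single_fpu()) {
  //     // genuine FPU (stack) register
  //   }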
 375 
 376   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 377   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 378   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 379 
 380   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 381   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 382   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 383   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 384   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 385 
 386   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 387   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 388   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 389   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 390   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 391 
 392   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 393   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 394   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 395 
 396   // fast accessor functions for special bits that do not work for pointers
  // (in these functions, the check for is_pointer() is omitted)
 398   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 399   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 400   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 401   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 402   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 403 
 404   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 405   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 406   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 407   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 408 
 409 
 410   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 411   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 412   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 413   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 414   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 415   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 416   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 417   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 418   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 419   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 420   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 421   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 422 
 423   LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
 424   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 425   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 426 
 427   Register as_register()    const;
 428   Register as_register_lo() const;
 429   Register as_register_hi() const;
 430 
 431   Register as_pointer_register() {
 432 #ifdef _LP64
 433     if (is_double_cpu()) {
 434       assert(as_register_lo() == as_register_hi(), "should be a single register");
 435       return as_register_lo();
 436     }
 437 #endif
 438     return as_register();
 439   }
 440 
 441 #ifdef X86
 442   XMMRegister as_xmm_float_reg() const;
 443   XMMRegister as_xmm_double_reg() const;
 444   // for compatibility with RInfo
 445   int fpu () const                                  { return lo_reg_half(); }
 446 #endif
 447 #if defined(SPARC) || defined(ARM) || defined(PPC) || defined(AARCH64)
 448   FloatRegister as_float_reg   () const;
 449   FloatRegister as_double_reg  () const;
 450 #endif
 451 
 452   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 453   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 454   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 455   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 456   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 457 
 458   void print() const PRODUCT_RETURN;
 459   void print(outputStream* out) const PRODUCT_RETURN;
 460 };
 461 
 462 
 463 inline LIR_OprDesc::OprType as_OprType(BasicType type) {
 464   switch (type) {
 465   case T_INT:      return LIR_OprDesc::int_type;
 466   case T_LONG:     return LIR_OprDesc::long_type;
 467   case T_FLOAT:    return LIR_OprDesc::float_type;
 468   case T_DOUBLE:   return LIR_OprDesc::double_type;
 469   case T_OBJECT:
 470   case T_ARRAY:    return LIR_OprDesc::object_type;
 471   case T_ADDRESS:  return LIR_OprDesc::address_type;
 472   case T_METADATA: return LIR_OprDesc::metadata_type;
 473   case T_ILLEGAL:  // fall through
 474   default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
 475   }
 476 }
 477 
 478 inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
 479   switch (t) {
 480   case LIR_OprDesc::int_type:     return T_INT;
 481   case LIR_OprDesc::long_type:    return T_LONG;
 482   case LIR_OprDesc::float_type:   return T_FLOAT;
 483   case LIR_OprDesc::double_type:  return T_DOUBLE;
 484   case LIR_OprDesc::object_type:  return T_OBJECT;
 485   case LIR_OprDesc::address_type: return T_ADDRESS;
 486   case LIR_OprDesc::metadata_type:return T_METADATA;
 487   case LIR_OprDesc::unknown_type: // fall through
 488   default: ShouldNotReachHere();  return T_ILLEGAL;
 489   }
 490 }
 491 
 492 
 493 // LIR_Address
 494 class LIR_Address: public LIR_OprPtr {
 495  friend class LIR_OpVisitState;
 496 
 497  public:
 498   // NOTE: currently these must be the log2 of the scale factor (and
 499   // must also be equivalent to the ScaleFactor enum in
 500   // assembler_i486.hpp)
 501   enum Scale {
 502     times_1  =  0,
 503     times_2  =  1,
 504     times_4  =  2,
 505     times_8  =  3
 506   };
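
  // Illustrative sketch (not part of the VM sources): the address denoted by
  // a LIR_Address is
  //
  //   base + (index << scale) + disp
  //
  // e.g. an int[] element access can use scale = times_4, so that an index i
  // contributes i * 4 bytes; disp would then cover the array header offset.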
 507 
 508  private:
 509   LIR_Opr   _base;
 510   LIR_Opr   _index;
 511   Scale     _scale;
 512   intx      _disp;
 513   BasicType _type;
 514 
 515  public:
  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
       _base(base)
     , _index(index)
     , _scale(times_1)
     , _disp(0)
     , _type(type) { verify(); }

  LIR_Address(LIR_Opr base, intx disp, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _disp(disp)
     , _type(type) { verify(); }

  LIR_Address(LIR_Opr base, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _disp(0)
     , _type(type) { verify(); }

#if defined(X86) || defined(ARM) || defined(AARCH64)
  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
       _base(base)
     , _index(index)
     , _scale(scale)
     , _disp(disp)
     , _type(type) { verify(); }
#endif // X86 || ARM || AARCH64
 545 
 546   LIR_Opr base()  const                          { return _base;  }
 547   LIR_Opr index() const                          { return _index; }
 548   Scale   scale() const                          { return _scale; }
 549   intx    disp()  const                          { return _disp;  }
 550 
 551   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 552 
 553   virtual LIR_Address* as_address()              { return this;   }
 554   virtual BasicType type() const                 { return _type; }
 555   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 556 
 557   void verify0() const PRODUCT_RETURN;
 558 #if defined(LIR_ADDRESS_PD_VERIFY) && !defined(PRODUCT)
 559   void pd_verify() const;
 560   void verify() const { pd_verify(); }
 561 #else
 562   void verify() const { verify0(); }
 563 #endif
 564 
 565   static Scale scale(BasicType type);
 566 };
 567 
 568 
 569 // operand factory
 570 class LIR_OprFact: public AllStatic {
 571  public:
 572 
 573   static LIR_Opr illegalOpr;
 574 
 575   static LIR_Opr single_cpu(int reg) {
 576     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 577                                LIR_OprDesc::int_type             |
 578                                LIR_OprDesc::cpu_register         |
 579                                LIR_OprDesc::single_size);
 580   }
 581   static LIR_Opr single_cpu_oop(int reg) {
 582     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 583                                LIR_OprDesc::object_type          |
 584                                LIR_OprDesc::cpu_register         |
 585                                LIR_OprDesc::single_size);
 586   }
 587   static LIR_Opr single_cpu_address(int reg) {
 588     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 589                                LIR_OprDesc::address_type         |
 590                                LIR_OprDesc::cpu_register         |
 591                                LIR_OprDesc::single_size);
 592   }
 593   static LIR_Opr single_cpu_metadata(int reg) {
 594     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 595                                LIR_OprDesc::metadata_type        |
 596                                LIR_OprDesc::cpu_register         |
 597                                LIR_OprDesc::single_size);
 598   }
 599   static LIR_Opr double_cpu(int reg1, int reg2) {
 600     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 601     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 602                                (reg2 << LIR_OprDesc::reg2_shift) |
 603                                LIR_OprDesc::long_type            |
 604                                LIR_OprDesc::cpu_register         |
 605                                LIR_OprDesc::double_size);
 606   }
 607 
 608   static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 609                                                                              LIR_OprDesc::float_type           |
 610                                                                              LIR_OprDesc::fpu_register         |
 611                                                                              LIR_OprDesc::single_size); }
 612 #if defined(ARM32)
 613   static LIR_Opr double_fpu(int reg1, int reg2)    { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
 614   static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
 615   static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
 616 #endif
 617 #ifdef SPARC
 618   static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 619                                                                              (reg2 << LIR_OprDesc::reg2_shift) |
 620                                                                              LIR_OprDesc::double_type          |
 621                                                                              LIR_OprDesc::fpu_register         |
 622                                                                              LIR_OprDesc::double_size); }
 623 #endif
 624 #if defined(X86) || defined(AARCH64)
 625   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 626                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 627                                                                              LIR_OprDesc::double_type          |
 628                                                                              LIR_OprDesc::fpu_register         |
 629                                                                              LIR_OprDesc::double_size); }
 630 
 631   static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 632                                                                              LIR_OprDesc::float_type           |
 633                                                                              LIR_OprDesc::fpu_register         |
 634                                                                              LIR_OprDesc::single_size          |
 635                                                                              LIR_OprDesc::is_xmm_mask); }
 636   static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 637                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 638                                                                              LIR_OprDesc::double_type          |
 639                                                                              LIR_OprDesc::fpu_register         |
 640                                                                              LIR_OprDesc::double_size          |
 641                                                                              LIR_OprDesc::is_xmm_mask); }
#endif // X86 || AARCH64
 643 #if defined(PPC)
 644   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 645                                                                              (reg  << LIR_OprDesc::reg2_shift) |
 646                                                                              LIR_OprDesc::double_type          |
 647                                                                              LIR_OprDesc::fpu_register         |
 648                                                                              LIR_OprDesc::double_size); }
 649 #endif
 650 #ifdef PPC32
 651   static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift)        |
 652                                                                              LIR_OprDesc::float_type           |
 653                                                                              LIR_OprDesc::cpu_register         |
 654                                                                              LIR_OprDesc::single_size); }
 655   static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift)        |
 656                                                                              (reg1 << LIR_OprDesc::reg2_shift) |
 657                                                                              LIR_OprDesc::double_type          |
 658                                                                              LIR_OprDesc::cpu_register         |
 659                                                                              LIR_OprDesc::double_size); }
 660 #endif // PPC32
 661 
 662   static LIR_Opr virtual_register(int index, BasicType type) {
 663     LIR_Opr res;
 664     switch (type) {
 665       case T_OBJECT: // fall through
 666       case T_ARRAY:
 667         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 668                                             LIR_OprDesc::object_type  |
 669                                             LIR_OprDesc::cpu_register |
 670                                             LIR_OprDesc::single_size  |
 671                                             LIR_OprDesc::virtual_mask);
 672         break;
 673 
 674       case T_METADATA:
 675         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 676                                             LIR_OprDesc::metadata_type|
 677                                             LIR_OprDesc::cpu_register |
 678                                             LIR_OprDesc::single_size  |
 679                                             LIR_OprDesc::virtual_mask);
 680         break;
 681 
 682       case T_INT:
 683         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 684                                   LIR_OprDesc::int_type              |
 685                                   LIR_OprDesc::cpu_register          |
 686                                   LIR_OprDesc::single_size           |
 687                                   LIR_OprDesc::virtual_mask);
 688         break;
 689 
 690       case T_ADDRESS:
 691         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 692                                   LIR_OprDesc::address_type          |
 693                                   LIR_OprDesc::cpu_register          |
 694                                   LIR_OprDesc::single_size           |
 695                                   LIR_OprDesc::virtual_mask);
 696         break;
 697 
 698       case T_LONG:
 699         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 700                                   LIR_OprDesc::long_type             |
 701                                   LIR_OprDesc::cpu_register          |
 702                                   LIR_OprDesc::double_size           |
 703                                   LIR_OprDesc::virtual_mask);
 704         break;
 705 
 706 #ifdef __SOFTFP__
 707       case T_FLOAT:
 708         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 709                                   LIR_OprDesc::float_type  |
 710                                   LIR_OprDesc::cpu_register |
 711                                   LIR_OprDesc::single_size |
 712                                   LIR_OprDesc::virtual_mask);
 713         break;
 714       case T_DOUBLE:
 715         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 716                                   LIR_OprDesc::double_type |
 717                                   LIR_OprDesc::cpu_register |
 718                                   LIR_OprDesc::double_size |
 719                                   LIR_OprDesc::virtual_mask);
 720         break;
 721 #else // __SOFTFP__
 722       case T_FLOAT:
 723         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 724                                   LIR_OprDesc::float_type           |
 725                                   LIR_OprDesc::fpu_register         |
 726                                   LIR_OprDesc::single_size          |
 727                                   LIR_OprDesc::virtual_mask);
 728         break;
 729 
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
 737 #endif // __SOFTFP__
 738       default:       ShouldNotReachHere(); res = illegalOpr;
 739     }
 740 
 741 #ifdef ASSERT
 742     res->validate_type();
 743     assert(res->vreg_number() == index, "conversion check");
 744     assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
 745     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 746 
 747     // old-style calculation; check if old and new method are equal
 748     LIR_OprDesc::OprType t = as_OprType(type);
 749 #ifdef __SOFTFP__
 750     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 751                                t |
 752                                LIR_OprDesc::cpu_register |
 753                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 754 #else // __SOFTFP__
 755     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
 756                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
 757                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 758     assert(res == old_res, "old and new method not equal");
 759 #endif // __SOFTFP__
 760 #endif // ASSERT
 761 
 762     return res;
 763   }
 764 
  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack using indices 2 and 3 always has
  // index 2.
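  //
  // Illustrative sketch (not part of the VM sources), with 2 standing for a
  // slot index obtained from FrameMap::local_stack_pos():
  //
  //   LIR_Opr s_int  = LIR_OprFact::stack(2, T_INT);   // single-word slot 2
  //   LIR_Opr s_long = LIR_OprFact::stack(2, T_LONG);  // double-word value that
  //                                                    // occupies slots 2 and 3,
  //                                                    // still created with index 2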
 768   static LIR_Opr stack(int index, BasicType type) {
 769     LIR_Opr res;
 770     switch (type) {
 771       case T_OBJECT: // fall through
 772       case T_ARRAY:
 773         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 774                                   LIR_OprDesc::object_type           |
 775                                   LIR_OprDesc::stack_value           |
 776                                   LIR_OprDesc::single_size);
 777         break;
 778 
 779       case T_METADATA:
 780         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 781                                   LIR_OprDesc::metadata_type         |
 782                                   LIR_OprDesc::stack_value           |
 783                                   LIR_OprDesc::single_size);
 784         break;
 785       case T_INT:
 786         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 787                                   LIR_OprDesc::int_type              |
 788                                   LIR_OprDesc::stack_value           |
 789                                   LIR_OprDesc::single_size);
 790         break;
 791 
 792       case T_ADDRESS:
 793         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 794                                   LIR_OprDesc::address_type          |
 795                                   LIR_OprDesc::stack_value           |
 796                                   LIR_OprDesc::single_size);
 797         break;
 798 
 799       case T_LONG:
 800         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 801                                   LIR_OprDesc::long_type             |
 802                                   LIR_OprDesc::stack_value           |
 803                                   LIR_OprDesc::double_size);
 804         break;
 805 
 806       case T_FLOAT:
 807         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 808                                   LIR_OprDesc::float_type            |
 809                                   LIR_OprDesc::stack_value           |
 810                                   LIR_OprDesc::single_size);
 811         break;
 812       case T_DOUBLE:
 813         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 814                                   LIR_OprDesc::double_type           |
 815                                   LIR_OprDesc::stack_value           |
 816                                   LIR_OprDesc::double_size);
 817         break;
 818 
 819       default:       ShouldNotReachHere(); res = illegalOpr;
 820     }
 821 
 822 #ifdef ASSERT
 823     assert(index >= 0, "index must be positive");
 824     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 825 
 826     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 827                                           LIR_OprDesc::stack_value           |
 828                                           as_OprType(type)                   |
 829                                           LIR_OprDesc::size_for(type));
 830     assert(res == old_res, "old and new method not equal");
 831 #endif
 832 
 833     return res;
 834   }
 835 
 836   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 837   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 838   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 839   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 840   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 841   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 842   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 843   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 844   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 845   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 846   static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
 847 
 848   static LIR_Opr value_type(ValueType* type);
 849   static LIR_Opr dummy_value_type(ValueType* type);
 850 };
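
// A minimal usage sketch of the factory above (illustrative only, not part of
// the VM sources; it assumes a .cpp context that includes this header).
// Register number 0 and the virtual index are example values; virtual register
// indices must start at LIR_OprDesc::vreg_base.
//
//   LIR_Opr c    = LIR_OprFact::intConst(42);                      // constant operand
//   LIR_Opr r0   = LIR_OprFact::single_cpu(0);                     // fixed cpu register 0
//   LIR_Opr v    = LIR_OprFact::virtual_register(
//                      LIR_OprDesc::vreg_base, T_INT);             // virtual register
//   LIR_Opr addr = LIR_OprFact::address(
//                      new LIR_Address(r0, 8, T_INT));             // memory operand [r0 + 8]
//
//   assert(c->is_constant() && r0->is_single_cpu() && v->is_virtual_cpu(), "sanity");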
 851 
 852 
 853 //-------------------------------------------------------------------------------
 854 //                   LIR Instructions
 855 //-------------------------------------------------------------------------------
 856 //
 857 // Note:
 858 //  - every instruction has a result operand
//  - every instruction has a CodeEmitInfo operand (can be revisited later)
//  - every instruction has a LIR_OpCode operand
//  - LIR_OpN means an instruction that has N input operands
//    (see the examples after the class list below)
 862 //
 863 // class hierarchy:
 864 //
 865 class  LIR_Op;
 866 class    LIR_Op0;
 867 class      LIR_OpLabel;
 868 class    LIR_Op1;
 869 class      LIR_OpBranch;
 870 class      LIR_OpConvert;
 871 class      LIR_OpAllocObj;
 872 class      LIR_OpRoundFP;
 873 class    LIR_Op2;
 874 class    LIR_OpDelay;
 875 class    LIR_Op3;
 876 class      LIR_OpAllocArray;
 877 class    LIR_OpCall;
 878 class      LIR_OpJavaCall;
 879 class      LIR_OpRTCall;
 880 class    LIR_OpArrayCopy;
 881 class    LIR_OpUpdateCRC32;
 882 class    LIR_OpLock;
 883 class    LIR_OpTypeCheck;
 884 class    LIR_OpCompareAndSwap;
 885 class    LIR_OpProfileCall;
 886 class    LIR_OpProfileType;
 887 #ifdef ASSERT
 888 class    LIR_OpAssert;
 889 #endif
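
// For example, an add is a LIR_Op2 (two input operands and a result), a move
// is a LIR_Op1 (one input), and a label is a LIR_Op0/LIR_OpLabel (no inputs).
// Schematically (illustrative only, not part of the VM sources):
//
//   lir_add    in1, in2 -> result      // LIR_Op2
//   lir_move   src      -> dst         // LIR_Op1
//   lir_label                          // LIR_Op0 (LIR_OpLabel)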
 890 
 891 // LIR operation codes
 892 enum LIR_Code {
 893     lir_none
 894   , begin_op0
 895       , lir_word_align
 896       , lir_label
 897       , lir_nop
 898       , lir_backwardbranch_target
 899       , lir_std_entry
 900       , lir_osr_entry
 901       , lir_build_frame
 902       , lir_fpop_raw
 903       , lir_24bit_FPU
 904       , lir_reset_FPU
 905       , lir_breakpoint
 906       , lir_rtcall
 907       , lir_membar
 908       , lir_membar_acquire
 909       , lir_membar_release
 910       , lir_membar_loadload
 911       , lir_membar_storestore
 912       , lir_membar_loadstore
 913       , lir_membar_storeload
 914       , lir_get_thread
 915       , lir_on_spin_wait
 916   , end_op0
 917   , begin_op1
 918       , lir_fxch
 919       , lir_fld
 920       , lir_ffree
 921       , lir_push
 922       , lir_pop
 923       , lir_null_check
 924       , lir_return
 925       , lir_leal
 926       , lir_neg
 927       , lir_branch
 928       , lir_cond_float_branch
 929       , lir_move
 930       , lir_convert
 931       , lir_alloc_object
 932       , lir_monaddr
 933       , lir_roundfp
 934       , lir_safepoint
 935       , lir_pack64
 936       , lir_unpack64
 937       , lir_unwind
 938   , end_op1
 939   , begin_op2
 940       , lir_cmp
 941       , lir_cmp_l2i
 942       , lir_ucmp_fd2i
 943       , lir_cmp_fd2i
 944       , lir_cmove
 945       , lir_add
 946       , lir_sub
 947       , lir_mul
 948       , lir_mul_strictfp
 949       , lir_div
 950       , lir_div_strictfp
 951       , lir_rem
 952       , lir_sqrt
 953       , lir_abs
 954       , lir_tan
 955       , lir_log10
 956       , lir_logic_and
 957       , lir_logic_or
 958       , lir_logic_xor
 959       , lir_shl
 960       , lir_shr
 961       , lir_ushr
 962       , lir_alloc_array
 963       , lir_throw
 964       , lir_compare_to
 965       , lir_xadd
 966       , lir_xchg
 967   , end_op2
 968   , begin_op3
 969       , lir_idiv
 970       , lir_irem
 971   , end_op3
 972   , begin_opJavaCall
 973       , lir_static_call
 974       , lir_optvirtual_call
 975       , lir_icvirtual_call
 976       , lir_virtual_call
 977       , lir_dynamic_call
 978   , end_opJavaCall
 979   , begin_opArrayCopy
 980       , lir_arraycopy
 981   , end_opArrayCopy
 982   , begin_opUpdateCRC32
 983       , lir_updatecrc32
 984   , end_opUpdateCRC32
 985   , begin_opLock
 986     , lir_lock
 987     , lir_unlock
 988   , end_opLock
 989   , begin_delay_slot
 990     , lir_delay_slot
 991   , end_delay_slot
 992   , begin_opTypeCheck
 993     , lir_instanceof
 994     , lir_checkcast
 995     , lir_store_check
 996   , end_opTypeCheck
 997   , begin_opCompareAndSwap
 998     , lir_cas_long
 999     , lir_cas_obj
1000     , lir_cas_int
1001   , end_opCompareAndSwap
1002   , begin_opMDOProfile
1003     , lir_profile_call
1004     , lir_profile_type
1005   , end_opMDOProfile
1006   , begin_opAssert
1007     , lir_assert
1008   , end_opAssert
1009 };
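
// The begin_*/end_* markers above bracket groups of related opcodes so that
// group membership can be tested with a plain range check.  A sketch of the
// idiom (illustrative only; LIR_Op::is_in_range further down performs the same
// check for an op's own code):
//
//   bool is_java_call_code(LIR_Code code) {
//     return begin_opJavaCall < code && code < end_opJavaCall;
//   }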
1010 
1011 
1012 enum LIR_Condition {
1013     lir_cond_equal
1014   , lir_cond_notEqual
1015   , lir_cond_less
1016   , lir_cond_lessEqual
1017   , lir_cond_greaterEqual
1018   , lir_cond_greater
1019   , lir_cond_belowEqual
1020   , lir_cond_aboveEqual
1021   , lir_cond_always
1022   , lir_cond_unknown = -1
1023 };
1024 
1025 
1026 enum LIR_PatchCode {
1027   lir_patch_none,
1028   lir_patch_low,
1029   lir_patch_high,
1030   lir_patch_normal
1031 };
1032 
1033 
1034 enum LIR_MoveKind {
1035   lir_move_normal,
1036   lir_move_volatile,
1037   lir_move_unaligned,
1038   lir_move_wide,
1039   lir_move_max_flag
1040 };
1041 
1042 
1043 // --------------------------------------------------
1044 // LIR_Op
1045 // --------------------------------------------------
1046 class LIR_Op: public CompilationResourceObj {
1047  friend class LIR_OpVisitState;
1048 
1049 #ifdef ASSERT
1050  private:
1051   const char *  _file;
1052   int           _line;
1053 #endif
1054 
1055  protected:
1056   LIR_Opr       _result;
1057   unsigned short _code;
1058   unsigned short _flags;
1059   CodeEmitInfo* _info;
1060   int           _id;     // value id for register allocation
1061   int           _fpu_pop_count;
1062   Instruction*  _source; // for debugging
1063 
1064   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1065 
1066  protected:
1067   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1068 
1069  public:
1070   LIR_Op()
1071     : _result(LIR_OprFact::illegalOpr)
1072     , _code(lir_none)
1073     , _flags(0)
1074     , _info(NULL)
1075 #ifdef ASSERT
1076     , _file(NULL)
1077     , _line(0)
1078 #endif
1079     , _fpu_pop_count(0)
1080     , _source(NULL)
1081     , _id(-1)                             {}
1082 
1083   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1084     : _result(result)
1085     , _code(code)
1086     , _flags(0)
1087     , _info(info)
1088 #ifdef ASSERT
1089     , _file(NULL)
1090     , _line(0)
1091 #endif
1092     , _fpu_pop_count(0)
1093     , _source(NULL)
1094     , _id(-1)                             {}
1095 
1096   CodeEmitInfo* info() const                  { return _info;   }
1097   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1098   LIR_Opr result_opr() const                  { return _result; }
1099   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1100 
1101 #ifdef ASSERT
1102   void set_file_and_line(const char * file, int line) {
1103     _file = file;
1104     _line = line;
1105   }
1106 #endif
1107 
1108   virtual const char * name() const PRODUCT_RETURN0;
1109 
1110   int id()             const                  { return _id;     }
1111   void set_id(int id)                         { _id = id; }
1112 
1113   // FPU stack simulation helpers -- only used on Intel
1114   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1115   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1116   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1117 
1118   Instruction* source() const                 { return _source; }
1119   void set_source(Instruction* ins)           { _source = ins; }
1120 
1121   virtual void emit_code(LIR_Assembler* masm) = 0;
1122   virtual void print_instr(outputStream* out) const   = 0;
1123   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1124 
1125   virtual bool is_patching() { return false; }
1126   virtual LIR_OpCall* as_OpCall() { return NULL; }
1127   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1128   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1129   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1130   virtual LIR_OpLock* as_OpLock() { return NULL; }
1131   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1132   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1133   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1134   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1135   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1136   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1137   virtual LIR_Op0* as_Op0() { return NULL; }
1138   virtual LIR_Op1* as_Op1() { return NULL; }
1139   virtual LIR_Op2* as_Op2() { return NULL; }
1140   virtual LIR_Op3* as_Op3() { return NULL; }
1141   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1142   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
1143   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1144   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1145   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1146   virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
1147 #ifdef ASSERT
1148   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
1149 #endif
1150 
1151   virtual void verify() const {}
1152 };
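
// The as_OpXxx() virtuals above implement a lightweight checked downcast: the
// base class answers NULL and each subclass overrides only its own accessor.
// Typical use (illustrative sketch, not part of the VM sources):
//
//   if (LIR_OpJavaCall* call = op->as_OpJavaCall()) {
//     // op is known to be a Java call here
//     ciMethod* callee = call->method();
//     ...
//   }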
1153 
1154 // for calls
1155 class LIR_OpCall: public LIR_Op {
1156  friend class LIR_OpVisitState;
1157 
1158  protected:
1159   address      _addr;
1160   LIR_OprList* _arguments;
1161  protected:
1162   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1163              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1164     : LIR_Op(code, result, info)
1165     , _arguments(arguments)
1166     , _addr(addr) {}
1167 
1168  public:
1169   address addr() const                           { return _addr; }
1170   const LIR_OprList* arguments() const           { return _arguments; }
1171   virtual LIR_OpCall* as_OpCall()                { return this; }
1172 };
1173 
1174 
1175 // --------------------------------------------------
1176 // LIR_OpJavaCall
1177 // --------------------------------------------------
1178 class LIR_OpJavaCall: public LIR_OpCall {
1179  friend class LIR_OpVisitState;
1180 
1181  private:
1182   ciMethod* _method;
1183   LIR_Opr   _receiver;
1184   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1185 
1186  public:
1187   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1188                  LIR_Opr receiver, LIR_Opr result,
1189                  address addr, LIR_OprList* arguments,
1190                  CodeEmitInfo* info)
1191   : LIR_OpCall(code, addr, result, arguments, info)
1192   , _receiver(receiver)
1193   , _method(method)
1194   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1195   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1196 
1197   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1198                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1199                  LIR_OprList* arguments, CodeEmitInfo* info)
1200   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1201   , _receiver(receiver)
1202   , _method(method)
1203   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1204   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1205 
1206   LIR_Opr receiver() const                       { return _receiver; }
1207   ciMethod* method() const                       { return _method;   }
1208 
1209   // JSR 292 support.
1210   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1211   bool is_method_handle_invoke() const {
1212     return method()->is_compiled_lambda_form() ||   // Java-generated lambda form
1213            method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1214   }
1215 
1216   intptr_t vtable_offset() const {
1217     assert(_code == lir_virtual_call, "only have vtable for real vcall");
1218     return (intptr_t) addr();
1219   }
1220 
1221   virtual void emit_code(LIR_Assembler* masm);
1222   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1223   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1224 };
1225 
1226 // --------------------------------------------------
1227 // LIR_OpLabel
1228 // --------------------------------------------------
1229 // Location where a branch can continue
1230 class LIR_OpLabel: public LIR_Op {
1231  friend class LIR_OpVisitState;
1232 
1233  private:
1234   Label* _label;
1235  public:
1236   LIR_OpLabel(Label* lbl)
1237    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1238    , _label(lbl)                                 {}
1239   Label* label() const                           { return _label; }
1240 
1241   virtual void emit_code(LIR_Assembler* masm);
1242   virtual LIR_OpLabel* as_OpLabel() { return this; }
1243   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1244 };
1245 
1246 // LIR_OpArrayCopy
1247 class LIR_OpArrayCopy: public LIR_Op {
1248  friend class LIR_OpVisitState;
1249 
1250  private:
1251   ArrayCopyStub*  _stub;
1252   LIR_Opr   _src;
1253   LIR_Opr   _src_pos;
1254   LIR_Opr   _dst;
1255   LIR_Opr   _dst_pos;
1256   LIR_Opr   _length;
1257   LIR_Opr   _tmp;
1258   ciArrayKlass* _expected_type;
1259   int       _flags;
1260 
1261 public:
1262   enum Flags {
1263     src_null_check         = 1 << 0,
1264     dst_null_check         = 1 << 1,
1265     src_pos_positive_check = 1 << 2,
1266     dst_pos_positive_check = 1 << 3,
1267     length_positive_check  = 1 << 4,
1268     src_range_check        = 1 << 5,
1269     dst_range_check        = 1 << 6,
1270     type_check             = 1 << 7,
1271     overlapping            = 1 << 8,
1272     unaligned              = 1 << 9,
1273     src_objarray           = 1 << 10,
1274     dst_objarray           = 1 << 11,
1275     all_flags              = (1 << 12) - 1
1276   };
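
       // Illustrative note: these flags are bit masks that the creator of the
       // arraycopy op ORs together to describe which runtime checks still have to
       // be emitted; a set bit means the corresponding check could not be proven
       // unnecessary.  For example, a copy that still needs a source null check
       // and a source range check would be built with
       //
       //   int flags = LIR_OpArrayCopy::src_null_check | LIR_OpArrayCopy::src_range_check;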
1277 
1278   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1279                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1280 
1281   LIR_Opr src() const                            { return _src; }
1282   LIR_Opr src_pos() const                        { return _src_pos; }
1283   LIR_Opr dst() const                            { return _dst; }
1284   LIR_Opr dst_pos() const                        { return _dst_pos; }
1285   LIR_Opr length() const                         { return _length; }
1286   LIR_Opr tmp() const                            { return _tmp; }
1287   int flags() const                              { return _flags; }
1288   ciArrayKlass* expected_type() const            { return _expected_type; }
1289   ArrayCopyStub* stub() const                    { return _stub; }
1290 
1291   virtual void emit_code(LIR_Assembler* masm);
1292   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1293   void print_instr(outputStream* out) const PRODUCT_RETURN;
1294 };
1295 
1296 // LIR_OpUpdateCRC32
1297 class LIR_OpUpdateCRC32: public LIR_Op {
1298   friend class LIR_OpVisitState;
1299 
1300 private:
1301   LIR_Opr   _crc;
1302   LIR_Opr   _val;
1303 
1304 public:
1305 
1306   LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
1307 
1308   LIR_Opr crc() const                            { return _crc; }
1309   LIR_Opr val() const                            { return _val; }
1310 
1311   virtual void emit_code(LIR_Assembler* masm);
1312   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
1313   void print_instr(outputStream* out) const PRODUCT_RETURN;
1314 };
1315 
1316 // --------------------------------------------------
1317 // LIR_Op0
1318 // --------------------------------------------------
1319 class LIR_Op0: public LIR_Op {
1320  friend class LIR_OpVisitState;
1321 
1322  public:
1323   LIR_Op0(LIR_Code code)
1324    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1325   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1326    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1327 
1328   virtual void emit_code(LIR_Assembler* masm);
1329   virtual LIR_Op0* as_Op0() { return this; }
1330   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1331 };
1332 
1333 
1334 // --------------------------------------------------
1335 // LIR_Op1
1336 // --------------------------------------------------
1337 
1338 class LIR_Op1: public LIR_Op {
1339  friend class LIR_OpVisitState;
1340 
1341  protected:
1342   LIR_Opr         _opr;   // input operand
1343   BasicType       _type;  // Operand types
1344   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1345 
1346   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1347 
1348   void set_kind(LIR_MoveKind kind) {
1349     assert(code() == lir_move, "must be");
1350     _flags = kind;
1351   }
1352 
1353  public:
1354   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1355     : LIR_Op(code, result, info)
1356     , _opr(opr)
1357     , _patch(patch)
1358     , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1359 
1360   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1361     : LIR_Op(code, result, info)
1362     , _opr(opr)
1363     , _patch(patch)
1364     , _type(type)                      {
1365     assert(code == lir_move, "must be");
1366     set_kind(kind);
1367   }
1368 
1369   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1370     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1371     , _opr(opr)
1372     , _patch(lir_patch_none)
1373     , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1374 
1375   LIR_Opr in_opr()           const               { return _opr;   }
1376   LIR_PatchCode patch_code() const               { return _patch; }
1377   BasicType type()           const               { return _type;  }
1378 
1379   LIR_MoveKind move_kind() const {
1380     assert(code() == lir_move, "must be");
1381     return (LIR_MoveKind)_flags;
1382   }
1383 
1384   virtual bool is_patching() { return _patch != lir_patch_none; }
1385   virtual void emit_code(LIR_Assembler* masm);
1386   virtual LIR_Op1* as_Op1() { return this; }
1387   virtual const char * name() const PRODUCT_RETURN0;
1388 
1389   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1390 
1391   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1392   virtual void verify() const;
1393 };
1394 
1395 
1396 // for runtime calls
1397 class LIR_OpRTCall: public LIR_OpCall {
1398  friend class LIR_OpVisitState;
1399 
1400  private:
1401   LIR_Opr _tmp;
1402  public:
1403   LIR_OpRTCall(address addr, LIR_Opr tmp,
1404                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1405     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1406     , _tmp(tmp) {}
1407 
1408   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1409   virtual void emit_code(LIR_Assembler* masm);
1410   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1411 
1412   LIR_Opr tmp() const                            { return _tmp; }
1413 
1414   virtual void verify() const;
1415 };
1416 
1417 
1418 class LIR_OpBranch: public LIR_Op {
1419  friend class LIR_OpVisitState;
1420 
1421  private:
1422   LIR_Condition _cond;
1423   BasicType     _type;
1424   Label*        _label;
1425   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1426   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1427   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1428 
1429  public:
1430   LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
1431     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1432     , _cond(cond)
1433     , _type(type)
1434     , _label(lbl)
1435     , _block(NULL)
1436     , _ublock(NULL)
1437     , _stub(NULL) { }
1438 
1439   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1440   LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1441 
1442   // for unordered comparisons
1443   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1444 
1445   LIR_Condition cond()        const              { return _cond;        }
1446   BasicType     type()        const              { return _type;        }
1447   Label*        label()       const              { return _label;       }
1448   BlockBegin*   block()       const              { return _block;       }
1449   BlockBegin*   ublock()      const              { return _ublock;      }
1450   CodeStub*     stub()        const              { return _stub;       }
1451 
1452   void          change_block(BlockBegin* b);
1453   void          change_ublock(BlockBegin* b);
1454   void          negate_cond();
1455 
1456   virtual void emit_code(LIR_Assembler* masm);
1457   virtual LIR_OpBranch* as_OpBranch() { return this; }
1458   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1459 };
1460 
1461 
1462 class ConversionStub;
1463 
1464 class LIR_OpConvert: public LIR_Op1 {
1465  friend class LIR_OpVisitState;
1466 
1467  private:
1468    Bytecodes::Code _bytecode;
1469    ConversionStub* _stub;
1470 #ifdef PPC32
1471   LIR_Opr _tmp1;
1472   LIR_Opr _tmp2;
1473 #endif
1474 
1475  public:
1476    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1477      : LIR_Op1(lir_convert, opr, result)
1478      , _stub(stub)
1479 #ifdef PPC32
1480      , _tmp1(LIR_OprDesc::illegalOpr())
1481      , _tmp2(LIR_OprDesc::illegalOpr())
1482 #endif
1483      , _bytecode(code)                           {}
1484 
1485 #ifdef PPC32
1486    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
1487                  ,LIR_Opr tmp1, LIR_Opr tmp2)
1488      : LIR_Op1(lir_convert, opr, result)
1489      , _stub(stub)
1490      , _tmp1(tmp1)
1491      , _tmp2(tmp2)
1492      , _bytecode(code)                           {}
1493 #endif
1494 
1495   Bytecodes::Code bytecode() const               { return _bytecode; }
1496   ConversionStub* stub() const                   { return _stub; }
1497 #ifdef PPC32
1498   LIR_Opr tmp1() const                           { return _tmp1; }
1499   LIR_Opr tmp2() const                           { return _tmp2; }
1500 #endif
1501 
1502   virtual void emit_code(LIR_Assembler* masm);
1503   virtual LIR_OpConvert* as_OpConvert() { return this; }
1504   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1505 
1506   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1507 };
1508 
1509 
1510 // LIR_OpAllocObj
1511 class LIR_OpAllocObj : public LIR_Op1 {
1512  friend class LIR_OpVisitState;
1513 
1514  private:
1515   LIR_Opr _tmp1;
1516   LIR_Opr _tmp2;
1517   LIR_Opr _tmp3;
1518   LIR_Opr _tmp4;
1519   int     _hdr_size;
1520   int     _obj_size;
1521   CodeStub* _stub;
1522   bool    _init_check;
1523 
1524  public:
1525   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1526                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1527                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1528     : LIR_Op1(lir_alloc_object, klass, result)
1529     , _tmp1(t1)
1530     , _tmp2(t2)
1531     , _tmp3(t3)
1532     , _tmp4(t4)
1533     , _hdr_size(hdr_size)
1534     , _obj_size(obj_size)
1535     , _init_check(init_check)
1536     , _stub(stub)                                { }
1537 
1538   LIR_Opr klass()        const                   { return in_opr();     }
1539   LIR_Opr obj()          const                   { return result_opr(); }
1540   LIR_Opr tmp1()         const                   { return _tmp1;        }
1541   LIR_Opr tmp2()         const                   { return _tmp2;        }
1542   LIR_Opr tmp3()         const                   { return _tmp3;        }
1543   LIR_Opr tmp4()         const                   { return _tmp4;        }
1544   int     header_size()  const                   { return _hdr_size;    }
1545   int     object_size()  const                   { return _obj_size;    }
1546   bool    init_check()   const                   { return _init_check;  }
1547   CodeStub* stub()       const                   { return _stub;        }
1548 
1549   virtual void emit_code(LIR_Assembler* masm);
1550   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1551   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1552 };
1553 
1554 
1555 // LIR_OpRoundFP
1556 class LIR_OpRoundFP : public LIR_Op1 {
1557  friend class LIR_OpVisitState;
1558 
1559  private:
1560   LIR_Opr _tmp;
1561 
1562  public:
1563   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1564     : LIR_Op1(lir_roundfp, reg, result)
1565     , _tmp(stack_loc_temp) {}
1566 
1567   LIR_Opr tmp() const                            { return _tmp; }
1568   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1569   void print_instr(outputStream* out) const PRODUCT_RETURN;
1570 };
1571 
1572 // LIR_OpTypeCheck
1573 class LIR_OpTypeCheck: public LIR_Op {
1574  friend class LIR_OpVisitState;
1575 
1576  private:
1577   LIR_Opr       _object;
1578   LIR_Opr       _array;
1579   ciKlass*      _klass;
1580   LIR_Opr       _tmp1;
1581   LIR_Opr       _tmp2;
1582   LIR_Opr       _tmp3;
1583   bool          _fast_check;
1584   CodeEmitInfo* _info_for_patch;
1585   CodeEmitInfo* _info_for_exception;
1586   CodeStub*     _stub;
1587   ciMethod*     _profiled_method;
1588   int           _profiled_bci;
1589   bool          _should_profile;
1590 
1591 public:
1592   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1593                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1594                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1595   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1596                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1597 
1598   LIR_Opr object() const                         { return _object;         }
1599   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1600   LIR_Opr tmp1() const                           { return _tmp1;           }
1601   LIR_Opr tmp2() const                           { return _tmp2;           }
1602   LIR_Opr tmp3() const                           { return _tmp3;           }
1603   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1604   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1605   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1606   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1607   CodeStub* stub() const                         { return _stub;           }
1608 
1609   // MethodData* profiling
1610   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1611   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1612   void set_should_profile(bool b)                { _should_profile = b;       }
1613   ciMethod* profiled_method() const              { return _profiled_method;   }
1614   int       profiled_bci() const                 { return _profiled_bci;      }
1615   bool      should_profile() const               { return _should_profile;    }
1616 
1617   virtual bool is_patching() { return _info_for_patch != NULL; }
1618   virtual void emit_code(LIR_Assembler* masm);
1619   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1620   void print_instr(outputStream* out) const PRODUCT_RETURN;
1621 };
1622 
1623 // LIR_Op2
1624 class LIR_Op2: public LIR_Op {
1625  friend class LIR_OpVisitState;
1626 
1627   int  _fpu_stack_size; // for sin/cos implementation on Intel
1628 
1629  protected:
1630   LIR_Opr   _opr1;
1631   LIR_Opr   _opr2;
1632   BasicType _type;
1633   LIR_Opr   _tmp1;
1634   LIR_Opr   _tmp2;
1635   LIR_Opr   _tmp3;
1636   LIR_Opr   _tmp4;
1637   LIR_Opr   _tmp5;
1638   LIR_Condition _condition;
1639 
1640   void verify() const;
1641 
1642  public:
1643   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1644     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1645     , _opr1(opr1)
1646     , _opr2(opr2)
1647     , _type(T_ILLEGAL)
1648     , _condition(condition)
1649     , _fpu_stack_size(0)
1650     , _tmp1(LIR_OprFact::illegalOpr)
1651     , _tmp2(LIR_OprFact::illegalOpr)
1652     , _tmp3(LIR_OprFact::illegalOpr)
1653     , _tmp4(LIR_OprFact::illegalOpr)
1654     , _tmp5(LIR_OprFact::illegalOpr) {
1655     assert(code == lir_cmp || code == lir_assert, "code check");
1656   }
1657 
1658   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1659     : LIR_Op(code, result, NULL)
1660     , _opr1(opr1)
1661     , _opr2(opr2)
1662     , _type(type)
1663     , _condition(condition)
1664     , _fpu_stack_size(0)
1665     , _tmp1(LIR_OprFact::illegalOpr)
1666     , _tmp2(LIR_OprFact::illegalOpr)
1667     , _tmp3(LIR_OprFact::illegalOpr)
1668     , _tmp4(LIR_OprFact::illegalOpr)
1669     , _tmp5(LIR_OprFact::illegalOpr) {
1670     assert(code == lir_cmove, "code check");
1671     assert(type != T_ILLEGAL, "cmove should have type");
1672   }
1673 
1674   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1675           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1676     : LIR_Op(code, result, info)
1677     , _opr1(opr1)
1678     , _opr2(opr2)
1679     , _type(type)
1680     , _condition(lir_cond_unknown)
1681     , _fpu_stack_size(0)
1682     , _tmp1(LIR_OprFact::illegalOpr)
1683     , _tmp2(LIR_OprFact::illegalOpr)
1684     , _tmp3(LIR_OprFact::illegalOpr)
1685     , _tmp4(LIR_OprFact::illegalOpr)
1686     , _tmp5(LIR_OprFact::illegalOpr) {
1687     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1688   }
1689 
1690   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1691           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1692     : LIR_Op(code, result, NULL)
1693     , _opr1(opr1)
1694     , _opr2(opr2)
1695     , _type(T_ILLEGAL)
1696     , _condition(lir_cond_unknown)
1697     , _fpu_stack_size(0)
1698     , _tmp1(tmp1)
1699     , _tmp2(tmp2)
1700     , _tmp3(tmp3)
1701     , _tmp4(tmp4)
1702     , _tmp5(tmp5) {
1703     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1704   }
1705 
1706   LIR_Opr in_opr1() const                        { return _opr1; }
1707   LIR_Opr in_opr2() const                        { return _opr2; }
1708   BasicType type()  const                        { return _type; }
1709   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1710   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1711   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1712   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1713   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1714   LIR_Condition condition() const  {
1715     assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp, cmove and assert"); return _condition;
1716   }
1717   void set_condition(LIR_Condition condition) {
1718     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1719   }
1720 
1721   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1722   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1723 
1724   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1725   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1726 
1727   virtual void emit_code(LIR_Assembler* masm);
1728   virtual LIR_Op2* as_Op2() { return this; }
1729   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1730 };
1731 
1732 class LIR_OpAllocArray : public LIR_Op {
1733  friend class LIR_OpVisitState;
1734 
1735  private:
1736   LIR_Opr   _klass;
1737   LIR_Opr   _len;
1738   LIR_Opr   _tmp1;
1739   LIR_Opr   _tmp2;
1740   LIR_Opr   _tmp3;
1741   LIR_Opr   _tmp4;
1742   BasicType _type;
1743   CodeStub* _stub;
1744 
1745  public:
1746   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1747     : LIR_Op(lir_alloc_array, result, NULL)
1748     , _klass(klass)
1749     , _len(len)
1750     , _tmp1(t1)
1751     , _tmp2(t2)
1752     , _tmp3(t3)
1753     , _tmp4(t4)
1754     , _type(type)
1755     , _stub(stub) {}
1756 
1757   LIR_Opr   klass()   const                      { return _klass;       }
1758   LIR_Opr   len()     const                      { return _len;         }
1759   LIR_Opr   obj()     const                      { return result_opr(); }
1760   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1761   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1762   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1763   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1764   BasicType type()    const                      { return _type;        }
1765   CodeStub* stub()    const                      { return _stub;        }
1766 
1767   virtual void emit_code(LIR_Assembler* masm);
1768   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1769   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1770 };
1771 
1772 
1773 class LIR_Op3: public LIR_Op {
1774  friend class LIR_OpVisitState;
1775 
1776  private:
1777   LIR_Opr _opr1;
1778   LIR_Opr _opr2;
1779   LIR_Opr _opr3;
1780  public:
1781   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1782     : LIR_Op(code, result, info)
1783     , _opr1(opr1)
1784     , _opr2(opr2)
1785     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1786   LIR_Opr in_opr1() const                        { return _opr1; }
1787   LIR_Opr in_opr2() const                        { return _opr2; }
1788   LIR_Opr in_opr3() const                        { return _opr3; }
1789 
1790   virtual void emit_code(LIR_Assembler* masm);
1791   virtual LIR_Op3* as_Op3() { return this; }
1792   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1793 };
1794 
1795 
1796 //--------------------------------
1797 class LabelObj: public CompilationResourceObj {
1798  private:
1799   Label _label;
1800  public:
1801   LabelObj()                                     {}
1802   Label* label()                                 { return &_label; }
1803 };
1804 
1805 
1806 class LIR_OpLock: public LIR_Op {
1807  friend class LIR_OpVisitState;
1808 
1809  private:
1810   LIR_Opr _hdr;
1811   LIR_Opr _obj;
1812   LIR_Opr _lock;
1813   LIR_Opr _scratch;
1814   CodeStub* _stub;
1815  public:
1816   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1817     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1818     , _hdr(hdr)
1819     , _obj(obj)
1820     , _lock(lock)
1821     , _scratch(scratch)
1822     , _stub(stub)                      {}
1823 
1824   LIR_Opr hdr_opr() const                        { return _hdr; }
1825   LIR_Opr obj_opr() const                        { return _obj; }
1826   LIR_Opr lock_opr() const                       { return _lock; }
1827   LIR_Opr scratch_opr() const                    { return _scratch; }
1828   CodeStub* stub() const                         { return _stub; }
1829 
1830   virtual void emit_code(LIR_Assembler* masm);
1831   virtual LIR_OpLock* as_OpLock() { return this; }
1832   void print_instr(outputStream* out) const PRODUCT_RETURN;
1833 };
1834 
1835 
1836 class LIR_OpDelay: public LIR_Op {
1837  friend class LIR_OpVisitState;
1838 
1839  private:
1840   LIR_Op* _op;
1841 
1842  public:
1843   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1844     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1845     _op(op) {
1846     assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1847   }
1848   virtual void emit_code(LIR_Assembler* masm);
1849   virtual LIR_OpDelay* as_OpDelay() { return this; }
1850   void print_instr(outputStream* out) const PRODUCT_RETURN;
1851   LIR_Op* delay_op() const { return _op; }
1852   CodeEmitInfo* call_info() const { return info(); }
1853 };
1854 
1855 #ifdef ASSERT
1856 // LIR_OpAssert
1857 class LIR_OpAssert : public LIR_Op2 {
1858  friend class LIR_OpVisitState;
1859 
1860  private:
1861   const char* _msg;
1862   bool        _halt;
1863 
1864  public:
1865   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1866     : LIR_Op2(lir_assert, condition, opr1, opr2)
1867     , _halt(halt)
1868     , _msg(msg) {
1869   }
1870 
1871   const char* msg() const                        { return _msg; }
1872   bool        halt() const                       { return _halt; }
1873 
1874   virtual void emit_code(LIR_Assembler* masm);
1875   virtual LIR_OpAssert* as_OpAssert()            { return this; }
1876   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1877 };
1878 #endif
1879 
1880 // LIR_OpCompareAndSwap
1881 class LIR_OpCompareAndSwap : public LIR_Op {
1882  friend class LIR_OpVisitState;
1883 
1884  private:
1885   LIR_Opr _addr;
1886   LIR_Opr _cmp_value;
1887   LIR_Opr _new_value;
1888   LIR_Opr _tmp1;
1889   LIR_Opr _tmp2;
1890 
1891  public:
1892   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1893                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1894     : LIR_Op(code, result, NULL)  // no info
1895     , _addr(addr)
1896     , _cmp_value(cmp_value)
1897     , _new_value(new_value)
1898     , _tmp1(t1)
1899     , _tmp2(t2)                                  { }
1900 
1901   LIR_Opr addr()        const                    { return _addr;  }
1902   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1903   LIR_Opr new_value()   const                    { return _new_value; }
1904   LIR_Opr tmp1()        const                    { return _tmp1;      }
1905   LIR_Opr tmp2()        const                    { return _tmp2;      }
1906 
1907   virtual void emit_code(LIR_Assembler* masm);
1908   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1909   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1910 };
1911 
1912 // LIR_OpProfileCall
1913 class LIR_OpProfileCall : public LIR_Op {
1914  friend class LIR_OpVisitState;
1915 
1916  private:
1917   ciMethod* _profiled_method;
1918   int       _profiled_bci;
1919   ciMethod* _profiled_callee;
1920   LIR_Opr   _mdo;
1921   LIR_Opr   _recv;
1922   LIR_Opr   _tmp1;
1923   ciKlass*  _known_holder;
1924 
1925  public:
1926   // Destroys recv
1927   LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1928     : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1929     , _profiled_method(profiled_method)
1930     , _profiled_bci(profiled_bci)
1931     , _profiled_callee(profiled_callee)
1932     , _mdo(mdo)
1933     , _recv(recv)
1934     , _tmp1(t1)
1935     , _known_holder(known_holder)                { }
1936 
1937   ciMethod* profiled_method() const              { return _profiled_method;  }
1938   int       profiled_bci()    const              { return _profiled_bci;     }
1939   ciMethod* profiled_callee() const              { return _profiled_callee;  }
1940   LIR_Opr   mdo()             const              { return _mdo;              }
1941   LIR_Opr   recv()            const              { return _recv;             }
1942   LIR_Opr   tmp1()            const              { return _tmp1;             }
1943   ciKlass*  known_holder()    const              { return _known_holder;     }
1944 
1945   virtual void emit_code(LIR_Assembler* masm);
1946   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1947   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1948 };
1949 
1950 // LIR_OpProfileType
1951 class LIR_OpProfileType : public LIR_Op {
1952  friend class LIR_OpVisitState;
1953 
1954  private:
1955   LIR_Opr      _mdp;
1956   LIR_Opr      _obj;
1957   LIR_Opr      _tmp;
1958   ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
1959   intptr_t     _current_klass; // what the profiling currently reports
1960   bool         _not_null;      // true if we know statically that _obj cannot be null
1961   bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
1962                                // _exact_klass is the only possible type for this parameter in any context.
1963 
1964  public:
1966   LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
1967     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1968     , _mdp(mdp)
1969     , _obj(obj)
1970     , _exact_klass(exact_klass)
1971     , _current_klass(current_klass)
1972     , _tmp(tmp)
1973     , _not_null(not_null)
1974     , _no_conflict(no_conflict) { }
1975 
1976   LIR_Opr      mdp()              const             { return _mdp;              }
1977   LIR_Opr      obj()              const             { return _obj;              }
1978   LIR_Opr      tmp()              const             { return _tmp;              }
1979   ciKlass*     exact_klass()      const             { return _exact_klass;      }
1980   intptr_t     current_klass()    const             { return _current_klass;    }
1981   bool         not_null()         const             { return _not_null;         }
1982   bool         no_conflict()      const             { return _no_conflict;      }
1983 
1984   virtual void emit_code(LIR_Assembler* masm);
1985   virtual LIR_OpProfileType* as_OpProfileType() { return this; }
1986   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1987 };
1988 
1989 class LIR_InsertionBuffer;
1990 
1991 //--------------------------------LIR_List---------------------------------------------------
1992 // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1993 // The LIR instructions are appended by the LIR_List class itself;
1994 //
1995 // Notes:
1996 // - all offsets are (or should be) in bytes
1997 // - local positions are specified with an offset, with offset 0 being local 0
1998 
1999 class LIR_List: public CompilationResourceObj {
2000  private:
2001   LIR_OpList  _operations;
2002 
2003   Compilation*  _compilation;
2004 #ifndef PRODUCT
2005   BlockBegin*   _block;
2006 #endif
2007 #ifdef ASSERT
2008   const char *  _file;
2009   int           _line;
2010 #endif
2011 
2012   void append(LIR_Op* op) {
2013     if (op->source() == NULL)
2014       op->set_source(_compilation->current_instruction());
2015 #ifndef PRODUCT
2016     if (PrintIRWithLIR) {
2017       _compilation->maybe_print_current_instruction();
2018       op->print(); tty->cr();
2019     }
2020 #endif // PRODUCT
2021 
2022     _operations.append(op);
2023 
2024 #ifdef ASSERT
2025     op->verify();
2026     op->set_file_and_line(_file, _line);
2027     _file = NULL;
2028     _line = 0;
2029 #endif
2030   }
2031 
2032  public:
2033   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
2034 
2035 #ifdef ASSERT
2036   void set_file_and_line(const char * file, int line);
2037 #endif
2038 
2039   //---------- accessors ---------------
2040   LIR_OpList* instructions_list()                { return &_operations; }
2041   int         length() const                     { return _operations.length(); }
2042   LIR_Op*     at(int i) const                    { return _operations.at(i); }
2043 
2044   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
2045 
2046   // insert LIR_Ops in buffer to right places in LIR_List
2047   void append(LIR_InsertionBuffer* buffer);
2048 
2049   //---------- mutators ---------------
2050   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
2051   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
2052   void remove_at(int i)                          { _operations.remove_at(i); }
2053 
2054   //---------- printing -------------
2055   void print_instructions() PRODUCT_RETURN;
2056 
2057 
2058   //---------- instructions -------------
2059   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2060                         address dest, LIR_OprList* arguments,
2061                         CodeEmitInfo* info) {
2062     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2063   }
2064   void call_static(ciMethod* method, LIR_Opr result,
2065                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2066     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2067   }
2068   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2069                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2070     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2071   }
2072   void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2073                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
2074     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
2075   }
2076   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2077                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2078     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2079   }
2080 
2081   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
2082   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
2083   void membar()                                  { append(new LIR_Op0(lir_membar)); }
2084   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
2085   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
2086   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
2087   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
2088   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
2089   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
2090 
2091   void nop()                                     { append(new LIR_Op0(lir_nop)); }
2092   void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
2093 
2094   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
2095   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2096 
2097   void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }
2098 
2099   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
2100 
2101   void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
2102   void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
2103 
2104   // result is a stack location for old backend and vreg for UseLinearScan
2105   // stack_loc_temp is an illegal register for old backend
2106   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2107   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2108   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2109   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2110   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2111   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2112   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2113   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
2114     if (UseCompressedOops) {
2115       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2116     } else {
2117       move(src, dst, info);
2118     }
2119   }
2120   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2121     if (UseCompressedOops) {
2122       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2123     } else {
2124       move(src, dst, info);
2125     }
2126   }
2127   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2128 
2129   void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2130   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2131 
2132   void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2133   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2134 
2135   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
2136 
2137   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2138 
2139 #ifdef PPC32
2140   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
2141 #endif
2142   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2143 
2144   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2145   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2146   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2147 
2148   void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
2149   void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
2150 
2151   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
2152   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2153     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2154   }
2155   void unwind_exception(LIR_Opr exceptionOop) {
2156     append(new LIR_Op1(lir_unwind, exceptionOop));
2157   }
2158 
2159   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2160     append(new LIR_Op2(lir_compare_to,  left, right, dst));
2161   }
2162 
2163   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2164   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2165 
2166   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2167     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2168   }
2169   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2170     cmp(condition, left, LIR_OprFact::intConst(right), info);
2171   }
2172 
2173   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2174   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2175 
2176   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2177     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2178   }
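
       // Typical pattern, sketched with illustrative operand names: a cmp first
       // establishes the condition that the following cmove consumes.
       //
       //   cmp(lir_cond_less, left_opr, right_opr);
       //   cmove(lir_cond_less, smaller_opr, larger_opr, result_opr, T_INT);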
2179 
2180   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2181                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2182   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2183                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2184   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2185                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2186 
2187   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2188   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2189   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2190   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2191 
2192   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2193   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2194   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2195   void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
2196   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2197   void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
2198   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2199 
2200   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2201   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2202 
2203   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2204 
2205   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2206   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2207   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2208   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2209   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2210 
2211   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2212   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2213   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2214   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2215 
2216   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2217   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2218 
2219   // jump is an unconditional branch
2220   void jump(BlockBegin* block) {
2221     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
2222   }
2223   void jump(CodeStub* stub) {
2224     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
2225   }
2226   void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
2227   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
2228     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2229     append(new LIR_OpBranch(cond, type, block));
2230   }
2231   void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
2232     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2233     append(new LIR_OpBranch(cond, type, stub));
2234   }
2235   void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2236     assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2237     append(new LIR_OpBranch(cond, type, block, unordered));
2238   }
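
       // For float/double compares the extra block receives the "unordered" case
       // (at least one operand is NaN).  A sketch, with illustrative names:
       //
       //   cmp(lir_cond_less, f_left, f_right);
       //   branch(lir_cond_less, T_FLOAT, taken_block, unordered_block);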
2239 
2240   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2241   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2242   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2243 
2244   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2245   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2246   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2247 
2248   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2249   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2250 
2251   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2252     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2253   }
2254 
2255   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2256                     LIR_OprList* arguments, CodeEmitInfo* info) {
2257     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2258   }
2259 
2260   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2261   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2262   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2263 
2264   void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
2265   void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
2266   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2267 
2268   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2269 
2270   void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2271 
2272   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
2273 
2274   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2275   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2276 
2277   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2278                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2279                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2280                   ciMethod* profiled_method, int profiled_bci);
2281   // MethodData* profiling
2282   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2283     append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2284   }
2285   void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2286     append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2287   }
2288 
2289   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2290   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2291 #ifdef ASSERT
2292   void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2293 #endif
2294 };
2295 
2296 void print_LIR(BlockList* blocks);
2297 
2298 class LIR_InsertionBuffer : public CompilationResourceObj {
2299  private:
2300   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2301 
2302   // list of insertion points. index and count are stored alternately:
2303   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2304   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
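       //
       // Example (illustrative): two ops appended at index 3 followed by one op
       // at index 7 would leave _index_and_count = [3, 2, 7, 1], assuming that
       // consecutive appends at the same index are merged into one insertion point.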
2305   intStack    _index_and_count;
2306 
2307   // the LIR_Ops to be inserted
2308   LIR_OpList  _ops;
2309 
2310   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2311   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2312   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2313 
2314 #ifdef ASSERT
2315   void verify();
2316 #endif
2317  public:
2318   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2319 
2320   // must be called before using the insertion buffer
2321   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2322   bool initialized() const  { return _lir != NULL; }
2323   // called automatically when the buffer is appended to the LIR_List
2324   void finish()             { _lir = NULL; }
2325 
2326   // accessors
2327   LIR_List*  lir_list() const             { return _lir; }
2328   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2329   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2330   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2331 
2332   int number_of_ops() const               { return _ops.length(); }
2333   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2334 
2335   // append an instruction to the buffer
2336   void append(int index, LIR_Op* op);
2337 
2338   // instruction
2339   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2340 };
2341 
2342 
2343 //
2344 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2345 // Calling LIR_OpVisitState::visit() on a LIR_Op records information about
2346 // the input, output and temporary operands used by the op.  It also records
2347 // whether the op has call semantics and collects all the CodeEmitInfos
2348 // used by the op.
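     //
     // A sketch of typical use (illustrative; `op` is any LIR_Op*):
     //
     //   LIR_OpVisitState state;
     //   state.visit(op);
     //   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
     //     LIR_Opr in = state.opr_at(LIR_OpVisitState::inputMode, i);
     //     // ... e.g. record `in` as an input of the op in a register allocator
     //   }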
2349 //
2350 
2351 
2352 class LIR_OpVisitState: public StackObj {
2353  public:
2354   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2355 
2356   enum {
2357     maxNumberOfOperands = 20,
2358     maxNumberOfInfos = 4
2359   };
2360 
2361  private:
2362   LIR_Op*          _op;
2363 
2364   // optimization: the operands and infos are not stored in a variable-length
2365   //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
2366   int              _oprs_len[numModes];
2367   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2368   int _info_len;
2369   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2370 
2371   bool             _has_call;
2372   bool             _has_slow_case;
2373 
2374 
2375   // only include register operands
2376   // addresses are decomposed to the base and index registers
2377   // constants and stack operands are ignored
2378   void append(LIR_Opr& opr, OprMode mode) {
2379     assert(opr->is_valid(), "should not call this otherwise");
2380     assert(mode >= 0 && mode < numModes, "bad mode");
2381 
2382     if (opr->is_register()) {
2383        assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2384       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2385 
2386     } else if (opr->is_pointer()) {
2387       LIR_Address* address = opr->as_address_ptr();
2388       if (address != NULL) {
2389         // special handling for addresses: add base and index register of the address
2390         // both are always input operands or temp if we want to extend
2391         // their liveness!
2392         if (mode == outputMode) {
2393           mode = inputMode;
2394         }
2395         assert (mode == inputMode || mode == tempMode, "input or temp only for addresses");
2396         if (address->_base->is_valid()) {
2397           assert(address->_base->is_register(), "must be");
2398           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2399           _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2400         }
2401         if (address->_index->is_valid()) {
2402           assert(address->_index->is_register(), "must be");
2403           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2404           _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2405         }
2406 
2407       } else {
2408         assert(opr->is_constant(), "constant operands are not processed");
2409       }
2410     } else {
2411       assert(opr->is_stack(), "stack operands are not processed");
2412     }
2413   }
2414 
2415   void append(CodeEmitInfo* info) {
2416     assert(info != NULL, "should not call this otherwise");
2417     assert(_info_len < maxNumberOfInfos, "array overflow");
2418     _info_new[_info_len++] = info;
2419   }
2420 
2421  public:
2422   LIR_OpVisitState()         { reset(); }
2423 
2424   LIR_Op* op() const         { return _op; }
2425   void set_op(LIR_Op* op)    { reset(); _op = op; }
2426 
2427   bool has_call() const      { return _has_call; }
2428   bool has_slow_case() const { return _has_slow_case; }
2429 
2430   void reset() {
2431     _op = NULL;
2432     _has_call = false;
2433     _has_slow_case = false;
2434 
2435     _oprs_len[inputMode] = 0;
2436     _oprs_len[tempMode] = 0;
2437     _oprs_len[outputMode] = 0;
2438     _info_len = 0;
2439   }
2440 
2441 
2442   int opr_count(OprMode mode) const {
2443     assert(mode >= 0 && mode < numModes, "bad mode");
2444     return _oprs_len[mode];
2445   }
2446 
2447   LIR_Opr opr_at(OprMode mode, int index) const {
2448     assert(mode >= 0 && mode < numModes, "bad mode");
2449     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2450     return *_oprs_new[mode][index];
2451   }
2452 
2453   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2454     assert(mode >= 0 && mode < numModes, "bad mode");
2455     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2456     *_oprs_new[mode][index] = opr;
2457   }
2458 
2459   int info_count() const {
2460     return _info_len;
2461   }
2462 
2463   CodeEmitInfo* info_at(int index) const {
2464     assert(index < _info_len, "index out of bounds");
2465     return _info_new[index];
2466   }
2467 
2468   XHandlers* all_xhandler();
2469 
2470   // collects all register operands of the instruction
2471   void visit(LIR_Op* op);
2472 
2473 #ifdef ASSERT
2474   // check that an operation has no operands
2475   bool no_operands(LIR_Op* op);
2476 #endif
2477 
2478   // LIR_Op visitor functions use these to fill in the state
2479   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2480   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2481   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2482   void do_info(CodeEmitInfo* info)        { append(info); }
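
       // Illustrative only (the visit() implementation lives in c1_LIR.cpp): a
       // two-input op is typically recorded along the lines of
       //
       //   do_input(op2->_opr1);   do_input(op2->_opr2);
       //   do_temp(op2->_tmp1);    do_output(op2->_result);
       //
       // where `op2` stands for the op being visited.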
2483 
2484   void do_stub(CodeStub* stub);
2485   void do_call()                          { _has_call = true; }
2486   void do_slow_case()                     { _has_slow_case = true; }
2487   void do_slow_case(CodeEmitInfo* info) {
2488     _has_slow_case = true;
2489     append(info);
2490   }
2491 };
2492 
2493 
2494 inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2495 
2496 #endif // SHARE_VM_C1_C1_LIR_HPP