1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_C1_C1_LIR_HPP
  26 #define SHARE_C1_C1_LIR_HPP
  27 
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_ValueType.hpp"
  30 #include "oops/method.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 
  33 class BlockBegin;
  34 class BlockList;
  35 class LIR_Assembler;
  36 class CodeEmitInfo;
  37 class CodeStub;
  38 class CodeStubList;
  39 class ArrayCopyStub;
  40 class LIR_Op;
  41 class ciType;
  42 class ValueType;
  43 class LIR_OpVisitState;
  44 class FpuStackSim;
  45 
  46 //---------------------------------------------------------------------
  47 //                 LIR Operands
  48 //  LIR_OprDesc
  49 //    LIR_OprPtr
  50 //      LIR_Const
  51 //      LIR_Address
  52 //---------------------------------------------------------------------
  53 class LIR_OprDesc;
  54 class LIR_OprPtr;
  55 class LIR_Const;
  56 class LIR_Address;
  57 class LIR_OprVisitor;
  58 
  59 
  60 typedef LIR_OprDesc* LIR_Opr;
  61 typedef int          RegNr;
  62 
  63 typedef GrowableArray<LIR_Opr> LIR_OprList;
  64 typedef GrowableArray<LIR_Op*> LIR_OpArray;
  65 typedef GrowableArray<LIR_Op*> LIR_OpList;
  66 
  67 // define LIR_OprPtr early so LIR_OprDesc can refer to it
  68 class LIR_OprPtr: public CompilationResourceObj {
  69  public:
  70   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  71   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  72 
  73   virtual LIR_Const*  as_constant()              { return NULL; }
  74   virtual LIR_Address* as_address()              { return NULL; }
  75   virtual BasicType type() const                 = 0;
  76   virtual void print_value_on(outputStream* out) const = 0;
  77 };
  78 
  79 
  80 
  81 // LIR constants
  82 class LIR_Const: public LIR_OprPtr {
  83  private:
  84   JavaValue _value;
  85 
  86   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  87   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  88   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  89 
  90  public:
  91   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  92   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  93   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  94   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  95   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  96   LIR_Const(void* p) {
  97 #ifdef _LP64
  98     assert(sizeof(jlong) >= sizeof(p), "too small");;
  99     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
 100 #else
 101     assert(sizeof(jint) >= sizeof(p), "too small");;
 102     _value.set_type(T_INT);     _value.set_jint((jint)p);
 103 #endif
 104   }
 105   LIR_Const(Metadata* m) {
 106     _value.set_type(T_METADATA);
 107 #ifdef _LP64
 108     _value.set_jlong((jlong)m);
 109 #else
 110     _value.set_jint((jint)m);
 111 #endif // _LP64
 112   }
 113 
 114   virtual BasicType type()       const { return _value.get_type(); }
 115   virtual LIR_Const* as_constant()     { return this; }
 116 
 117   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 118   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 119   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 120   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 121   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 122   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 123   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 124 
 125 #ifdef _LP64
 126   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 127   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
 128 #else
 129   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 130   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
 131 #endif
 132 
 133 
 134   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 135   jint      as_jint_lo_bits() const    {
 136     if (type() == T_DOUBLE) {
 137       return low(jlong_cast(_value.get_jdouble()));
 138     } else {
 139       return as_jint_lo();
 140     }
 141   }
 142   jint      as_jint_hi_bits() const    {
 143     if (type() == T_DOUBLE) {
 144       return high(jlong_cast(_value.get_jdouble()));
 145     } else {
 146       return as_jint_hi();
 147     }
 148   }
 149   jlong      as_jlong_bits() const    {
 150     if (type() == T_DOUBLE) {
 151       return jlong_cast(_value.get_jdouble());
 152     } else {
 153       return as_jlong();
 154     }
 155   }
 156 
 157   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 158 
 159 
 160   bool is_zero_float() {
 161     jfloat f = as_jfloat();
 162     jfloat ok = 0.0f;
 163     return jint_cast(f) == jint_cast(ok);
 164   }
 165 
 166   bool is_one_float() {
 167     jfloat f = as_jfloat();
 168     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 169   }
 170 
 171   bool is_zero_double() {
 172     jdouble d = as_jdouble();
 173     jdouble ok = 0.0;
 174     return jlong_cast(d) == jlong_cast(ok);
 175   }
 176 
 177   bool is_one_double() {
 178     jdouble d = as_jdouble();
 179     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 180   }
 181 };
 182 
 183 
 184 //---------------------LIR Operand descriptor------------------------------------
 185 //
 186 // The class LIR_OprDesc represents a LIR instruction operand;
 187 // it can be a register (ALU/FPU), stack location or a constant;
 188 // Constants and addresses are represented as resource area allocated
 189 // structures (see above).
 190 // Registers and stack locations are inlined into the this pointer
 191 // (see value function).
 192 
 193 class LIR_OprDesc: public CompilationResourceObj {
 194  public:
 195   // value structure:
 196   //     data       opr-type opr-kind
 197   // +--------------+-------+-------+
 198   // [max...........|7 6 5 4|3 2 1 0]
 199   //                               ^
 200   //                         is_pointer bit
 201   //
 202   // lowest bit cleared, means it is a structure pointer
 203   // we need  4 bits to represent types
 204 
 205  private:
 206   friend class LIR_OprFact;
 207 
 208   // Conversion
 209   intptr_t value() const                         { return (intptr_t) this; }
 210 
 211   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 212     return (value() & mask) == masked_value;
 213   }
 214 
 215   enum OprKind {
 216       pointer_value      = 0
 217     , stack_value        = 1
 218     , cpu_register       = 3
 219     , fpu_register       = 5
 220     , illegal_value      = 7
 221   };
 222 
 223   enum OprBits {
 224       pointer_bits   = 1
 225     , kind_bits      = 3
 226     , type_bits      = 4
 227     , size_bits      = 2
 228     , destroys_bits  = 1
 229     , virtual_bits   = 1
 230     , is_xmm_bits    = 1
 231     , last_use_bits  = 1
 232     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 233     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
 234                        is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
 235     , data_bits      = BitsPerInt - non_data_bits
 236     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 237   };
 238 
 239   enum OprShift {
 240       kind_shift     = 0
 241     , type_shift     = kind_shift     + kind_bits
 242     , size_shift     = type_shift     + type_bits
 243     , destroys_shift = size_shift     + size_bits
 244     , last_use_shift = destroys_shift + destroys_bits
 245     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 246     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 247     , is_xmm_shift   = virtual_shift + virtual_bits
 248     , data_shift     = is_xmm_shift + is_xmm_bits
 249     , reg1_shift = data_shift
 250     , reg2_shift = data_shift + reg_bits
 251 
 252   };
 253 
 254   enum OprSize {
 255       single_size = 0 << size_shift
 256     , double_size = 1 << size_shift
 257   };
 258 
 259   enum OprMask {
 260       kind_mask      = right_n_bits(kind_bits)
 261     , type_mask      = right_n_bits(type_bits) << type_shift
 262     , size_mask      = right_n_bits(size_bits) << size_shift
 263     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 264     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 265     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 266     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 267     , pointer_mask   = right_n_bits(pointer_bits)
 268     , lower_reg_mask = right_n_bits(reg_bits)
 269     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 270   };
 271 
 272   uintptr_t data() const                         { return value() >> data_shift; }
 273   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 274   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 275   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 276   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 277 
 278   static char type_char(BasicType t);
 279 
 280  public:
 281   enum {
 282     vreg_base = ConcreteRegisterImpl::number_of_registers,
 283     vreg_max = (1 << data_bits) - 1
 284   };
 285 
 286   static inline LIR_Opr illegalOpr();
 287 
 288   enum OprType {
 289       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 290     , int_type      = 1 << type_shift
 291     , long_type     = 2 << type_shift
 292     , object_type   = 3 << type_shift
 293     , address_type  = 4 << type_shift
 294     , float_type    = 5 << type_shift
 295     , double_type   = 6 << type_shift
 296     , metadata_type = 7 << type_shift
 297   };
 298   friend OprType as_OprType(BasicType t);
 299   friend BasicType as_BasicType(OprType t);
 300 
 301   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 302   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 303 
 304   static OprSize size_for(BasicType t) {
 305     switch (t) {
 306       case T_LONG:
 307       case T_DOUBLE:
 308         return double_size;
 309         break;
 310 
 311       case T_FLOAT:
 312       case T_BOOLEAN:
 313       case T_CHAR:
 314       case T_BYTE:
 315       case T_SHORT:
 316       case T_INT:
 317       case T_ADDRESS:
 318       case T_OBJECT:
 319       case T_ARRAY:
 320       case T_METADATA:
 321         return single_size;
 322         break;
 323 
 324       default:
 325         ShouldNotReachHere();
 326         return single_size;
 327       }
 328   }
 329 
 330 
 331   void validate_type() const PRODUCT_RETURN;
 332 
 333   BasicType type() const {
 334     if (is_pointer()) {
 335       return pointer()->type();
 336     }
 337     return as_BasicType(type_field());
 338   }
 339 
 340 
 341   ValueType* value_type() const                  { return as_ValueType(type()); }
 342 
 343   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 344 
 345   bool is_equal(LIR_Opr opr) const         { return this == opr; }
 346   // checks whether types are same
 347   bool is_same_type(LIR_Opr opr) const     {
 348     assert(type_field() != unknown_type &&
 349            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 350     return type_field() == opr->type_field();
 351   }
 352   bool is_same_register(LIR_Opr opr) {
 353     return (is_register() && opr->is_register() &&
 354             kind_field() == opr->kind_field() &&
 355             (value() & no_type_mask) == (opr->value() & no_type_mask));
 356   }
 357 
 358   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 359   bool is_illegal() const      { return kind_field() == illegal_value; }
 360   bool is_valid() const        { return kind_field() != illegal_value; }
 361 
 362   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 363   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 364 
 365   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 366   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 367 
 368   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 369   bool is_oop() const;
 370 
 371   // semantic for fpu- and xmm-registers:
 372   // * is_float and is_double return true for xmm_registers
 373   //   (so is_single_fpu and is_single_xmm are true)
 374   // * So you must always check for is_???_xmm prior to is_???_fpu to
 375   //   distinguish between fpu- and xmm-registers
 376 
 377   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 378   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 379   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 380 
 381   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 382   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 383   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 384   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 385   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 386 
 387   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 388   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 389   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 390   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 391   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 392 
 393   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 394   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 395   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 396 
 397   // fast accessor functions for special bits that do not work for pointers
 398   // (in this functions, the check for is_pointer() is omitted)
 399   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 400   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 401   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 402   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 403   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 404 
 405   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 406   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 407   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 408   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 409 
 410 
 411   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 412   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 413   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 414   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 415   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 416   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 417   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 418   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 419   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 420   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 421   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 422   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 423 
 424   LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
 425   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 426   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 427 
 428   Register as_register()    const;
 429   Register as_register_lo() const;
 430   Register as_register_hi() const;
 431 
 432   Register as_pointer_register() {
 433 #ifdef _LP64
 434     if (is_double_cpu()) {
 435       assert(as_register_lo() == as_register_hi(), "should be a single register");
 436       return as_register_lo();
 437     }
 438 #endif
 439     return as_register();
 440   }
 441 
 442   FloatRegister as_float_reg   () const;
 443   FloatRegister as_double_reg  () const;
 444 #ifdef X86
 445   XMMRegister as_xmm_float_reg () const;
 446   XMMRegister as_xmm_double_reg() const;
 447   // for compatibility with RInfo
 448   int fpu() const { return lo_reg_half(); }
 449 #endif
 450 
 451   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 452   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 453   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 454   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 455   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 456 
 457   void print() const PRODUCT_RETURN;
 458   void print(outputStream* out) const PRODUCT_RETURN;
 459 };
 460 
 461 
 462 inline LIR_OprDesc::OprType as_OprType(BasicType type) {
 463   switch (type) {
 464   case T_INT:      return LIR_OprDesc::int_type;
 465   case T_LONG:     return LIR_OprDesc::long_type;
 466   case T_FLOAT:    return LIR_OprDesc::float_type;
 467   case T_DOUBLE:   return LIR_OprDesc::double_type;
 468   case T_OBJECT:
 469   case T_ARRAY:    return LIR_OprDesc::object_type;
 470   case T_ADDRESS:  return LIR_OprDesc::address_type;
 471   case T_METADATA: return LIR_OprDesc::metadata_type;
 472   case T_ILLEGAL:  // fall through
 473   default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
 474   }
 475 }
 476 
 477 inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
 478   switch (t) {
 479   case LIR_OprDesc::int_type:     return T_INT;
 480   case LIR_OprDesc::long_type:    return T_LONG;
 481   case LIR_OprDesc::float_type:   return T_FLOAT;
 482   case LIR_OprDesc::double_type:  return T_DOUBLE;
 483   case LIR_OprDesc::object_type:  return T_OBJECT;
 484   case LIR_OprDesc::address_type: return T_ADDRESS;
 485   case LIR_OprDesc::metadata_type:return T_METADATA;
 486   case LIR_OprDesc::unknown_type: // fall through
 487   default: ShouldNotReachHere();  return T_ILLEGAL;
 488   }
 489 }
 490 
 491 
 492 // LIR_Address
 493 class LIR_Address: public LIR_OprPtr {
 494  friend class LIR_OpVisitState;
 495 
 496  public:
 497   // NOTE: currently these must be the log2 of the scale factor (and
 498   // must also be equivalent to the ScaleFactor enum in
 499   // assembler_i486.hpp)
 500   enum Scale {
 501     times_1  =  0,
 502     times_2  =  1,
 503     times_4  =  2,
 504     times_8  =  3
 505   };
 506 
 507  private:
 508   LIR_Opr   _base;
 509   LIR_Opr   _index;
 510   Scale     _scale;
 511   intx      _disp;
 512   BasicType _type;
 513 
 514  public:
 515   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 516        _base(base)
 517      , _index(index)
 518      , _scale(times_1)
 519      , _disp(0)
 520      , _type(type) { verify(); }
 521 
 522   LIR_Address(LIR_Opr base, intx disp, BasicType type):
 523        _base(base)
 524      , _index(LIR_OprDesc::illegalOpr())
 525      , _scale(times_1)
 526      , _disp(disp)
 527      , _type(type) { verify(); }
 528 
 529   LIR_Address(LIR_Opr base, BasicType type):
 530        _base(base)
 531      , _index(LIR_OprDesc::illegalOpr())
 532      , _scale(times_1)
 533      , _disp(0)
 534      , _type(type) { verify(); }
 535 
 536   LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
 537        _base(base)
 538      , _index(index)
 539      , _scale(times_1)
 540      , _disp(disp)
 541      , _type(type) { verify(); }
 542 
 543   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
 544        _base(base)
 545      , _index(index)
 546      , _scale(scale)
 547      , _disp(disp)
 548      , _type(type) { verify(); }
 549 
 550   LIR_Opr base()  const                          { return _base;  }
 551   LIR_Opr index() const                          { return _index; }
 552   Scale   scale() const                          { return _scale; }
 553   intx    disp()  const                          { return _disp;  }
 554 
 555   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 556 
 557   virtual LIR_Address* as_address()              { return this;   }
 558   virtual BasicType type() const                 { return _type; }
 559   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 560 
 561   void verify() const PRODUCT_RETURN;
 562 
 563   static Scale scale(BasicType type);
 564 };
 565 
 566 
 567 // operand factory
 568 class LIR_OprFact: public AllStatic {
 569  public:
 570 
 571   static LIR_Opr illegalOpr;
 572 
 573   static LIR_Opr single_cpu(int reg) {
 574     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 575                                LIR_OprDesc::int_type             |
 576                                LIR_OprDesc::cpu_register         |
 577                                LIR_OprDesc::single_size);
 578   }
 579   static LIR_Opr single_cpu_oop(int reg) {
 580     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 581                                LIR_OprDesc::object_type          |
 582                                LIR_OprDesc::cpu_register         |
 583                                LIR_OprDesc::single_size);
 584   }
 585   static LIR_Opr single_cpu_address(int reg) {
 586     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 587                                LIR_OprDesc::address_type         |
 588                                LIR_OprDesc::cpu_register         |
 589                                LIR_OprDesc::single_size);
 590   }
 591   static LIR_Opr single_cpu_metadata(int reg) {
 592     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 593                                LIR_OprDesc::metadata_type        |
 594                                LIR_OprDesc::cpu_register         |
 595                                LIR_OprDesc::single_size);
 596   }
 597   static LIR_Opr double_cpu(int reg1, int reg2) {
 598     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 599     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 600                                (reg2 << LIR_OprDesc::reg2_shift) |
 601                                LIR_OprDesc::long_type            |
 602                                LIR_OprDesc::cpu_register         |
 603                                LIR_OprDesc::double_size);
 604   }
 605 
 606   static LIR_Opr single_fpu(int reg) {
 607     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 608                                LIR_OprDesc::float_type           |
 609                                LIR_OprDesc::fpu_register         |
 610                                LIR_OprDesc::single_size);
 611   }
 612 
 613   // Platform dependant.
 614   static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);
 615 
 616 #ifdef ARM32
 617   static LIR_Opr single_softfp(int reg) {
 618     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 619                                LIR_OprDesc::float_type           |
 620                                LIR_OprDesc::cpu_register         |
 621                                LIR_OprDesc::single_size);
 622   }
 623   static LIR_Opr double_softfp(int reg1, int reg2) {
 624     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 625                                (reg2 << LIR_OprDesc::reg2_shift) |
 626                                LIR_OprDesc::double_type          |
 627                                LIR_OprDesc::cpu_register         |
 628                                LIR_OprDesc::double_size);
 629   }
 630 #endif // ARM32
 631 
 632 #if defined(X86)
 633   static LIR_Opr single_xmm(int reg) {
 634     return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
 635                                LIR_OprDesc::float_type          |
 636                                LIR_OprDesc::fpu_register        |
 637                                LIR_OprDesc::single_size         |
 638                                LIR_OprDesc::is_xmm_mask);
 639   }
 640   static LIR_Opr double_xmm(int reg) {
 641     return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
 642                                (reg << LIR_OprDesc::reg2_shift) |
 643                                LIR_OprDesc::double_type         |
 644                                LIR_OprDesc::fpu_register        |
 645                                LIR_OprDesc::double_size         |
 646                                LIR_OprDesc::is_xmm_mask);
 647   }
 648 #endif // X86
 649 
 650   static LIR_Opr virtual_register(int index, BasicType type) {
 651     LIR_Opr res;
 652     switch (type) {
 653       case T_OBJECT: // fall through
 654       case T_ARRAY:
 655         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 656                                             LIR_OprDesc::object_type  |
 657                                             LIR_OprDesc::cpu_register |
 658                                             LIR_OprDesc::single_size  |
 659                                             LIR_OprDesc::virtual_mask);
 660         break;
 661 
 662       case T_METADATA:
 663         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 664                                             LIR_OprDesc::metadata_type|
 665                                             LIR_OprDesc::cpu_register |
 666                                             LIR_OprDesc::single_size  |
 667                                             LIR_OprDesc::virtual_mask);
 668         break;
 669 
 670       case T_INT:
 671         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 672                                   LIR_OprDesc::int_type              |
 673                                   LIR_OprDesc::cpu_register          |
 674                                   LIR_OprDesc::single_size           |
 675                                   LIR_OprDesc::virtual_mask);
 676         break;
 677 
 678       case T_ADDRESS:
 679         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 680                                   LIR_OprDesc::address_type          |
 681                                   LIR_OprDesc::cpu_register          |
 682                                   LIR_OprDesc::single_size           |
 683                                   LIR_OprDesc::virtual_mask);
 684         break;
 685 
 686       case T_LONG:
 687         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 688                                   LIR_OprDesc::long_type             |
 689                                   LIR_OprDesc::cpu_register          |
 690                                   LIR_OprDesc::double_size           |
 691                                   LIR_OprDesc::virtual_mask);
 692         break;
 693 
 694 #ifdef __SOFTFP__
 695       case T_FLOAT:
 696         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 697                                   LIR_OprDesc::float_type  |
 698                                   LIR_OprDesc::cpu_register |
 699                                   LIR_OprDesc::single_size |
 700                                   LIR_OprDesc::virtual_mask);
 701         break;
 702       case T_DOUBLE:
 703         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 704                                   LIR_OprDesc::double_type |
 705                                   LIR_OprDesc::cpu_register |
 706                                   LIR_OprDesc::double_size |
 707                                   LIR_OprDesc::virtual_mask);
 708         break;
 709 #else // __SOFTFP__
 710       case T_FLOAT:
 711         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 712                                   LIR_OprDesc::float_type           |
 713                                   LIR_OprDesc::fpu_register         |
 714                                   LIR_OprDesc::single_size          |
 715                                   LIR_OprDesc::virtual_mask);
 716         break;
 717 
 718       case
 719         T_DOUBLE: res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 720                                             LIR_OprDesc::double_type           |
 721                                             LIR_OprDesc::fpu_register          |
 722                                             LIR_OprDesc::double_size           |
 723                                             LIR_OprDesc::virtual_mask);
 724         break;
 725 #endif // __SOFTFP__
 726       default:       ShouldNotReachHere(); res = illegalOpr;
 727     }
 728 
 729 #ifdef ASSERT
 730     res->validate_type();
 731     assert(res->vreg_number() == index, "conversion check");
 732     assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
 733     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 734 
 735     // old-style calculation; check if old and new method are equal
 736     LIR_OprDesc::OprType t = as_OprType(type);
 737 #ifdef __SOFTFP__
 738     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 739                                t |
 740                                LIR_OprDesc::cpu_register |
 741                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 742 #else // __SOFTFP__
 743     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
 744                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
 745                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 746     assert(res == old_res, "old and new method not equal");
 747 #endif // __SOFTFP__
 748 #endif // ASSERT
 749 
 750     return res;
 751   }
 752 
 753   // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
 754   // the index is platform independent; a double stack useing indeces 2 and 3 has always
 755   // index 2.
 756   static LIR_Opr stack(int index, BasicType type) {
 757     LIR_Opr res;
 758     switch (type) {
 759       case T_OBJECT: // fall through
 760       case T_ARRAY:
 761         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 762                                   LIR_OprDesc::object_type           |
 763                                   LIR_OprDesc::stack_value           |
 764                                   LIR_OprDesc::single_size);
 765         break;
 766 
 767       case T_METADATA:
 768         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 769                                   LIR_OprDesc::metadata_type         |
 770                                   LIR_OprDesc::stack_value           |
 771                                   LIR_OprDesc::single_size);
 772         break;
 773       case T_INT:
 774         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 775                                   LIR_OprDesc::int_type              |
 776                                   LIR_OprDesc::stack_value           |
 777                                   LIR_OprDesc::single_size);
 778         break;
 779 
 780       case T_ADDRESS:
 781         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 782                                   LIR_OprDesc::address_type          |
 783                                   LIR_OprDesc::stack_value           |
 784                                   LIR_OprDesc::single_size);
 785         break;
 786 
 787       case T_LONG:
 788         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 789                                   LIR_OprDesc::long_type             |
 790                                   LIR_OprDesc::stack_value           |
 791                                   LIR_OprDesc::double_size);
 792         break;
 793 
 794       case T_FLOAT:
 795         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 796                                   LIR_OprDesc::float_type            |
 797                                   LIR_OprDesc::stack_value           |
 798                                   LIR_OprDesc::single_size);
 799         break;
 800       case T_DOUBLE:
 801         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 802                                   LIR_OprDesc::double_type           |
 803                                   LIR_OprDesc::stack_value           |
 804                                   LIR_OprDesc::double_size);
 805         break;
 806 
 807       default:       ShouldNotReachHere(); res = illegalOpr;
 808     }
 809 
 810 #ifdef ASSERT
 811     assert(index >= 0, "index must be positive");
 812     assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
 813 
 814     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 815                                           LIR_OprDesc::stack_value           |
 816                                           as_OprType(type)                   |
 817                                           LIR_OprDesc::size_for(type));
 818     assert(res == old_res, "old and new method not equal");
 819 #endif
 820 
 821     return res;
 822   }
 823 
 824   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 825   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 826   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 827   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 828   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 829   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 830   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 831   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 832   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 833   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 834   static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
 835 
 836   static LIR_Opr value_type(ValueType* type);
 837 };
 838 
 839 
 840 //-------------------------------------------------------------------------------
 841 //                   LIR Instructions
 842 //-------------------------------------------------------------------------------
 843 //
 844 // Note:
 845 //  - every instruction has a result operand
 846 //  - every instruction has an CodeEmitInfo operand (can be revisited later)
 847 //  - every instruction has a LIR_OpCode operand
 848 //  - LIR_OpN, means an instruction that has N input operands
 849 //
 850 // class hierarchy:
 851 //
 852 class  LIR_Op;
 853 class    LIR_Op0;
 854 class      LIR_OpLabel;
 855 class    LIR_Op1;
 856 class      LIR_OpBranch;
 857 class      LIR_OpConvert;
 858 class      LIR_OpAllocObj;
 859 class      LIR_OpRoundFP;
 860 class    LIR_Op2;
 861 class    LIR_OpDelay;
 862 class    LIR_Op3;
 863 class      LIR_OpAllocArray;
 864 class    LIR_OpCall;
 865 class      LIR_OpJavaCall;
 866 class      LIR_OpRTCall;
 867 class    LIR_OpArrayCopy;
 868 class    LIR_OpUpdateCRC32;
 869 class    LIR_OpLock;
 870 class    LIR_OpTypeCheck;
 871 class    LIR_OpCompareAndSwap;
 872 class    LIR_OpProfileCall;
 873 class    LIR_OpProfileType;
 874 #ifdef ASSERT
 875 class    LIR_OpAssert;
 876 #endif
 877 
 878 // LIR operation codes
 879 enum LIR_Code {
 880     lir_none
 881   , begin_op0
 882       , lir_label
 883       , lir_nop
 884       , lir_backwardbranch_target
 885       , lir_std_entry
 886       , lir_osr_entry
 887       , lir_fpop_raw
 888       , lir_breakpoint
 889       , lir_rtcall
 890       , lir_membar
 891       , lir_membar_acquire
 892       , lir_membar_release
 893       , lir_membar_loadload
 894       , lir_membar_storestore
 895       , lir_membar_loadstore
 896       , lir_membar_storeload
 897       , lir_get_thread
 898       , lir_on_spin_wait
 899   , end_op0
 900   , begin_op1
 901       , lir_fxch
 902       , lir_fld
 903       , lir_push
 904       , lir_pop
 905       , lir_null_check
 906       , lir_return
 907       , lir_leal
 908       , lir_branch
 909       , lir_cond_float_branch
 910       , lir_move
 911       , lir_convert
 912       , lir_alloc_object
 913       , lir_monaddr
 914       , lir_roundfp
 915       , lir_safepoint
 916       , lir_pack64
 917       , lir_unpack64
 918       , lir_unwind
 919   , end_op1
 920   , begin_op2
 921       , lir_cmp
 922       , lir_cmp_l2i
 923       , lir_ucmp_fd2i
 924       , lir_cmp_fd2i
 925       , lir_cmove
 926       , lir_add
 927       , lir_sub
 928       , lir_mul
 929       , lir_mul_strictfp
 930       , lir_div
 931       , lir_div_strictfp
 932       , lir_rem
 933       , lir_sqrt
 934       , lir_abs
 935       , lir_neg
 936       , lir_tan
 937       , lir_log10
 938       , lir_logic_and
 939       , lir_logic_or
 940       , lir_logic_xor
 941       , lir_shl
 942       , lir_shr
 943       , lir_ushr
 944       , lir_alloc_array
 945       , lir_throw
 946       , lir_xadd
 947       , lir_xchg
 948   , end_op2
 949   , begin_op3
 950       , lir_idiv
 951       , lir_irem
 952       , lir_fmad
 953       , lir_fmaf
 954   , end_op3
 955   , begin_opJavaCall
 956       , lir_static_call
 957       , lir_optvirtual_call
 958       , lir_icvirtual_call
 959       , lir_virtual_call
 960       , lir_dynamic_call
 961   , end_opJavaCall
 962   , begin_opArrayCopy
 963       , lir_arraycopy
 964   , end_opArrayCopy
 965   , begin_opUpdateCRC32
 966       , lir_updatecrc32
 967   , end_opUpdateCRC32
 968   , begin_opLock
 969     , lir_lock
 970     , lir_unlock
 971   , end_opLock
 972   , begin_delay_slot
 973     , lir_delay_slot
 974   , end_delay_slot
 975   , begin_opTypeCheck
 976     , lir_instanceof
 977     , lir_checkcast
 978     , lir_store_check
 979   , end_opTypeCheck
 980   , begin_opCompareAndSwap
 981     , lir_cas_long
 982     , lir_cas_obj
 983     , lir_cas_int
 984   , end_opCompareAndSwap
 985   , begin_opMDOProfile
 986     , lir_profile_call
 987     , lir_profile_type
 988   , end_opMDOProfile
 989   , begin_opAssert
 990     , lir_assert
 991   , end_opAssert
 992 };
 993 
 994 
 995 enum LIR_Condition {
 996     lir_cond_equal
 997   , lir_cond_notEqual
 998   , lir_cond_less
 999   , lir_cond_lessEqual
1000   , lir_cond_greaterEqual
1001   , lir_cond_greater
1002   , lir_cond_belowEqual
1003   , lir_cond_aboveEqual
1004   , lir_cond_always
1005   , lir_cond_unknown = -1
1006 };
1007 
1008 
1009 enum LIR_PatchCode {
1010   lir_patch_none,
1011   lir_patch_low,
1012   lir_patch_high,
1013   lir_patch_normal
1014 };
1015 
1016 
1017 enum LIR_MoveKind {
1018   lir_move_normal,
1019   lir_move_volatile,
1020   lir_move_unaligned,
1021   lir_move_wide,
1022   lir_move_max_flag
1023 };
1024 
1025 
1026 // --------------------------------------------------
1027 // LIR_Op
1028 // --------------------------------------------------
1029 class LIR_Op: public CompilationResourceObj {
1030  friend class LIR_OpVisitState;
1031 
1032 #ifdef ASSERT
1033  private:
1034   const char *  _file;
1035   int           _line;
1036 #endif
1037 
1038  protected:
1039   LIR_Opr       _result;
1040   unsigned short _code;
1041   unsigned short _flags;
1042   CodeEmitInfo* _info;
1043   int           _id;     // value id for register allocation
1044   int           _fpu_pop_count;
1045   Instruction*  _source; // for debugging
1046 
1047   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1048 
1049  protected:
1050   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1051 
1052  public:
1053   LIR_Op()
1054     :
1055 #ifdef ASSERT
1056       _file(NULL)
1057     , _line(0),
1058 #endif
1059       _result(LIR_OprFact::illegalOpr)
1060     , _code(lir_none)
1061     , _flags(0)
1062     , _info(NULL)
1063     , _id(-1)
1064     , _fpu_pop_count(0)
1065     , _source(NULL) {}
1066 
1067   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1068     :
1069 #ifdef ASSERT
1070       _file(NULL)
1071     , _line(0),
1072 #endif
1073       _result(result)
1074     , _code(code)
1075     , _flags(0)
1076     , _info(info)
1077     , _id(-1)
1078     , _fpu_pop_count(0)
1079     , _source(NULL) {}
1080 
1081   CodeEmitInfo* info() const                  { return _info;   }
1082   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1083   LIR_Opr result_opr() const                  { return _result; }
1084   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1085 
1086 #ifdef ASSERT
1087   void set_file_and_line(const char * file, int line) {
1088     _file = file;
1089     _line = line;
1090   }
1091 #endif
1092 
1093   virtual const char * name() const PRODUCT_RETURN0;
1094   virtual void visit(LIR_OpVisitState* state);
1095 
1096   int id()             const                  { return _id;     }
1097   void set_id(int id)                         { _id = id; }
1098 
1099   // FPU stack simulation helpers -- only used on Intel
1100   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1101   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1102   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1103 
1104   Instruction* source() const                 { return _source; }
1105   void set_source(Instruction* ins)           { _source = ins; }
1106 
1107   virtual void emit_code(LIR_Assembler* masm) = 0;
1108   virtual void print_instr(outputStream* out) const   = 0;
1109   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1110 
1111   virtual bool is_patching() { return false; }
1112   virtual LIR_OpCall* as_OpCall() { return NULL; }
1113   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1114   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1115   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1116   virtual LIR_OpLock* as_OpLock() { return NULL; }
1117   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1118   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1119   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1120   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1121   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1122   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1123   virtual LIR_Op0* as_Op0() { return NULL; }
1124   virtual LIR_Op1* as_Op1() { return NULL; }
1125   virtual LIR_Op2* as_Op2() { return NULL; }
1126   virtual LIR_Op3* as_Op3() { return NULL; }
1127   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1128   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
1129   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1130   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1131   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1132   virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
1133 #ifdef ASSERT
1134   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
1135 #endif
1136 
1137   virtual void verify() const {}
1138 };
1139 
1140 // for calls
1141 class LIR_OpCall: public LIR_Op {
1142  friend class LIR_OpVisitState;
1143 
1144  protected:
1145   address      _addr;
1146   LIR_OprList* _arguments;
1147  protected:
1148   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1149              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1150     : LIR_Op(code, result, info)
1151     , _addr(addr)
1152     , _arguments(arguments) {}
1153 
1154  public:
1155   address addr() const                           { return _addr; }
1156   const LIR_OprList* arguments() const           { return _arguments; }
1157   virtual LIR_OpCall* as_OpCall()                { return this; }
1158 };
1159 
1160 
1161 // --------------------------------------------------
1162 // LIR_OpJavaCall
1163 // --------------------------------------------------
1164 class LIR_OpJavaCall: public LIR_OpCall {
1165  friend class LIR_OpVisitState;
1166 
1167  private:
1168   ciMethod* _method;
1169   LIR_Opr   _receiver;
1170   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1171 
1172  public:
1173   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1174                  LIR_Opr receiver, LIR_Opr result,
1175                  address addr, LIR_OprList* arguments,
1176                  CodeEmitInfo* info)
1177   : LIR_OpCall(code, addr, result, arguments, info)
1178   , _method(method)
1179   , _receiver(receiver)
1180   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1181   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1182 
1183   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1184                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1185                  LIR_OprList* arguments, CodeEmitInfo* info)
1186   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1187   , _method(method)
1188   , _receiver(receiver)
1189   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1190   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1191 
1192   LIR_Opr receiver() const                       { return _receiver; }
1193   ciMethod* method() const                       { return _method;   }
1194 
1195   // JSR 292 support.
1196   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1197   bool is_method_handle_invoke() const {
1198     return method()->is_compiled_lambda_form() ||   // Java-generated lambda form
1199            method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1200   }
1201 
1202   intptr_t vtable_offset() const {
1203     assert(_code == lir_virtual_call, "only have vtable for real vcall");
1204     return (intptr_t) addr();
1205   }
1206 
1207   virtual void emit_code(LIR_Assembler* masm);
1208   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1209   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1210 };
1211 
1212 // --------------------------------------------------
1213 // LIR_OpLabel
1214 // --------------------------------------------------
1215 // Location where a branch can continue
1216 class LIR_OpLabel: public LIR_Op {
1217  friend class LIR_OpVisitState;
1218 
1219  private:
1220   Label* _label;
1221  public:
1222   LIR_OpLabel(Label* lbl)
1223    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1224    , _label(lbl)                                 {}
1225   Label* label() const                           { return _label; }
1226 
1227   virtual void emit_code(LIR_Assembler* masm);
1228   virtual LIR_OpLabel* as_OpLabel() { return this; }
1229   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1230 };
1231 
1232 // LIR_OpArrayCopy
1233 class LIR_OpArrayCopy: public LIR_Op {
1234  friend class LIR_OpVisitState;
1235 
1236  private:
1237   ArrayCopyStub*  _stub;
1238   LIR_Opr   _src;
1239   LIR_Opr   _src_pos;
1240   LIR_Opr   _dst;
1241   LIR_Opr   _dst_pos;
1242   LIR_Opr   _length;
1243   LIR_Opr   _tmp;
1244   ciArrayKlass* _expected_type;
1245   int       _flags;
1246 
1247 public:
1248   enum Flags {
1249     src_null_check         = 1 << 0,
1250     dst_null_check         = 1 << 1,
1251     src_pos_positive_check = 1 << 2,
1252     dst_pos_positive_check = 1 << 3,
1253     length_positive_check  = 1 << 4,
1254     src_range_check        = 1 << 5,
1255     dst_range_check        = 1 << 6,
1256     type_check             = 1 << 7,
1257     overlapping            = 1 << 8,
1258     unaligned              = 1 << 9,
1259     src_objarray           = 1 << 10,
1260     dst_objarray           = 1 << 11,
1261     all_flags              = (1 << 12) - 1
1262   };
1263 
1264   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1265                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1266 
1267   LIR_Opr src() const                            { return _src; }
1268   LIR_Opr src_pos() const                        { return _src_pos; }
1269   LIR_Opr dst() const                            { return _dst; }
1270   LIR_Opr dst_pos() const                        { return _dst_pos; }
1271   LIR_Opr length() const                         { return _length; }
1272   LIR_Opr tmp() const                            { return _tmp; }
1273   int flags() const                              { return _flags; }
1274   ciArrayKlass* expected_type() const            { return _expected_type; }
1275   ArrayCopyStub* stub() const                    { return _stub; }
1276 
1277   virtual void emit_code(LIR_Assembler* masm);
1278   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1279   void print_instr(outputStream* out) const PRODUCT_RETURN;
1280 };
1281 
1282 // LIR_OpUpdateCRC32
1283 class LIR_OpUpdateCRC32: public LIR_Op {
1284   friend class LIR_OpVisitState;
1285 
1286 private:
1287   LIR_Opr   _crc;
1288   LIR_Opr   _val;
1289 
1290 public:
1291 
1292   LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
1293 
1294   LIR_Opr crc() const                            { return _crc; }
1295   LIR_Opr val() const                            { return _val; }
1296 
1297   virtual void emit_code(LIR_Assembler* masm);
1298   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
1299   void print_instr(outputStream* out) const PRODUCT_RETURN;
1300 };
1301 
1302 // --------------------------------------------------
1303 // LIR_Op0
1304 // --------------------------------------------------
1305 class LIR_Op0: public LIR_Op {
1306  friend class LIR_OpVisitState;
1307 
1308  public:
1309   LIR_Op0(LIR_Code code)
1310    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1311   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1312    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1313 
1314   virtual void emit_code(LIR_Assembler* masm);
1315   virtual LIR_Op0* as_Op0() { return this; }
1316   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1317 };
1318 
1319 
1320 // --------------------------------------------------
1321 // LIR_Op1
1322 // --------------------------------------------------
1323 
1324 class LIR_Op1: public LIR_Op {
1325  friend class LIR_OpVisitState;
1326 
1327  protected:
1328   LIR_Opr         _opr;   // input operand
1329   BasicType       _type;  // Operand types
1330   LIR_PatchCode   _patch; // only required with patchin (NEEDS_CLEANUP: do we want a special instruction for patching?)
1331 
1332   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1333 
1334   void set_kind(LIR_MoveKind kind) {
1335     assert(code() == lir_move, "must be");
1336     _flags = kind;
1337   }
1338 
1339  public:
1340   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1341     : LIR_Op(code, result, info)
1342     , _opr(opr)
1343     , _type(type)
1344     , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1345 
1346   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1347     : LIR_Op(code, result, info)
1348     , _opr(opr)
1349     , _type(type)
1350     , _patch(patch)                    {
1351     assert(code == lir_move, "must be");
1352     set_kind(kind);
1353   }
1354 
1355   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1356     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1357     , _opr(opr)
1358     , _type(T_ILLEGAL)
1359     , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1360 
1361   LIR_Opr in_opr()           const               { return _opr;   }
1362   LIR_PatchCode patch_code() const               { return _patch; }
1363   BasicType type()           const               { return _type;  }
1364 
1365   LIR_MoveKind move_kind() const {
1366     assert(code() == lir_move, "must be");
1367     return (LIR_MoveKind)_flags;
1368   }
1369 
1370   virtual bool is_patching() { return _patch != lir_patch_none; }
1371   virtual void emit_code(LIR_Assembler* masm);
1372   virtual LIR_Op1* as_Op1() { return this; }
1373   virtual const char * name() const PRODUCT_RETURN0;
1374 
1375   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1376 
1377   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1378   virtual void verify() const;
1379 };
1380 
1381 
1382 // for runtime calls
1383 class LIR_OpRTCall: public LIR_OpCall {
1384  friend class LIR_OpVisitState;
1385 
1386  private:
1387   LIR_Opr _tmp;
1388  public:
1389   LIR_OpRTCall(address addr, LIR_Opr tmp,
1390                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1391     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1392     , _tmp(tmp) {}
1393 
1394   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1395   virtual void emit_code(LIR_Assembler* masm);
1396   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1397 
1398   LIR_Opr tmp() const                            { return _tmp; }
1399 
1400   virtual void verify() const;
1401 };
1402 
1403 
1404 class LIR_OpBranch: public LIR_Op {
1405  friend class LIR_OpVisitState;
1406 
1407  private:
1408   LIR_Condition _cond;
1409   BasicType     _type;
1410   Label*        _label;
1411   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1412   BlockBegin*   _ublock; // if this is a float-branch, this is the unorderd block
1413   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1414 
1415  public:
1416   LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
1417     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1418     , _cond(cond)
1419     , _type(type)
1420     , _label(lbl)
1421     , _block(NULL)
1422     , _ublock(NULL)
1423     , _stub(NULL) { }
1424 
1425   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1426   LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1427 
1428   // for unordered comparisons
1429   LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1430 
1431   LIR_Condition cond()        const              { return _cond;        }
1432   BasicType     type()        const              { return _type;        }
1433   Label*        label()       const              { return _label;       }
1434   BlockBegin*   block()       const              { return _block;       }
1435   BlockBegin*   ublock()      const              { return _ublock;      }
1436   CodeStub*     stub()        const              { return _stub;       }
1437 
1438   void          change_block(BlockBegin* b);
1439   void          change_ublock(BlockBegin* b);
1440   void          negate_cond();
1441 
1442   virtual void emit_code(LIR_Assembler* masm);
1443   virtual LIR_OpBranch* as_OpBranch() { return this; }
1444   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1445 };
1446 
1447 
1448 class ConversionStub;
1449 
1450 class LIR_OpConvert: public LIR_Op1 {
1451  friend class LIR_OpVisitState;
1452 
1453  private:
1454    Bytecodes::Code _bytecode;
1455    ConversionStub* _stub;
1456 
1457  public:
1458    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1459      : LIR_Op1(lir_convert, opr, result)
1460      , _bytecode(code)
1461      , _stub(stub)                               {}
1462 
1463   Bytecodes::Code bytecode() const               { return _bytecode; }
1464   ConversionStub* stub() const                   { return _stub; }
1465 
1466   virtual void emit_code(LIR_Assembler* masm);
1467   virtual LIR_OpConvert* as_OpConvert() { return this; }
1468   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1469 
1470   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1471 };
1472 
1473 
1474 // LIR_OpAllocObj
1475 class LIR_OpAllocObj : public LIR_Op1 {
1476  friend class LIR_OpVisitState;
1477 
1478  private:
1479   LIR_Opr _tmp1;
1480   LIR_Opr _tmp2;
1481   LIR_Opr _tmp3;
1482   LIR_Opr _tmp4;
1483   int     _hdr_size;
1484   int     _obj_size;
1485   CodeStub* _stub;
1486   bool    _init_check;
1487 
1488  public:
1489   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1490                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1491                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1492     : LIR_Op1(lir_alloc_object, klass, result)
1493     , _tmp1(t1)
1494     , _tmp2(t2)
1495     , _tmp3(t3)
1496     , _tmp4(t4)
1497     , _hdr_size(hdr_size)
1498     , _obj_size(obj_size)
1499     , _stub(stub)
1500     , _init_check(init_check)                    { }
1501 
1502   LIR_Opr klass()        const                   { return in_opr();     }
1503   LIR_Opr obj()          const                   { return result_opr(); }
1504   LIR_Opr tmp1()         const                   { return _tmp1;        }
1505   LIR_Opr tmp2()         const                   { return _tmp2;        }
1506   LIR_Opr tmp3()         const                   { return _tmp3;        }
1507   LIR_Opr tmp4()         const                   { return _tmp4;        }
1508   int     header_size()  const                   { return _hdr_size;    }
1509   int     object_size()  const                   { return _obj_size;    }
1510   bool    init_check()   const                   { return _init_check;  }
1511   CodeStub* stub()       const                   { return _stub;        }
1512 
1513   virtual void emit_code(LIR_Assembler* masm);
1514   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1515   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1516 };
1517 
1518 
1519 // LIR_OpRoundFP
1520 class LIR_OpRoundFP : public LIR_Op1 {
1521  friend class LIR_OpVisitState;
1522 
1523  private:
1524   LIR_Opr _tmp;
1525 
1526  public:
1527   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1528     : LIR_Op1(lir_roundfp, reg, result)
1529     , _tmp(stack_loc_temp) {}
1530 
1531   LIR_Opr tmp() const                            { return _tmp; }
1532   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1533   void print_instr(outputStream* out) const PRODUCT_RETURN;
1534 };
1535 
1536 // LIR_OpTypeCheck
1537 class LIR_OpTypeCheck: public LIR_Op {
1538  friend class LIR_OpVisitState;
1539 
1540  private:
1541   LIR_Opr       _object;
1542   LIR_Opr       _array;
1543   ciKlass*      _klass;
1544   LIR_Opr       _tmp1;
1545   LIR_Opr       _tmp2;
1546   LIR_Opr       _tmp3;
1547   bool          _fast_check;
1548   CodeEmitInfo* _info_for_patch;
1549   CodeEmitInfo* _info_for_exception;
1550   CodeStub*     _stub;
1551   ciMethod*     _profiled_method;
1552   int           _profiled_bci;
1553   bool          _should_profile;
1554 
1555 public:
1556   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1557                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1558                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1559   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1560                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1561 
1562   LIR_Opr object() const                         { return _object;         }
1563   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1564   LIR_Opr tmp1() const                           { return _tmp1;           }
1565   LIR_Opr tmp2() const                           { return _tmp2;           }
1566   LIR_Opr tmp3() const                           { return _tmp3;           }
1567   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1568   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1569   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1570   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1571   CodeStub* stub() const                         { return _stub;           }
1572 
1573   // MethodData* profiling
1574   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1575   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1576   void set_should_profile(bool b)                { _should_profile = b;       }
1577   ciMethod* profiled_method() const              { return _profiled_method;   }
1578   int       profiled_bci() const                 { return _profiled_bci;      }
1579   bool      should_profile() const               { return _should_profile;    }
1580 
1581   virtual bool is_patching() { return _info_for_patch != NULL; }
1582   virtual void emit_code(LIR_Assembler* masm);
1583   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1584   void print_instr(outputStream* out) const PRODUCT_RETURN;
1585 };
1586 
1587 // LIR_Op2
1588 class LIR_Op2: public LIR_Op {
1589  friend class LIR_OpVisitState;
1590 
1591   int  _fpu_stack_size; // for sin/cos implementation on Intel
1592 
1593  protected:
1594   LIR_Opr   _opr1;
1595   LIR_Opr   _opr2;
1596   BasicType _type;
1597   LIR_Opr   _tmp1;
1598   LIR_Opr   _tmp2;
1599   LIR_Opr   _tmp3;
1600   LIR_Opr   _tmp4;
1601   LIR_Opr   _tmp5;
1602   LIR_Condition _condition;
1603 
1604   void verify() const;
1605 
1606  public:
1607   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1608     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1609     , _fpu_stack_size(0)
1610     , _opr1(opr1)
1611     , _opr2(opr2)
1612     , _type(T_ILLEGAL)
1613     , _tmp1(LIR_OprFact::illegalOpr)
1614     , _tmp2(LIR_OprFact::illegalOpr)
1615     , _tmp3(LIR_OprFact::illegalOpr)
1616     , _tmp4(LIR_OprFact::illegalOpr)
1617     , _tmp5(LIR_OprFact::illegalOpr)
1618     , _condition(condition) {
1619     assert(code == lir_cmp || code == lir_assert, "code check");
1620   }
1621 
1622   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1623     : LIR_Op(code, result, NULL)
1624     , _fpu_stack_size(0)
1625     , _opr1(opr1)
1626     , _opr2(opr2)
1627     , _type(type)
1628     , _tmp1(LIR_OprFact::illegalOpr)
1629     , _tmp2(LIR_OprFact::illegalOpr)
1630     , _tmp3(LIR_OprFact::illegalOpr)
1631     , _tmp4(LIR_OprFact::illegalOpr)
1632     , _tmp5(LIR_OprFact::illegalOpr)
1633     , _condition(condition) {
1634     assert(code == lir_cmove, "code check");
1635     assert(type != T_ILLEGAL, "cmove should have type");
1636   }
1637 
1638   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1639           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1640     : LIR_Op(code, result, info)
1641     , _fpu_stack_size(0)
1642     , _opr1(opr1)
1643     , _opr2(opr2)
1644     , _type(type)
1645     , _tmp1(LIR_OprFact::illegalOpr)
1646     , _tmp2(LIR_OprFact::illegalOpr)
1647     , _tmp3(LIR_OprFact::illegalOpr)
1648     , _tmp4(LIR_OprFact::illegalOpr)
1649     , _tmp5(LIR_OprFact::illegalOpr)
1650     , _condition(lir_cond_unknown) {
1651     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1652   }
1653 
1654   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1655           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1656     : LIR_Op(code, result, NULL)
1657     , _fpu_stack_size(0)
1658     , _opr1(opr1)
1659     , _opr2(opr2)
1660     , _type(T_ILLEGAL)
1661     , _tmp1(tmp1)
1662     , _tmp2(tmp2)
1663     , _tmp3(tmp3)
1664     , _tmp4(tmp4)
1665     , _tmp5(tmp5)
1666     , _condition(lir_cond_unknown) {
1667     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1668   }
1669 
1670   LIR_Opr in_opr1() const                        { return _opr1; }
1671   LIR_Opr in_opr2() const                        { return _opr2; }
1672   BasicType type()  const                        { return _type; }
1673   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1674   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1675   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1676   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1677   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1678   LIR_Condition condition() const  {
1679     assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
1680   }
1681   void set_condition(LIR_Condition condition) {
1682     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1683   }
1684 
1685   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1686   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1687 
1688   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1689   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1690 
1691   virtual void emit_code(LIR_Assembler* masm);
1692   virtual LIR_Op2* as_Op2() { return this; }
1693   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1694 };
1695 
1696 class LIR_OpAllocArray : public LIR_Op {
1697  friend class LIR_OpVisitState;
1698 
1699  private:
1700   LIR_Opr   _klass;
1701   LIR_Opr   _len;
1702   LIR_Opr   _tmp1;
1703   LIR_Opr   _tmp2;
1704   LIR_Opr   _tmp3;
1705   LIR_Opr   _tmp4;
1706   BasicType _type;
1707   CodeStub* _stub;
1708 
1709  public:
1710   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1711     : LIR_Op(lir_alloc_array, result, NULL)
1712     , _klass(klass)
1713     , _len(len)
1714     , _tmp1(t1)
1715     , _tmp2(t2)
1716     , _tmp3(t3)
1717     , _tmp4(t4)
1718     , _type(type)
1719     , _stub(stub) {}
1720 
1721   LIR_Opr   klass()   const                      { return _klass;       }
1722   LIR_Opr   len()     const                      { return _len;         }
1723   LIR_Opr   obj()     const                      { return result_opr(); }
1724   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1725   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1726   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1727   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1728   BasicType type()    const                      { return _type;        }
1729   CodeStub* stub()    const                      { return _stub;        }
1730 
1731   virtual void emit_code(LIR_Assembler* masm);
1732   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1733   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1734 };
1735 
1736 
1737 class LIR_Op3: public LIR_Op {
1738  friend class LIR_OpVisitState;
1739 
1740  private:
1741   LIR_Opr _opr1;
1742   LIR_Opr _opr2;
1743   LIR_Opr _opr3;
1744  public:
1745   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1746     : LIR_Op(code, result, info)
1747     , _opr1(opr1)
1748     , _opr2(opr2)
1749     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1750   LIR_Opr in_opr1() const                        { return _opr1; }
1751   LIR_Opr in_opr2() const                        { return _opr2; }
1752   LIR_Opr in_opr3() const                        { return _opr3; }
1753 
1754   virtual void emit_code(LIR_Assembler* masm);
1755   virtual LIR_Op3* as_Op3() { return this; }
1756   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1757 };
1758 
1759 
1760 //--------------------------------
1761 class LabelObj: public CompilationResourceObj {
1762  private:
1763   Label _label;
1764  public:
1765   LabelObj()                                     {}
1766   Label* label()                                 { return &_label; }
1767 };
1768 
1769 
1770 class LIR_OpLock: public LIR_Op {
1771  friend class LIR_OpVisitState;
1772 
1773  private:
1774   LIR_Opr _hdr;
1775   LIR_Opr _obj;
1776   LIR_Opr _lock;
1777   LIR_Opr _scratch;
1778   CodeStub* _stub;
1779  public:
1780   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1781     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1782     , _hdr(hdr)
1783     , _obj(obj)
1784     , _lock(lock)
1785     , _scratch(scratch)
1786     , _stub(stub)                      {}
1787 
1788   LIR_Opr hdr_opr() const                        { return _hdr; }
1789   LIR_Opr obj_opr() const                        { return _obj; }
1790   LIR_Opr lock_opr() const                       { return _lock; }
1791   LIR_Opr scratch_opr() const                    { return _scratch; }
1792   CodeStub* stub() const                         { return _stub; }
1793 
1794   virtual void emit_code(LIR_Assembler* masm);
1795   virtual LIR_OpLock* as_OpLock() { return this; }
1796   void print_instr(outputStream* out) const PRODUCT_RETURN;
1797 };
1798 
1799 
1800 class LIR_OpDelay: public LIR_Op {
1801  friend class LIR_OpVisitState;
1802 
1803  private:
1804   LIR_Op* _op;
1805 
1806  public:
1807   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1808     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1809     _op(op) {
1810     assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1811   }
1812   virtual void emit_code(LIR_Assembler* masm);
1813   virtual LIR_OpDelay* as_OpDelay() { return this; }
1814   void print_instr(outputStream* out) const PRODUCT_RETURN;
1815   LIR_Op* delay_op() const { return _op; }
1816   CodeEmitInfo* call_info() const { return info(); }
1817 };
1818 
1819 #ifdef ASSERT
1820 // LIR_OpAssert
1821 class LIR_OpAssert : public LIR_Op2 {
1822  friend class LIR_OpVisitState;
1823 
1824  private:
1825   const char* _msg;
1826   bool        _halt;
1827 
1828  public:
1829   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1830     : LIR_Op2(lir_assert, condition, opr1, opr2)
1831     , _msg(msg)
1832     , _halt(halt) {
1833   }
1834 
1835   const char* msg() const                        { return _msg; }
1836   bool        halt() const                       { return _halt; }
1837 
1838   virtual void emit_code(LIR_Assembler* masm);
1839   virtual LIR_OpAssert* as_OpAssert()            { return this; }
1840   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1841 };
1842 #endif
1843 
1844 // LIR_OpCompareAndSwap
1845 class LIR_OpCompareAndSwap : public LIR_Op {
1846  friend class LIR_OpVisitState;
1847 
1848  private:
1849   LIR_Opr _addr;
1850   LIR_Opr _cmp_value;
1851   LIR_Opr _new_value;
1852   LIR_Opr _tmp1;
1853   LIR_Opr _tmp2;
1854 
1855  public:
1856   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1857                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1858     : LIR_Op(code, result, NULL)  // no result, no info
1859     , _addr(addr)
1860     , _cmp_value(cmp_value)
1861     , _new_value(new_value)
1862     , _tmp1(t1)
1863     , _tmp2(t2)                                  { }
1864 
1865   LIR_Opr addr()        const                    { return _addr;  }
1866   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1867   LIR_Opr new_value()   const                    { return _new_value; }
1868   LIR_Opr tmp1()        const                    { return _tmp1;      }
1869   LIR_Opr tmp2()        const                    { return _tmp2;      }
1870 
1871   virtual void emit_code(LIR_Assembler* masm);
1872   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1873   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1874 };
1875 
1876 // LIR_OpProfileCall
1877 class LIR_OpProfileCall : public LIR_Op {
1878  friend class LIR_OpVisitState;
1879 
1880  private:
1881   ciMethod* _profiled_method;
1882   int       _profiled_bci;
1883   ciMethod* _profiled_callee;
1884   LIR_Opr   _mdo;
1885   LIR_Opr   _recv;
1886   LIR_Opr   _tmp1;
1887   ciKlass*  _known_holder;
1888 
1889  public:
1890   // Destroys recv
1891   LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1892     : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1893     , _profiled_method(profiled_method)
1894     , _profiled_bci(profiled_bci)
1895     , _profiled_callee(profiled_callee)
1896     , _mdo(mdo)
1897     , _recv(recv)
1898     , _tmp1(t1)
1899     , _known_holder(known_holder)                { }
1900 
1901   ciMethod* profiled_method() const              { return _profiled_method;  }
1902   int       profiled_bci()    const              { return _profiled_bci;     }
1903   ciMethod* profiled_callee() const              { return _profiled_callee;  }
1904   LIR_Opr   mdo()             const              { return _mdo;              }
1905   LIR_Opr   recv()            const              { return _recv;             }
1906   LIR_Opr   tmp1()            const              { return _tmp1;             }
1907   ciKlass*  known_holder()    const              { return _known_holder;     }
1908 
1909   virtual void emit_code(LIR_Assembler* masm);
1910   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1911   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1912   bool should_profile_receiver_type() const {
1913     bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
1914     Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
1915     bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
1916     return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
1917   }
1918 };
1919 
1920 // LIR_OpProfileType
1921 class LIR_OpProfileType : public LIR_Op {
1922  friend class LIR_OpVisitState;
1923 
1924  private:
1925   LIR_Opr      _mdp;
1926   LIR_Opr      _obj;
1927   LIR_Opr      _tmp;
1928   ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
1929   intptr_t     _current_klass; // what the profiling currently reports
1930   bool         _not_null;      // true if we know statically that _obj cannot be null
1931   bool         _no_conflict;   // true if we're profling parameters, _exact_klass is not NULL and we know
1932                                // _exact_klass it the only possible type for this parameter in any context.
1933 
1934  public:
1935   // Destroys recv
1936   LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
1937     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1938     , _mdp(mdp)
1939     , _obj(obj)
1940     , _tmp(tmp)
1941     , _exact_klass(exact_klass)
1942     , _current_klass(current_klass)
1943     , _not_null(not_null)
1944     , _no_conflict(no_conflict) { }
1945 
1946   LIR_Opr      mdp()              const             { return _mdp;              }
1947   LIR_Opr      obj()              const             { return _obj;              }
1948   LIR_Opr      tmp()              const             { return _tmp;              }
1949   ciKlass*     exact_klass()      const             { return _exact_klass;      }
1950   intptr_t     current_klass()    const             { return _current_klass;    }
1951   bool         not_null()         const             { return _not_null;         }
1952   bool         no_conflict()      const             { return _no_conflict;      }
1953 
1954   virtual void emit_code(LIR_Assembler* masm);
1955   virtual LIR_OpProfileType* as_OpProfileType() { return this; }
1956   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1957 };
1958 
1959 class LIR_InsertionBuffer;
1960 
1961 //--------------------------------LIR_List---------------------------------------------------
1962 // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1963 // The LIR instructions are appended by the LIR_List class itself;
1964 //
1965 // Notes:
1966 // - all offsets are(should be) in bytes
1967 // - local positions are specified with an offset, with offset 0 being local 0
1968 
1969 class LIR_List: public CompilationResourceObj {
1970  private:
1971   LIR_OpList  _operations;
1972 
1973   Compilation*  _compilation;
1974 #ifndef PRODUCT
1975   BlockBegin*   _block;
1976 #endif
1977 #ifdef ASSERT
1978   const char *  _file;
1979   int           _line;
1980 #endif
1981 
1982  public:
1983   void append(LIR_Op* op) {
1984     if (op->source() == NULL)
1985       op->set_source(_compilation->current_instruction());
1986 #ifndef PRODUCT
1987     if (PrintIRWithLIR) {
1988       _compilation->maybe_print_current_instruction();
1989       op->print(); tty->cr();
1990     }
1991 #endif // PRODUCT
1992 
1993     _operations.append(op);
1994 
1995 #ifdef ASSERT
1996     op->verify();
1997     op->set_file_and_line(_file, _line);
1998     _file = NULL;
1999     _line = 0;
2000 #endif
2001   }
2002 
2003   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
2004 
2005 #ifdef ASSERT
2006   void set_file_and_line(const char * file, int line);
2007 #endif
2008 
2009   //---------- accessors ---------------
2010   LIR_OpList* instructions_list()                { return &_operations; }
2011   int         length() const                     { return _operations.length(); }
2012   LIR_Op*     at(int i) const                    { return _operations.at(i); }
2013 
2014   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
2015 
2016   // insert LIR_Ops in buffer to right places in LIR_List
2017   void append(LIR_InsertionBuffer* buffer);
2018 
2019   //---------- mutators ---------------
2020   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
2021   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
2022   void remove_at(int i)                          { _operations.remove_at(i); }
2023 
2024   //---------- printing -------------
2025   void print_instructions() PRODUCT_RETURN;
2026 
2027 
2028   //---------- instructions -------------
2029   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2030                         address dest, LIR_OprList* arguments,
2031                         CodeEmitInfo* info) {
2032     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2033   }
2034   void call_static(ciMethod* method, LIR_Opr result,
2035                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2036     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2037   }
2038   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2039                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2040     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2041   }
2042   void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2043                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
2044     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
2045   }
2046   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2047                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2048     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2049   }
2050 
2051   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
2052   void membar()                                  { append(new LIR_Op0(lir_membar)); }
2053   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
2054   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
2055   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
2056   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
2057   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
2058   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
2059 
2060   void nop()                                     { append(new LIR_Op0(lir_nop)); }
2061 
2062   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
2063   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2064 
2065   void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }
2066 
2067   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
2068 
2069   void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
2070 
2071   // result is a stack location for old backend and vreg for UseLinearScan
2072   // stack_loc_temp is an illegal register for old backend
2073   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2074   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2075   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2076   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2077   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2078   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2079   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2080   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
2081     if (UseCompressedOops) {
2082       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2083     } else {
2084       move(src, dst, info);
2085     }
2086   }
2087   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2088     if (UseCompressedOops) {
2089       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2090     } else {
2091       move(src, dst, info);
2092     }
2093   }
2094   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2095 
2096   void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2097   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2098 
2099   void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2100   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2101 
2102   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
2103 
2104   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2105 
2106   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2107 
2108   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2109   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2110   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2111 
2112   void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
2113   void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
2114 
2115   void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
2116   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2117     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2118   }
2119   void unwind_exception(LIR_Opr exceptionOop) {
2120     append(new LIR_Op1(lir_unwind, exceptionOop));
2121   }
2122 
2123   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2124   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2125 
2126   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2127     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2128   }
2129   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2130     cmp(condition, left, LIR_OprFact::intConst(right), info);
2131   }
2132 
2133   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2134   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2135 
2136   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2137     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2138   }
2139 
2140   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2141                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2142   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2143                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2144   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2145                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2146 
2147   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2148   void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr)              { append(new LIR_Op2(lir_neg, from, tmp, to)); }
2149   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2150   void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
2151   void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
2152   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2153   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2154 
2155   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2156   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2157   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2158   void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
2159   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2160   void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
2161   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2162 
2163   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2164   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2165 
2166   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2167 
2168   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2169   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2170   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2171   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2172   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2173 
2174   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2175   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2176   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2177   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2178 
2179   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2180   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2181 
2182   // jump is an unconditional branch
2183   void jump(BlockBegin* block) {
2184     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
2185   }
2186   void jump(CodeStub* stub) {
2187     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
2188   }
2189   void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
2190   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
2191     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2192     append(new LIR_OpBranch(cond, type, block));
2193   }
2194   void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
2195     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2196     append(new LIR_OpBranch(cond, type, stub));
2197   }
2198   void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2199     assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2200     append(new LIR_OpBranch(cond, type, block, unordered));
2201   }
2202 
2203   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2204   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2205   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2206 
2207   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2208   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2209   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2210 
2211   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2212   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2213 
2214   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2215     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2216   }
2217 
2218   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2219                     LIR_OprList* arguments, CodeEmitInfo* info) {
2220     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2221   }
2222 
2223   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2224   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2225   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2226 
2227   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2228 
2229   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2230 
2231   void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2232 
2233   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2234   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2235 
2236   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2237                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2238                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2239                   ciMethod* profiled_method, int profiled_bci);
2240   // MethodData* profiling
2241   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2242     append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2243   }
2244   void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2245     append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2246   }
2247 
2248   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2249   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2250 #ifdef ASSERT
2251   void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2252 #endif
2253 };
2254 
2255 void print_LIR(BlockList* blocks);
2256 
2257 class LIR_InsertionBuffer : public CompilationResourceObj {
2258  private:
2259   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2260 
2261   // list of insertion points. index and count are stored alternately:
2262   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2263   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
2264   intStack    _index_and_count;
2265 
2266   // the LIR_Ops to be inserted
2267   LIR_OpList  _ops;
2268 
2269   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2270   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2271   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2272 
2273 #ifdef ASSERT
2274   void verify();
2275 #endif
2276  public:
2277   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2278 
2279   // must be called before using the insertion buffer
2280   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2281   bool initialized() const  { return _lir != NULL; }
2282   // called automatically when the buffer is appended to the LIR_List
2283   void finish()             { _lir = NULL; }
2284 
2285   // accessors
2286   LIR_List*  lir_list() const             { return _lir; }
2287   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2288   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2289   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2290 
2291   int number_of_ops() const               { return _ops.length(); }
2292   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2293 
2294   // append an instruction to the buffer
2295   void append(int index, LIR_Op* op);
2296 
2297   // instruction
2298   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2299 };
2300 
2301 
2302 //
2303 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2304 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2305 // information about the input, output and temporaries used by the
2306 // op to be recorded.  It also records whether the op has call semantics
2307 // and also records all the CodeEmitInfos used by this op.
2308 //
2309 
2310 
2311 class LIR_OpVisitState: public StackObj {
2312  public:
2313   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2314 
2315   enum {
2316     maxNumberOfOperands = 20,
2317     maxNumberOfInfos = 4
2318   };
2319 
2320  private:
2321   LIR_Op*          _op;
2322 
2323   // optimization: the operands and infos are not stored in a variable-length
2324   //               list, but in a fixed-size array to save time of size checks and resizing
2325   int              _oprs_len[numModes];
2326   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2327   int _info_len;
2328   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2329 
2330   bool             _has_call;
2331   bool             _has_slow_case;
2332 
2333 
2334   // only include register operands
2335   // addresses are decomposed to the base and index registers
2336   // constants and stack operands are ignored
2337   void append(LIR_Opr& opr, OprMode mode) {
2338     assert(opr->is_valid(), "should not call this otherwise");
2339     assert(mode >= 0 && mode < numModes, "bad mode");
2340 
2341     if (opr->is_register()) {
2342        assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2343       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2344 
2345     } else if (opr->is_pointer()) {
2346       LIR_Address* address = opr->as_address_ptr();
2347       if (address != NULL) {
2348         // special handling for addresses: add base and index register of the address
2349         // both are always input operands or temp if we want to extend
2350         // their liveness!
2351         if (mode == outputMode) {
2352           mode = inputMode;
2353         }
2354         assert (mode == inputMode || mode == tempMode, "input or temp only for addresses");
2355         if (address->_base->is_valid()) {
2356           assert(address->_base->is_register(), "must be");
2357           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2358           _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2359         }
2360         if (address->_index->is_valid()) {
2361           assert(address->_index->is_register(), "must be");
2362           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2363           _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2364         }
2365 
2366       } else {
2367         assert(opr->is_constant(), "constant operands are not processed");
2368       }
2369     } else {
2370       assert(opr->is_stack(), "stack operands are not processed");
2371     }
2372   }
2373 
2374   void append(CodeEmitInfo* info) {
2375     assert(info != NULL, "should not call this otherwise");
2376     assert(_info_len < maxNumberOfInfos, "array overflow");
2377     _info_new[_info_len++] = info;
2378   }
2379 
2380  public:
2381   LIR_OpVisitState()         { reset(); }
2382 
2383   LIR_Op* op() const         { return _op; }
2384   void set_op(LIR_Op* op)    { reset(); _op = op; }
2385 
2386   bool has_call() const      { return _has_call; }
2387   bool has_slow_case() const { return _has_slow_case; }
2388 
2389   void reset() {
2390     _op = NULL;
2391     _has_call = false;
2392     _has_slow_case = false;
2393 
2394     _oprs_len[inputMode] = 0;
2395     _oprs_len[tempMode] = 0;
2396     _oprs_len[outputMode] = 0;
2397     _info_len = 0;
2398   }
2399 
2400 
2401   int opr_count(OprMode mode) const {
2402     assert(mode >= 0 && mode < numModes, "bad mode");
2403     return _oprs_len[mode];
2404   }
2405 
2406   LIR_Opr opr_at(OprMode mode, int index) const {
2407     assert(mode >= 0 && mode < numModes, "bad mode");
2408     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2409     return *_oprs_new[mode][index];
2410   }
2411 
2412   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2413     assert(mode >= 0 && mode < numModes, "bad mode");
2414     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2415     *_oprs_new[mode][index] = opr;
2416   }
2417 
2418   int info_count() const {
2419     return _info_len;
2420   }
2421 
2422   CodeEmitInfo* info_at(int index) const {
2423     assert(index < _info_len, "index out of bounds");
2424     return _info_new[index];
2425   }
2426 
2427   XHandlers* all_xhandler();
2428 
2429   // collects all register operands of the instruction
2430   void visit(LIR_Op* op);
2431 
2432 #ifdef ASSERT
2433   // check that an operation has no operands
2434   bool no_operands(LIR_Op* op);
2435 #endif
2436 
2437   // LIR_Op visitor functions use these to fill in the state
2438   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2439   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2440   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2441   void do_info(CodeEmitInfo* info)        { append(info); }
2442 
2443   void do_stub(CodeStub* stub);
2444   void do_call()                          { _has_call = true; }
2445   void do_slow_case()                     { _has_slow_case = true; }
2446   void do_slow_case(CodeEmitInfo* info) {
2447     _has_slow_case = true;
2448     append(info);
2449   }
2450 };
2451 
2452 
2453 inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; };
2454 
2455 #endif // SHARE_C1_C1_LIR_HPP