< prev index next >

src/share/vm/c1/c1_LIRGenerator.cpp

Print this page




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "ci/ciObjArray.hpp"
  35 #include "runtime/arguments.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "runtime/vm_version.hpp"
  39 #include "utilities/bitMap.inline.hpp"
  40 #include "utilities/macros.hpp"
  41 #if INCLUDE_ALL_GCS
  42 #include "gc_implementation/g1/heapRegion.hpp"
  43 #endif // INCLUDE_ALL_GCS
  44 
  45 #ifdef ASSERT
  46 #define __ gen()->lir(__FILE__, __LINE__)->
  47 #else
  48 #define __ gen()->lir()->
  49 #endif
  50 
  51 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
  52 #ifdef ARM
  53 #define PATCHED_ADDR  (204)
  54 #else
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 
  58 void PhiResolverState::reset(int max_vregs) {
     // Prepare the resolver state for a new resolution pass. Each growable
     // array is first extended (at_put_grow up to index max_vregs - 1,
     // filling with NULL) so the backing storage can hold one entry per
     // virtual register, then truncated back to logical length 0 so any
     // entries left over from a previous pass are discarded while the
     // allocated storage is kept for reuse.
  59   // Initialize array sizes
  60   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  61   _virtual_operands.trunc_to(0);
  62   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  63   _other_operands.trunc_to(0);
  64   _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  65   _vreg_table.trunc_to(0);
  66 }
  67 
  68 
  69 
  70 //--------------------------------------------------------------
  71 // PhiResolver
  72 
  73 // Resolves cycles:
  74 //


1582 
1583 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1584 
     // Emit the card-table post-barrier for a reference store: mark the card
     // that covers the store address so the collector rescans that region.
     // 'addr' may arrive as a LIR_Address (e.g. array element stores) or as
     // a register; 'new_val' is not used by this barrier variant.
     // The card table is addressed one byte per card, hence the assert that
     // byte_map_base entries are jbyte-sized.
1585   assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1586   LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1587   if (addr->is_address()) {
1588     LIR_Address* address = addr->as_address_ptr();
1589     // ptr cannot be an object because we use this barrier for array card marks
1590     // and addr can point in the middle of an array.
1591     LIR_Opr ptr = new_pointer_register();
1592     if (!address->index()->is_valid() && address->disp() == 0) {
       // Simple base-only address: the base register already holds the
       // effective address, so a plain move suffices.
1593       __ move(address->base(), ptr);
1594     } else {
       // max_jint displacement is the sentinel used for to-be-patched
       // addresses (see PATCHED_ADDR above); lea cannot encode those.
1595       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1596       __ leal(addr, ptr);
1597     }
1598     addr = ptr;
1599   }
1600   assert(addr->is_register(), "must be a register at this point");
1601 
1602 #ifdef ARM
1603   // TODO: ARM - move to platform-dependent code
     // ARM path: the card table base is materialized into the fixed register
     // R14, either as an immediate (movw/movt capable CPUs) or loaded from
     // the thread-local copy kept at card_table_base_offset().
1604   LIR_Opr tmp = FrameMap::R14_opr;
1605   if (VM_Version::supports_movw()) {
1606     __ move((LIR_Opr)card_table_base, tmp);
1607   } else {
1608     __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1609   }
1610 
     // The negative scale encodes a right shift of the address by
     // card_shift when forming the card address (ARM-specific LIR_Address
     // feature — NOTE(review): confirm against the ARM LIR_Address docs).
1611   CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1612   LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
     // If the low byte of the card table base is 0 we can store tmp itself
     // as the dirty marker (its low byte is the value stored for a T_BYTE
     // move); otherwise materialize an explicit zero. The dirty-card value
     // is assumed to be 0 — NOTE(review): confirm against
     // CardTableModRefBS::dirty_card_val().
1613   if(((int)ct->byte_map_base & 0xff) == 0) {
1614     __ move(tmp, card_addr);
1615   } else {
1616     LIR_Opr tmp_zero = new_register(T_INT);
1617     __ move(LIR_OprFact::intConst(0), tmp_zero);
1618     __ move(tmp_zero, card_addr);
1619   }
1620 #else // ARM
     // Generic path: card index = store address >> card_shift. On
     // two-operand architectures the shift must not clobber 'addr', so the
     // value is copied into tmp first.
1621   LIR_Opr tmp = new_pointer_register();
1622   if (TwoOperandLIRForm) {
1623     __ move(addr, tmp);
1624     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1625   } else {
1626     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1627   }
     // Store 0 (the assumed dirty-card value — see note above) at
     // card_table_base + card index. Use the base as an immediate
     // displacement when the platform can encode it, otherwise load it
     // into a register first.
1628   if (can_inline_as_constant(card_table_base)) {
1629     __ move(LIR_OprFact::intConst(0),
1630               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1631   } else {
1632     __ move(LIR_OprFact::intConst(0),
1633               new LIR_Address(tmp, load_constant(card_table_base),
1634                               T_BYTE));
1635   }
1636 #endif // ARM
1637 }
1638 
1639 
1640 //------------------------field access--------------------------------------
1641 
1642 // Comment copied from templateTable_i486.cpp
1643 // ----------------------------------------------------------------------------
1644 // Volatile variables demand their effects be made known to all CPU's in
1645 // order.  Store buffers on most chips allow reads & writes to reorder; the
1646 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1647 // memory barrier (i.e., it's not sufficient that the interpreter does not
1648 // reorder volatile references, the hardware also must not reorder them).
1649 //
1650 // According to the new Java Memory Model (JMM):
1651 // (1) All volatiles are serialized wrt to each other.
1652 // ALSO reads & writes act as acquire & release, so:
1653 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1654 // the read float up to before the read.  It's OK for non-volatile memory refs
1655 // that happen before the volatile read to float down below it.
1656 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs


2106   // At this point base is a long non-constant
2107   // Index is a long register or a int constant.
2108   // We allow the constant to stay an int because that would allow us a more compact encoding by
2109   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2110   // move it into a register first.
2111   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2112   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2113                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2114 #endif
2115 
2116   BasicType dst_type = x->basic_type();
2117 
2118   LIR_Address* addr;
2119   if (index_op->is_constant()) {
2120     assert(log2_scale == 0, "must not have a scale");
2121     assert(index_op->type() == T_INT, "only int constants supported");
2122     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2123   } else {
2124 #ifdef X86
2125     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2126 #elif defined(ARM)
2127     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2128 #else
2129     if (index_op->is_illegal() || log2_scale == 0) {
2130       addr = new LIR_Address(base_op, index_op, dst_type);
2131     } else {
2132       LIR_Opr tmp = new_pointer_register();
2133       __ shift_left(index_op, log2_scale, tmp);
2134       addr = new LIR_Address(base_op, tmp, dst_type);
2135     }
2136 #endif
2137   }
2138 
2139   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2140     __ unaligned_move(addr, reg);
2141   } else {
2142     if (dst_type == T_OBJECT && x->is_wide()) {
2143       __ move_wide(addr, reg);
2144     } else {
2145       __ move(addr, reg);
2146     }


2160   LIRItem value(x->value(), this);
2161   LIRItem idx(this);
2162 
2163   base.load_item();
2164   if (x->has_index()) {
2165     idx.set_instruction(x->index());
2166     idx.load_item();
2167   }
2168 
2169   if (type == T_BYTE || type == T_BOOLEAN) {
2170     value.load_byte_item();
2171   } else {
2172     value.load_item();
2173   }
2174 
2175   set_no_result(x);
2176 
2177   LIR_Opr base_op = base.result();
2178   LIR_Opr index_op = idx.result();
2179 



2180 #ifndef _LP64
2181   if (base_op->type() == T_LONG) {
2182     base_op = new_register(T_INT);
2183     __ convert(Bytecodes::_l2i, base.result(), base_op);
2184   }
2185   if (x->has_index()) {
2186     if (index_op->type() == T_LONG) {
2187       index_op = new_register(T_INT);
2188       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2189     }
2190   }
2191   // At this point base and index should be all ints and not constants
2192   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2193   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2194 #else
2195   if (x->has_index()) {
2196     if (index_op->type() == T_INT) {
2197       index_op = new_register(T_LONG);
2198       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2199     }
2200   }
2201   // At this point base and index are long and non-constant
2202   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2203   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2204 #endif
2205 
2206   if (log2_scale != 0) {
2207     // temporary fix (platform dependent code without shift on Intel would be better)
2208     // TODO: ARM also allows embedded shift in the address
2209     __ shift_left(index_op, log2_scale, index_op);
2210   }
2211 
2212   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());

2213   __ move(value.result(), addr);
2214 }
2215 
2216 
2217 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2218   BasicType type = x->basic_type();
2219   LIRItem src(x->object(), this);
2220   LIRItem off(x->offset(), this);
2221 
2222   off.load_item();
2223   src.load_item();
2224 
2225   LIR_Opr value = rlock_result(x, x->basic_type());
2226 
2227   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2228 
2229 #if INCLUDE_ALL_GCS
2230   // We might be reading the value of the referent field of a
2231   // Reference object in order to attach it back to the live
2232   // object graph. If G1 is enabled then we need to record




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "runtime/arguments.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/vm_version.hpp"
  40 #include "utilities/bitMap.inline.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_ALL_GCS
  43 #include "gc_implementation/g1/heapRegion.hpp"
  44 #endif // INCLUDE_ALL_GCS
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
  52 #ifndef PATCHED_ADDR



  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset(int max_vregs) {
     // Prepare the resolver state for a new resolution pass. Each growable
     // array is first extended (at_put_grow up to index max_vregs - 1,
     // filling with NULL) so the backing storage can hold one entry per
     // virtual register, then truncated back to logical length 0 so any
     // entries left over from a previous pass are discarded while the
     // allocated storage is kept for reuse.
  57   // Initialize array sizes
  58   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  59   _virtual_operands.trunc_to(0);
  60   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  61   _other_operands.trunc_to(0);
  62   _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  63   _vreg_table.trunc_to(0);
  64 }
  65 
  66 
  67 
  68 //--------------------------------------------------------------
  69 // PhiResolver
  70 
  71 // Resolves cycles:
  72 //


1580 
1581 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1582 
     // Emit the card-table post-barrier for a reference store: mark the card
     // that covers the store address so the collector rescans that region.
     // 'addr' may arrive as a LIR_Address (e.g. array element stores) or as
     // a register; 'new_val' is not used by this barrier variant.
     // The card table is addressed one byte per card, hence the assert that
     // byte_map_base entries are jbyte-sized.
1583   assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1584   LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1585   if (addr->is_address()) {
1586     LIR_Address* address = addr->as_address_ptr();
1587     // ptr cannot be an object because we use this barrier for array card marks
1588     // and addr can point in the middle of an array.
1589     LIR_Opr ptr = new_pointer_register();
1590     if (!address->index()->is_valid() && address->disp() == 0) {
       // Simple base-only address: the base register already holds the
       // effective address, so a plain move suffices.
1591       __ move(address->base(), ptr);
1592     } else {
       // max_jint displacement is the sentinel used for to-be-patched
       // addresses (see PATCHED_ADDR above); lea cannot encode those.
1593       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1594       __ leal(addr, ptr);
1595     }
1596     addr = ptr;
1597   }
1598   assert(addr->is_register(), "must be a register at this point");
1599 
1600 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
     // Platforms that define CARDTABLEMODREF_POST_BARRIER_HELPER supply
     // their own card-marking sequence; delegate to it instead of the
     // generic shift-and-store code below.
1601   CardTableModRef_post_barrier_helper(addr, card_table_base);
1602 #else
















     // Generic path: card index = store address >> card_shift. On
     // two-operand architectures the shift must not clobber 'addr', so the
     // value is copied into tmp first.
1603   LIR_Opr tmp = new_pointer_register();
1604   if (TwoOperandLIRForm) {
1605     __ move(addr, tmp);
1606     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1607   } else {
1608     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1609   }
     // Store 0 at card_table_base + card index; 0 is assumed to be the
     // dirty-card value — NOTE(review): confirm against
     // CardTableModRefBS::dirty_card_val(). Use the base as an immediate
     // displacement when the platform can encode it, otherwise load it
     // into a register first.
1610   if (can_inline_as_constant(card_table_base)) {
1611     __ move(LIR_OprFact::intConst(0),
1612               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1613   } else {
1614     __ move(LIR_OprFact::intConst(0),
1615               new LIR_Address(tmp, load_constant(card_table_base),
1616                               T_BYTE));
1617   }
1618 #endif
1619 }
1620 
1621 
1622 //------------------------field access--------------------------------------
1623 
1624 // Comment copied from templateTable_i486.cpp
1625 // ----------------------------------------------------------------------------
1626 // Volatile variables demand their effects be made known to all CPU's in
1627 // order.  Store buffers on most chips allow reads & writes to reorder; the
1628 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1629 // memory barrier (i.e., it's not sufficient that the interpreter does not
1630 // reorder volatile references, the hardware also must not reorder them).
1631 //
1632 // According to the new Java Memory Model (JMM):
1633 // (1) All volatiles are serialized wrt to each other.
1634 // ALSO reads & writes act as acquire & release, so:
1635 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1636 // the read float up to before the read.  It's OK for non-volatile memory refs
1637 // that happen before the volatile read to float down below it.
1638 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs


2088   // At this point base is a long non-constant
2089   // Index is a long register or a int constant.
2090   // We allow the constant to stay an int because that would allow us a more compact encoding by
2091   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2092   // move it into a register first.
2093   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2094   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2095                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2096 #endif
2097 
2098   BasicType dst_type = x->basic_type();
2099 
2100   LIR_Address* addr;
2101   if (index_op->is_constant()) {
2102     assert(log2_scale == 0, "must not have a scale");
2103     assert(index_op->type() == T_INT, "only int constants supported");
2104     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2105   } else {
2106 #ifdef X86
2107     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2108 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2109     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2110 #else
2111     if (index_op->is_illegal() || log2_scale == 0) {
2112       addr = new LIR_Address(base_op, index_op, dst_type);
2113     } else {
2114       LIR_Opr tmp = new_pointer_register();
2115       __ shift_left(index_op, log2_scale, tmp);
2116       addr = new LIR_Address(base_op, tmp, dst_type);
2117     }
2118 #endif
2119   }
2120 
2121   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2122     __ unaligned_move(addr, reg);
2123   } else {
2124     if (dst_type == T_OBJECT && x->is_wide()) {
2125       __ move_wide(addr, reg);
2126     } else {
2127       __ move(addr, reg);
2128     }


2142   LIRItem value(x->value(), this);
2143   LIRItem idx(this);
2144 
2145   base.load_item();
2146   if (x->has_index()) {
2147     idx.set_instruction(x->index());
2148     idx.load_item();
2149   }
2150 
2151   if (type == T_BYTE || type == T_BOOLEAN) {
2152     value.load_byte_item();
2153   } else {
2154     value.load_item();
2155   }
2156 
2157   set_no_result(x);
2158 
2159   LIR_Opr base_op = base.result();
2160   LIR_Opr index_op = idx.result();
2161 
2162 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2163   LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2164 #else
2165 #ifndef _LP64
2166   if (base_op->type() == T_LONG) {
2167     base_op = new_register(T_INT);
2168     __ convert(Bytecodes::_l2i, base.result(), base_op);
2169   }
2170   if (x->has_index()) {
2171     if (index_op->type() == T_LONG) {
2172       index_op = new_register(T_INT);
2173       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2174     }
2175   }
2176   // At this point base and index should be all ints and not constants
2177   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2178   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2179 #else
2180   if (x->has_index()) {
2181     if (index_op->type() == T_INT) {
2182       index_op = new_register(T_LONG);
2183       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2184     }
2185   }
2186   // At this point base and index are long and non-constant
2187   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2188   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2189 #endif
2190 
2191   if (log2_scale != 0) {
2192     // temporary fix (platform dependent code without shift on Intel would be better)
2193     // TODO: ARM also allows embedded shift in the address
2194     __ shift_left(index_op, log2_scale, index_op);
2195   }
2196 
2197   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2198 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2199   __ move(value.result(), addr);
2200 }
2201 
2202 
2203 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2204   BasicType type = x->basic_type();
2205   LIRItem src(x->object(), this);
2206   LIRItem off(x->offset(), this);
2207 
2208   off.load_item();
2209   src.load_item();
2210 
2211   LIR_Opr value = rlock_result(x, x->basic_type());
2212 
2213   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2214 
2215 #if INCLUDE_ALL_GCS
2216   // We might be reading the value of the referent field of a
2217   // Reference object in order to attach it back to the live
2218   // object graph. If G1 is enabled then we need to record


< prev index next >