< prev index next >

src/cpu/x86/vm/c1_LIRAssembler_x86.cpp

Print this page
rev 8961 : [mq]: diff-shenandoah.patch


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"

  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/cardTableModRefBS.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "vmreg_x86.inline.hpp"
  42 
  43 
  44 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  45 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  46 // fast versions of NegF/NegD and AbsF/AbsD.
  47 
  48 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  49 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  50   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  51   // of 128-bits operands for SSE instructions.
  52   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  53   // Store the value to a 128-bits operand.
  54   operand[0] = lo;


1449         case lir_cond_greater:      acond = Assembler::above;      break;
1450         default:                         ShouldNotReachHere();
1451       }
1452     } else {
1453       switch (op->cond()) {
1454         case lir_cond_equal:        acond = Assembler::equal;       break;
1455         case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
1456         case lir_cond_less:         acond = Assembler::less;        break;
1457         case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
1458         case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1459         case lir_cond_greater:      acond = Assembler::greater;     break;
1460         case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
1461         case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
1462         default:                         ShouldNotReachHere();
1463       }
1464     }
1465     __ jcc(acond,*(op->label()));
1466   }
1467 }
1468 

















































1469 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1470   LIR_Opr src  = op->in_opr();
1471   LIR_Opr dest = op->result_opr();
1472 
1473   switch (op->bytecode()) {
1474     case Bytecodes::_i2l:
1475 #ifdef _LP64
1476       __ movl2ptr(dest->as_register_lo(), src->as_register());
1477 #else
1478       move_regs(src->as_register(), dest->as_register_lo());
1479       move_regs(src->as_register(), dest->as_register_hi());
1480       __ sarl(dest->as_register_hi(), 31);
1481 #endif // LP64
1482       break;
1483 
1484     case Bytecodes::_l2i:
1485 #ifdef _LP64
1486       __ movl(dest->as_register(), src->as_register_lo());
1487 #else
1488       move_regs(src->as_register_lo(), dest->as_register());


1945     assert(cmpval == rax, "wrong register");
1946     assert(newval != NULL, "new val must be register");
1947     assert(cmpval != newval, "cmp and new values must be in different registers");
1948     assert(cmpval != addr, "cmp and addr must be in different registers");
1949     assert(newval != addr, "new value and addr must be in different registers");
1950 
1951     if ( op->code() == lir_cas_obj) {
1952 #ifdef _LP64
1953       if (UseCompressedOops) {
1954         __ encode_heap_oop(cmpval);
1955         __ mov(rscratch1, newval);
1956         __ encode_heap_oop(rscratch1);
1957         if (os::is_MP()) {
1958           __ lock();
1959         }
1960         // cmpval (rax) is implicitly used by this instruction
1961         __ cmpxchgl(rscratch1, Address(addr, 0));
1962       } else
1963 #endif
1964       {
































1965         if (os::is_MP()) {
1966           __ lock();
1967         }
1968         __ cmpxchgptr(newval, Address(addr, 0));

1969       }
1970     } else {
1971       assert(op->code() == lir_cas_int, "lir_cas_int expected");
1972       if (os::is_MP()) {
1973         __ lock();
1974       }
1975       __ cmpxchgl(newval, Address(addr, 0));
1976     }
1977 #ifdef _LP64
1978   } else if (op->code() == lir_cas_long) {
1979     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1980     Register newval = op->new_value()->as_register_lo();
1981     Register cmpval = op->cmp_value()->as_register_lo();
1982     assert(cmpval == rax, "wrong register");
1983     assert(newval != NULL, "new val must be register");
1984     assert(cmpval != newval, "cmp and new values must be in different registers");
1985     assert(cmpval != addr, "cmp and addr must be in different registers");
1986     assert(newval != addr, "new value and addr must be in different registers");
1987     if (os::is_MP()) {
1988       __ lock();




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "gc/shenandoah/shenandoahHeap.hpp"
  36 #include "gc/shared/barrierSet.hpp"
  37 #include "gc/shared/cardTableModRefBS.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "nativeInst_x86.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "vmreg_x86.inline.hpp"
  43 
  44 
  45 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  46 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  47 // fast versions of NegF/NegD and AbsF/AbsD.
  48 
  49 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  50 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  51   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  52   // of 128-bits operands for SSE instructions.
  53   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  54   // Store the value to a 128-bits operand.
  55   operand[0] = lo;


1450         case lir_cond_greater:      acond = Assembler::above;      break;
1451         default:                         ShouldNotReachHere();
1452       }
1453     } else {
1454       switch (op->cond()) {
1455         case lir_cond_equal:        acond = Assembler::equal;       break;
1456         case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
1457         case lir_cond_less:         acond = Assembler::less;        break;
1458         case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
1459         case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1460         case lir_cond_greater:      acond = Assembler::greater;     break;
1461         case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
1462         case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
1463         default:                         ShouldNotReachHere();
1464       }
1465     }
1466     __ jcc(acond,*(op->label()));
1467   }
1468 }
1469 
// Emit the Shenandoah write barrier: resolve 'obj' to its current (to-space)
// copy before a write. Fast path: apply the read barrier and exit unless
// evacuation is in progress AND the object lies in the collection set; slow
// path: call the shenandoah_write_barrier_slow_id Runtime1 stub, which takes
// its argument in rax and returns the resolved oop in rax.
// tmp1/tmp2 are scratch registers for the collection-set membership test.
1470 void LIR_Assembler::emit_opShenandoahWriteBarrier(LIR_OpShenandoahWriteBarrier* op) {
1471   Label done;
1472   Register obj = op->in_opr()->as_register();
1473   Register res = op->result_opr()->as_register();
1474   Register tmp1 = op->tmp1_opr()->as_register();
1475   Register tmp2 = op->tmp2_opr()->as_register();
1476   assert_different_registers(res, tmp1, tmp2);
1477 
       // Work on the result register so 'obj' is left untouched when they differ.
1478   if (res != obj) {
1479     __ mov(res, obj);
1480   }
1481 
1482   // Check for null.
       // A null reference needs no resolution; skip the barrier entirely.
1483   if (op->need_null_check()) {
1484     __ testptr(res, res);
1485     __ jcc(Assembler::zero, done);
1486   }
1487 
1488   // Check for evacuation-in-progress
       // Byte flag in the JavaThread; the flags set by this cmpb are consumed by
       // the jcc below — the intervening movptr does not modify EFLAGS, so the
       // read barrier is deliberately scheduled between the compare and the jump.
1489   Address evacuation_in_progress = Address(r15_thread, in_bytes(JavaThread::evacuation_in_progress_offset()));
1490   __ cmpb(evacuation_in_progress, 0);
1491 
1492   // The read-barrier.
       // Loads the forwarding pointer stored one word (8 bytes) before the object
       // header. NOTE(review): -8 is presumably the 64-bit Brooks-pointer offset —
       // confirm against the BrooksPointer definition rather than hard-coding.
1493   __ movptr(res, Address(res, -8));
1494 
       // Taken when evacuation_in_progress == 0: read-barrier result is final.
1495   __ jcc(Assembler::equal, done);
1496 
1497   // Check for object in collection set.
       // Region index = address >> RegionSizeShift; the fast-test table holds one
       // byte per region, non-zero meaning "in collection set".
1498   __ movptr(tmp1, res);
1499   __ shrptr(tmp1, ShenandoahHeapRegion::RegionSizeShift);
1500   __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
1501   __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
1502   __ testb(tmp2, 0x1);
1503   __ jcc(Assembler::zero, done);
1504 
       // Slow path: the stub expects the oop in rax. Preserve rax's old value in
       // 'res' via xchg and swap back after the call.
1505   if (res != rax) {
1506     __ xchgptr(res, rax); // Move obj into rax and save rax into obj.
1507   }
1508 
1509   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::shenandoah_write_barrier_slow_id)));
1510 
1511   if (res != rax) {
1512     __ xchgptr(rax, res); // Swap back obj with rax.
1513   }
1514 
1515   __ bind(done);
1516 
1517 }
1518 
1519 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1520   LIR_Opr src  = op->in_opr();
1521   LIR_Opr dest = op->result_opr();
1522 
1523   switch (op->bytecode()) {
1524     case Bytecodes::_i2l:
1525 #ifdef _LP64
1526       __ movl2ptr(dest->as_register_lo(), src->as_register());
1527 #else
1528       move_regs(src->as_register(), dest->as_register_lo());
1529       move_regs(src->as_register(), dest->as_register_hi());
1530       __ sarl(dest->as_register_hi(), 31);
1531 #endif // LP64
1532       break;
1533 
1534     case Bytecodes::_l2i:
1535 #ifdef _LP64
1536       __ movl(dest->as_register(), src->as_register_lo());
1537 #else
1538       move_regs(src->as_register_lo(), dest->as_register());


1995     assert(cmpval == rax, "wrong register");
1996     assert(newval != NULL, "new val must be register");
1997     assert(cmpval != newval, "cmp and new values must be in different registers");
1998     assert(cmpval != addr, "cmp and addr must be in different registers");
1999     assert(newval != addr, "new value and addr must be in different registers");
2000 
2001     if ( op->code() == lir_cas_obj) {
2002 #ifdef _LP64
2003       if (UseCompressedOops) {
2004         __ encode_heap_oop(cmpval);
2005         __ mov(rscratch1, newval);
2006         __ encode_heap_oop(rscratch1);
2007         if (os::is_MP()) {
2008           __ lock();
2009         }
2010         // cmpval (rax) is implicitly used by this instruction
2011         __ cmpxchgl(rscratch1, Address(addr, 0));
2012       } else
2013 #endif
2014       {
2015         if (UseShenandoahGC) {
2016           Label done;
2017           Label retry;
2018 
2019           __ bind(retry);
2020 
2021           // Save original cmp-value into tmp1, before following cas destroys it.
2022           __ movptr(op->tmp1()->as_register(), op->cmp_value()->as_register());
2023 
2024           if (os::is_MP()) {
2025             __ lock();
2026           }
2027           __ cmpxchgptr(newval, Address(addr, 0));
2028 
2029           // If the cmpxchg succeeded, then we're done.
2030           __ jcc(Assembler::equal, done);
2031 
2032           // Resolve the original cmp value.
2033           oopDesc::bs()->interpreter_read_barrier(masm(), op->tmp1()->as_register());
2034           // Resolve the old value at address. We get the old value in cmp/rax
2035           // when the comparison in cmpxchg failed.
2036           __ movptr(op->tmp2()->as_register(), cmpval);
2037           oopDesc::bs()->interpreter_read_barrier(masm(), op->tmp2()->as_register());
2038 
2039           // We're done if the expected/cmp value is not the same as old. It's a valid
2040           // cmpxchg failure then. Otherwise we need special treatment for Shenandoah
2041           // to prevent false positives.
2042           __ cmpptr(op->tmp1()->as_register(), op->tmp2()->as_register());
2043           __ jcc(Assembler::equal, retry);
2044 
2045           __ bind(done);
2046         } else {
2047           if (os::is_MP()) {
2048             __ lock();
2049           }
2050           __ cmpxchgptr(newval, Address(addr, 0));
2051         }
2052       }
2053     } else {
2054       assert(op->code() == lir_cas_int, "lir_cas_int expected");
2055       if (os::is_MP()) {
2056         __ lock();
2057       }
2058       __ cmpxchgl(newval, Address(addr, 0));
2059     }
2060 #ifdef _LP64
2061   } else if (op->code() == lir_cas_long) {
2062     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2063     Register newval = op->new_value()->as_register_lo();
2064     Register cmpval = op->cmp_value()->as_register_lo();
2065     assert(cmpval == rax, "wrong register");
2066     assert(newval != NULL, "new val must be register");
2067     assert(cmpval != newval, "cmp and new values must be in different registers");
2068     assert(cmpval != addr, "cmp and addr must be in different registers");
2069     assert(newval != addr, "new value and addr must be in different registers");
2070     if (os::is_MP()) {
2071       __ lock();


< prev index next >