        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableBarrierSet:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      G1BarrierSet_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableBarrierSet:
      CardTableBarrierSet_post_barrier(addr, new_val);
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                            bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
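
// The LIR emitted above is the SATB pre-write barrier fast path. At runtime
// it behaves roughly as follows (a sketch; "enqueue" stands for the work
// done in the G1PreBarrierStub slow path):
//
//   if (thread->satb_mark_queue_active != 0) {    // concurrent marking running?
//     pre_val = *addr;                            // only when do_load is true
//     if (pre_val != NULL) {
//       enqueue(pre_val);                         // log the value about to be overwritten
//     }
//   }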

void LIRGenerator::G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
  CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
  }

  LIR_Address* card_addr;
  if (can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    LIR_Opr cur_value = new_register(T_INT);
    if (UseConcMarkSweepGC) {
      __ membar_storeload();
    }
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
#if INCLUDE_ALL_GCS
    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
      __ membar_storestore();
    }
#endif
    __ move(dirty, card_addr);
  }
#endif
}
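
// Conceptually, the card mark emitted above is (a sketch):
//
//   jbyte* card = card_table_base + (uintptr_t(addr) >> card_shift);
//   if (UseCondCardMark) {
//     if (*card != dirty_card_val()) *card = dirty_card_val();  // skip redundant writes
//   } else {
//     *card = dirty_card_val();
//   }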


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read. It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write. It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs). Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads. These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
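
// In LIR terms the placement described above comes out as (a sketch; see
// do_StoreField below for the store side):
//
//   volatile store:  membar_release; store; membar     // trailing membar covers store-load
//   volatile load:   load; membar_acquire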


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load*/,
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
  }

  if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
    __ membar();
  }
}
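
// So for a volatile oop field the method above emits, in order (a sketch):
//
//   membar_release
//   pre_barrier(addr)           // SATB: log the old value (G1 only)
//   store value -> addr         // via volatile_field_store if atomicity is required
//   post_barrier(obj, value)    // card mark / remembered set
//   membar                      // store-load fence, unless IRIW handling makes it redundant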


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

|
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

//------------------------field access--------------------------------------

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");

  // On 64-bit the type can be long; sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  DecoratorSet decorators = IN_HEAP | MO_SEQ_CST;
  LIR_Opr result = access_atomic_cmpxchg_at(decorators, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}
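
// Semantically, the emitted compare-and-swap is (a sketch):
//
//   atomically {
//     if (*(obj + offset) == cmp) { *(obj + offset) = val; result = 1; }
//     else                        { result = 0;                        }
//   }
//
// with MO_SEQ_CST ordering; any GC barriers are supplied by the BarrierSetC1
// backend behind access_atomic_cmpxchg_at.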

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read. It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write. It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs). Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads. These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                  value.result(), info ? new CodeEmitInfo(info) : NULL, info);
}
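
// Note how the barrier emission that the old do_StoreField open-coded
// (pre/post barriers, release and trailing membars, patched addresses) is
// now chosen by the BarrierSetC1 backend from the decorators alone:
//
//   is_volatile     ->  MO_SEQ_CST          (memory ordering)
//   needs_patching  ->  C1_NEEDS_PATCHING   (field offset not yet known)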

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
  }

  DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
  if (x->check_boolean()) {
    decorators |= C1_MASK_BOOLEAN;
  }

  access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), NULL, null_check_info);
}

void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  BarrierSetC1 *bs = BarrierSet::barrier_set()->barrier_set_c1();
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetC1::load_at(this, decorators, type,
                              base, offset, result, patch_info, load_emit_info);
  } else {
    bs->load_at(this, decorators, type,
                base, offset, result, patch_info, load_emit_info);
  }
}

void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  BarrierSetC1 *bs = BarrierSet::barrier_set()->barrier_set_c1();
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetC1::store_at(this, decorators, type,
                               base, offset, value, patch_info, store_emit_info);
  } else {
    bs->store_at(this, decorators, type,
                 base, offset, value, patch_info, store_emit_info);
  }
}

LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                               LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  BarrierSetC1 *bs = BarrierSet::barrier_set()->barrier_set_c1();
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    return bs->BarrierSetC1::atomic_cmpxchg_at(this, decorators, type,
                                               base, offset, cmp_value, new_value);
  } else {
    return bs->atomic_cmpxchg_at(this, decorators, type,
                                 base, offset, cmp_value, new_value);
  }
}

LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
  BarrierSetC1 *bs = BarrierSet::barrier_set()->barrier_set_c1();
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    return bs->BarrierSetC1::atomic_xchg(this, decorators, type,
                                         base, offset, value);
  } else {
    return bs->atomic_xchg(this, decorators, type,
                           base, offset, value);
  }
}

LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                           LIRItem& base, LIRItem& offset, LIRItem& value) {
  BarrierSetC1 *bs = BarrierSet::barrier_set()->barrier_set_c1();
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    return bs->BarrierSetC1::atomic_add_at(this, decorators, type,
                                           base, offset, value);
  } else {
    return bs->atomic_add_at(this, decorators, type,
                             base, offset, value);
  }
}
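
// All of these wrappers share one dispatch idiom: the qualified call
// bs->BarrierSetC1::load_at(...) binds statically to the base class and so
// bypasses every GC-specific barrier, while the unqualified virtual call
// lets the active barrier set decorate the access. A caller sketch
// (operand names are illustrative):
//
//   access_store_at(IN_HEAP, T_OBJECT, base, offset, value, NULL, NULL);           // barriers included
//   access_store_at(IN_HEAP | AS_RAW, T_OBJECT, base, offset, value, NULL, NULL);  // raw, no barriers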

void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

|
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}
|
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;

  LIR_Opr result = rlock_result(x, x->elt_type());
  access_load_at(decorators, x->elt_type(),
                 array, index.result(), result,
                 NULL, null_check_info);
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}
|
    __ shift_left(index_op, log2_scale, tmp);
    if (!TwoOperandLIRForm) {
      index_op = tmp;
    }
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
#endif // !GENERATE_ADDRESS_IS_PREFERRED
  __ move(value.result(), addr);
}


void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  LIR_Opr value = rlock_result(x, x->basic_type());

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
    __ membar();
  }

  get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());

#if INCLUDE_ALL_GCS
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  if (UseG1GC && type == T_OBJECT) {
    bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
    bool gen_offset_check = true;    // Assume we need to generate the offset guard.
    bool gen_source_check = true;    // Assume we need to check the src object for null.
    bool gen_type_check = true;      // Assume we need to check the reference_type.

    if (off.is_constant()) {
      jlong off_con = (off.type()->is_int() ?
                       (jlong) off.get_jint_constant() :
                       off.get_jlong_constant());

      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
        // The constant offset is something other than referent_offset.
        // We can skip generating/checking the remaining guards and
        // skip generation of the code stub.
        gen_pre_barrier = false;
      } else {
        // The constant offset is the same as referent_offset -
        // we do not need to generate a runtime offset check.
        gen_offset_check = false;
      }
    }

    // We don't need to generate stub if the source object is an array
    if (gen_pre_barrier && src.type()->is_array()) {
      gen_pre_barrier = false;
    }

    if (gen_pre_barrier) {
      // We still need to continue with the checks.
      if (src.is_constant()) {
        ciObject* src_con = src.get_jobject_constant();
        guarantee(src_con != NULL, "no source constant");

        if (src_con->is_null_object()) {
          // The constant src object is null - We can skip
          // generating the code stub.
          gen_pre_barrier = false;
        } else {
          // Non-null constant source object. We still have to generate
          // the slow stub - but we don't need to generate the runtime
          // null object check.
          gen_source_check = false;
        }
      }
    }
    if (gen_pre_barrier && !PatchALot) {
      // Can the klass of object be statically determined to be
      // a sub-class of Reference?
      ciType* type = src.value()->declared_type();
      if ((type != NULL) && type->is_loaded()) {
        if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
          gen_type_check = false;
        } else if (type->is_klass() &&
                   !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
          // Not Reference and not Object klass.
          gen_pre_barrier = false;
        }
      }
    }

    if (gen_pre_barrier) {
      LabelObj* Lcont = new LabelObj();

      // We may need to generate more than one runtime check here. Let's
      // start with the offset check.
      if (gen_offset_check) {
        // if (offset != referent_offset) -> continue
        // If offset is an int then we can do the comparison with the
        // referent_offset constant; otherwise we need to move
        // referent_offset into a temporary register and generate
        // a reg-reg compare.

        LIR_Opr referent_off;

        if (off.type()->is_int()) {
          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
        } else {
          assert(off.type()->is_long(), "what else?");
          referent_off = new_register(T_LONG);
          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
        }
        __ cmp(lir_cond_notEqual, off.result(), referent_off);
        __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
      }
      if (gen_source_check) {
        // offset is a const and equals referent offset
        // if (source == null) -> continue
        __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
        __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
      }
      LIR_Opr src_klass = new_register(T_OBJECT);
      if (gen_type_check) {
        // We have determined that offset == referent_offset && src != null.
        // if (src->_klass->_reference_type == REF_NONE) -> continue
        __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
        LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
        LIR_Opr reference_type = new_register(T_INT);
        __ move(reference_type_addr, reference_type);
        __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
        __ branch(lir_cond_equal, T_INT, Lcont->label());
      }
      {
        // We have determined that src->_klass->_reference_type != REF_NONE
        // so register the value in the referent field with the pre-barrier.
        pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
                    value  /* pre_val */,
                    false  /* do_load */,
                    false  /* patch */,
                    NULL   /* info */);
      }
      __ branch_destination(Lcont->label());
    }
  }
#endif // INCLUDE_ALL_GCS

  if (x->is_volatile() && os::is_MP()) __ membar_acquire();

  /* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value = false. */
  if (type == T_BOOLEAN) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, value, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), value);
    __ branch_destination(equalZeroLabel->label());
  }
}
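
// The compare/branch/move sequence above normalizes the result without a
// setcc-style instruction; in C terms (a sketch): value = (value != 0) ? 1 : 0;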


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
}


void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
|
    __ shift_left(index_op, log2_scale, tmp);
    if (!TwoOperandLIRForm) {
      index_op = tmp;
    }
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
#endif // !GENERATE_ADDRESS_IS_PREFERRED
  __ move(value.result(), addr);
}


void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  DecoratorSet decorators = IN_HEAP;

  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  if (type == T_BOOLEAN) {
    decorators |= C1_MASK_BOOLEAN;
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result = rlock_result(x, type);
  access_load_at(decorators, type,
                 src, off.result(), result,
                 NULL, NULL);
}
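
// Note: the open-coded G1 Reference.get() handling from the old version (the
// referent_offset, null-source and reference_type guards feeding pre_barrier)
// is gone. ON_UNKNOWN_OOP_REF tells the BarrierSetC1 backend that this oop
// load may read a Reference.referent field, so the backend emits the
// equivalent guards itself when the collector requires them, roughly:
//
//   if (offset == referent_offset && src is a non-null Reference)
//     pre_barrier(result);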


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  DecoratorSet decorators = IN_HEAP;
  if (type == T_ARRAY || type == T_OBJECT) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }
  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  access_store_at(decorators, type, src, off.result(), data.result(), NULL, NULL);
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  DecoratorSet decorators = IN_HEAP | MO_SEQ_CST;

  if (type == T_ARRAY || type == T_OBJECT) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result;
  if (x->is_add()) {
    result = access_atomic_add_at(decorators, type, src, off, value);
  } else {
    result = access_atomic_xchg_at(decorators, type, src, off, value);
  }
  set_result(x, result);
}

void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
|
  }
  return result;
}

void LIRGenerator::do_MemBar(MemBar* x) {
  if (os::is_MP()) {
    LIR_Code code = x->code();
    switch(code) {
      case lir_membar_acquire   : __ membar_acquire(); break;
      case lir_membar_release   : __ membar_release(); break;
      case lir_membar           : __ membar(); break;
      case lir_membar_loadload  : __ membar_loadload(); break;
      case lir_membar_storestore: __ membar_storestore(); break;
      case lir_membar_loadstore : __ membar_loadstore(); break;
      case lir_membar_storeload : __ membar_storeload(); break;
      default                   : ShouldNotReachHere(); break;
    }
  }
}

LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    LIR_Opr value_fixed = rlock_byte(T_BYTE);
    if (TwoOperandLIRForm) {
      __ move(value, value_fixed);
      __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
    } else {
      __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
    }
    LIR_Opr klass = new_register(T_METADATA);
    __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
    null_check_info = NULL;
    LIR_Opr layout = new_register(T_INT);
    __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
    int diffbit = Klass::layout_helper_boolean_diffbit();
    __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
    __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
    __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
    value = value_fixed;
  }
  return value;
}
|
  }
  return result;
}

void LIRGenerator::do_MemBar(MemBar* x) {
  if (os::is_MP()) {
    LIR_Code code = x->code();
    switch(code) {
      case lir_membar_acquire   : __ membar_acquire(); break;
      case lir_membar_release   : __ membar_release(); break;
      case lir_membar           : __ membar(); break;
      case lir_membar_loadload  : __ membar_loadload(); break;
      case lir_membar_storestore: __ membar_storestore(); break;
      case lir_membar_loadstore : __ membar_loadstore(); break;
      case lir_membar_storeload : __ membar_storeload(); break;
      default                   : ShouldNotReachHere(); break;
    }
  }
}

LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  LIR_Opr value_fixed = rlock_byte(T_BYTE);
  if (TwoOperandLIRForm) {
    __ move(value, value_fixed);
    __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
  } else {
    __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
  }
  LIR_Opr klass = new_register(T_METADATA);
  __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
  null_check_info = NULL;
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
  __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
  __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
  value = value_fixed;
  return value;
}

LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    value = mask_boolean(array, value, null_check_info);
  }
  return value;
}
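
// Why the layout_helper check in mask_boolean: T_BOOLEAN and T_BYTE arrays
// share the same element size, so a store through an erased path may hit a
// boolean[] without the compiler knowing it statically. The diffbit in the
// klass layout_helper distinguishes the two at runtime; conceptually (a sketch):
//
//   if (array->klass()->layout_helper() & layout_helper_boolean_diffbit()) {
//     value &= 1;   // storing into boolean[]: keep only the low bit
//   }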
|