    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

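  // On MP systems a release barrier is emitted before a volatile store so that
  // writes performed earlier by this thread cannot be reordered past the store.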
  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

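  // For oop stores, emit the GC pre-write barrier (e.g. G1's SATB barrier
  // records the field's previous value). needs_patching is passed along so the
  // barrier's own load of the previous value can be patched as well.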
  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */,
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

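  // Volatile fields, and all fields when AlwaysAtomicAccesses is set, must be
  // written atomically. Patched accesses cannot take the volatile_field_store
  // path, so they fall back to an ordinary patchable store.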
  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

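  // For oop stores, emit the GC post-write barrier (card marking / remembered
  // set update) against the destination object.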
  if (is_oop) {
    // Store to an object, so mark the card of the header.
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}
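
// Taken together, a volatile oop field store on an MP system expands to roughly
// the following sequence (illustrative ordering only, not literal generated code):
//
//   membar_release                  // release fence before the store
//   pre_barrier(field address)      // GC pre-write barrier (e.g. G1 SATB), if oop
//   store value into field          // volatile_field_store when atomic access is needed
//   post_barrier(object, value)     // card mark / remembered-set update, if oop
//   membar                          // full fence after the volatile store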


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  // ... (intervening lines elided) ...
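    // Under deoptimization stress testing, substitute a null object so the
    // explicit null check below is guaranteed to trap and deoptimize.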
    if (stress_deopt) {
      obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
    }
    // emit an explicit null check because the offset is too large
    __ null_check(obj, new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
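  // needs_patching means the field's holder class was not resolved at compile
  // time, so the real offset is unknown and the access site must be patched at
  // run time once the class is loaded.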
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

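  // As with stores, volatile fields (and all fields under AlwaysAtomicAccesses)
  // must be read atomically; patched accesses use the ordinary patchable load.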
  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

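  // Acquire barrier after a volatile load: later memory operations in this
  // thread cannot be reordered ahead of the load.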
  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");