src/share/vm/c1/c1_LIRGenerator.cpp

1736     } else  {
1737       value.load_item();
1738     }
1739   } else {
1740     value.load_for_store(field_type);
1741   }
1742 
1743   set_no_result(x);
1744 
1745 #ifndef PRODUCT
1746   if (PrintNotLoaded && needs_patching) {
1747     tty->print_cr("   ###class not loaded at store_%s bci %d",
1748                   x->is_static() ?  "static" : "field", x->printable_bci());
1749   }
1750 #endif
1751 
1752   if (x->needs_null_check() &&
1753       (needs_patching ||
1754        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1755     // emit an explicit null check because the offset is too large
1756     __ null_check(object.result(), new CodeEmitInfo(info));
1757   }
1758 
1759   LIR_Address* address;
1760   if (needs_patching) {
1761     // we need to patch the offset in the instruction so don't allow
1762     // generate_address to try to be smart about emitting the -1.
1763     // Otherwise the patching code won't know how to find the
1764     // instruction to patch.
1765     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1766   } else {
1767     address = generate_address(object.result(), x->offset(), field_type);
1768   }
1769 
1770   if (is_volatile && os::is_MP()) {
1771     __ membar_release();
1772   }
1773 
1774   if (is_oop) {
1775     // Do the pre-write barrier, if any.
1776     pre_barrier(LIR_OprFact::address(address),


1822   object.load_item();
1823 
1824 #ifndef PRODUCT
1825   if (PrintNotLoaded && needs_patching) {
1826     tty->print_cr("   ###class not loaded at load_%s bci %d",
1827                   x->is_static() ?  "static" : "field", x->printable_bci());
1828   }
1829 #endif
1830 
1831   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1832   if (x->needs_null_check() &&
1833       (needs_patching ||
1834        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1835        stress_deopt)) {
1836     LIR_Opr obj = object.result();
1837     if (stress_deopt) {
1838       obj = new_register(T_OBJECT);
1839       __ move(LIR_OprFact::oopConst(NULL), obj);
1840     }
1841     // emit an explicit null check because the offset is too large
1842     __ null_check(obj, new CodeEmitInfo(info));
1843   }
1844 
1845   LIR_Opr reg = rlock_result(x, field_type);
1846   LIR_Address* address;
1847   if (needs_patching) {
1848     // we need to patch the offset in the instruction so don't allow
1849     // generate_address to try to be smart about emitting the -1.
1850     // Otherwise the patching code won't know how to find the
1851     // instruction to patch.
1852     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1853   } else {
1854     address = generate_address(object.result(), x->offset(), field_type);
1855   }
1856 
1857   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
1858     __ membar();
1859   }
1860 
1861   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1862   if (needs_atomic_access && !needs_patching) {




1736     } else  {
1737       value.load_item();
1738     }
1739   } else {
1740     value.load_for_store(field_type);
1741   }
1742 
1743   set_no_result(x);
1744 
1745 #ifndef PRODUCT
1746   if (PrintNotLoaded && needs_patching) {
1747     tty->print_cr("   ###class not loaded at store_%s bci %d",
1748                   x->is_static() ?  "static" : "field", x->printable_bci());
1749   }
1750 #endif
1751 
1752   if (x->needs_null_check() &&
1753       (needs_patching ||
1754        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1755     // emit an explicit null check because the offset is too large
1756     LIR_Opr obj = object.result();
1757     if (needs_patching) {
1758       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1759       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1760       CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_null_check,
1761                                            Deoptimization::Action_none);
1762       __ cmp(lir_cond_equal, obj, LIR_OprFact::oopConst(NULL));
1763       __ branch(lir_cond_equal, T_OBJECT, deopt);
1764     } else {
1765       __ null_check(obj, new CodeEmitInfo(info));
1766     }
1767   }
1768 
1769   LIR_Address* address;
1770   if (needs_patching) {
1771     // we need to patch the offset in the instruction so don't allow
1772     // generate_address to try to be smart about emitting the -1.
1773     // Otherwise the patching code won't know how to find the
1774     // instruction to patch.
1775     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1776   } else {
1777     address = generate_address(object.result(), x->offset(), field_type);
1778   }
1779 
1780   if (is_volatile && os::is_MP()) {
1781     __ membar_release();
1782   }
1783 
1784   if (is_oop) {
1785     // Do the pre-write barrier, if any.
1786     pre_barrier(LIR_OprFact::address(address),


1832   object.load_item();
1833 
1834 #ifndef PRODUCT
1835   if (PrintNotLoaded && needs_patching) {
1836     tty->print_cr("   ###class not loaded at load_%s bci %d",
1837                   x->is_static() ?  "static" : "field", x->printable_bci());
1838   }
1839 #endif
1840 
1841   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1842   if (x->needs_null_check() &&
1843       (needs_patching ||
1844        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1845        stress_deopt)) {
1846     LIR_Opr obj = object.result();
1847     if (stress_deopt) {
1848       obj = new_register(T_OBJECT);
1849       __ move(LIR_OprFact::oopConst(NULL), obj);
1850     }
1851     // emit an explicit null check because the offset is too large
1852     if (needs_patching) {
1853       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1854       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1855       CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_null_check,
1856                                            Deoptimization::Action_none);
1857       __ cmp(lir_cond_equal, obj, LIR_OprFact::oopConst(NULL));
1858       __ branch(lir_cond_equal, T_OBJECT, deopt);
1859     } else {
1860       __ null_check(obj, new CodeEmitInfo(info));
1861     }
1862   }
1863 
1864   LIR_Opr reg = rlock_result(x, field_type);
1865   LIR_Address* address;
1866   if (needs_patching) {
1867     // we need to patch the offset in the instruction so don't allow
1868     // generate_address to try to be smart about emitting the -1.
1869     // Otherwise the patching code won't know how to find the
1870     // instruction to patch.
1871     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1872   } else {
1873     address = generate_address(object.result(), x->offset(), field_type);
1874   }
1875 
1876   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
1877     __ membar();
1878   }
1879 
1880   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1881   if (needs_atomic_access && !needs_patching) {
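
Note on the change above: for a getfield/putfield whose field holder class is not
loaded (the needs_patching case), field resolution must run before the receiver
null check, so an unloadable holder has to surface as NoClassDefFoundError rather
than as the NullPointerException an implicit or explicit null check in compiled
code would throw. The DeoptimizeStub added in both hunks handles this by branching
to a deoptimization when the receiver is NULL, letting the interpreter re-execute
the bytecode and fail during resolution. A minimal, hypothetical repro sketch
(class and method names are invented, not taken from the patch; it assumes
Holder.class is deleted after compilation so the class can never be loaded, and a
flag such as -XX:TieredStopAtLevel=1 so C1 compiles test()):

    // Hypothetical repro: compile both classes, then delete Holder.class.
    // Run with C1 enabled (e.g. -XX:TieredStopAtLevel=1) long enough for
    // test() to be compiled while Holder is still unloaded.
    class Holder {
        int f;                        // instance field in the missing class
    }

    class Main {
        static Holder h;              // stays null, so Holder is never loaded

        static int test() {
            return h.f;               // getfield: unresolved holder, null receiver
        }

        public static void main(String[] args) {
            Throwable last = null;
            for (int i = 0; i < 200_000; i++) {
                try {
                    test();
                } catch (Throwable t) {
                    last = t;         // expected every time: NoClassDefFoundError
                }
            }
            System.out.println(last); // a NullPointerException here is the old bug
        }
    }

Before this change, the thrown error could flip from NoClassDefFoundError while
interpreted to NullPointerException once test() was C1-compiled; with the
deoptimization stub the compiled code bails out to the interpreter and the error
type stays stable.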

