
src/share/vm/c1/c1_LIRGenerator.cpp

rev 9031 : 8138894: C1: Support IRIW on weak memory platforms
Reviewed-by:
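
IRIW (Independent Reads of Independent Writes) is a litmus test in which two
threads write to two independent volatile variables while two other threads
read them in opposite orders. The Java memory model requires all threads to
agree on a single order of the two writes, which non-multiple-copy-atomic
CPUs such as PPC64 do not guarantee without extra fencing. The change below
makes C1 emit a two-way memory barrier before each volatile load on such
platforms (guarded by support_IRIW_for_not_multiple_copy_atomic_cpu) and, in
exchange, drops the trailing barrier after volatile stores there.

As a minimal sketch of the IRIW shape in C++11 atomics (not HotSpot code;
seq_cst stands in for Java volatile), the asserted condition is exactly the
outcome that must be forbidden:

  #include <atomic>
  #include <cassert>
  #include <thread>

  std::atomic<int> x(0), y(0);
  int r1, r2, r3, r4;

  int main() {
    std::thread w1([] { x.store(1, std::memory_order_seq_cst); });
    std::thread w2([] { y.store(1, std::memory_order_seq_cst); });
    std::thread ra([] { r1 = x.load(std::memory_order_seq_cst);    // reads x then y
                        r2 = y.load(std::memory_order_seq_cst); });
    std::thread rb([] { r3 = y.load(std::memory_order_seq_cst);    // reads y then x
                        r4 = x.load(std::memory_order_seq_cst); });
    w1.join(); w2.join(); ra.join(); rb.join();
    // Both readers must agree on the order of the two independent writes,
    // so "ra saw x before y, rb saw y before x" must never be observed.
    assert(!(r1 == 1 && r2 == 0 && r3 == 1 && r4 == 0));
    return 0;
  }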

Old version (before the change):

1741     pre_barrier(LIR_OprFact::address(address),
1742                 LIR_OprFact::illegalOpr /* pre_val */,
1743                 true /* do_load*/,
1744                 needs_patching,
1745                 (info ? new CodeEmitInfo(info) : NULL));
1746   }
1747 
1748   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1749   if (needs_atomic_access && !needs_patching) {
1750     volatile_field_store(value.result(), address, info);
1751   } else {
1752     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1753     __ store(value.result(), address, info, patch_code);
1754   }
1755 
1756   if (is_oop) {
1757     // Store to object so mark the card of the header
1758     post_barrier(object.result(), value.result());
1759   }
1760 
1761   if (is_volatile && os::is_MP()) {
1762     __ membar();
1763   }
1764 }
1765 
1766 
1767 void LIRGenerator::do_LoadField(LoadField* x) {
1768   bool needs_patching = x->needs_patching();
1769   bool is_volatile = x->field()->is_volatile();
1770   BasicType field_type = x->field_type();
1771 
1772   CodeEmitInfo* info = NULL;
1773   if (needs_patching) {
1774     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1775     info = state_for(x, x->state_before());
1776   } else if (x->needs_null_check()) {
1777     NullCheck* nc = x->explicit_null_check();
1778     if (nc == NULL) {
1779       info = state_for(x);
1780     } else {
1781       info = state_for(nc);


1802     if (stress_deopt) {
1803       obj = new_register(T_OBJECT);
1804       __ move(LIR_OprFact::oopConst(NULL), obj);
1805     }
1806     // emit an explicit null check because the offset is too large
1807     __ null_check(obj, new CodeEmitInfo(info));
1808   }
1809 
1810   LIR_Opr reg = rlock_result(x, field_type);
1811   LIR_Address* address;
1812   if (needs_patching) {
1813     // we need to patch the offset in the instruction so don't allow
1814     // generate_address to try to be smart about emitting the -1.
1815     // Otherwise the patching code won't know how to find the
1816     // instruction to patch.
1817     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1818   } else {
1819     address = generate_address(object.result(), x->offset(), field_type);
1820   }
1821 
1822   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1823   if (needs_atomic_access && !needs_patching) {
1824     volatile_field_load(address, reg, info);
1825   } else {
1826     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1827     __ load(address, reg, info, patch_code);
1828   }
1829 
1830   if (is_volatile && os::is_MP()) {
1831     __ membar_acquire();
1832   }
1833 }
1834 
1835 
1836 //------------------------java.nio.Buffer.checkIndex------------------------
1837 
1838 // int java.nio.Buffer.checkIndex(int)
1839 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1840   // NOTE: by the time we are in checkIndex() we are guaranteed that
1841   // the buffer is non-null (because checkIndex is package-private and


2218       index_op = tmp;
2219     }
2220   }
2221 
2222   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2223 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2224   __ move(value.result(), addr);
2225 }
2226 
2227 
2228 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2229   BasicType type = x->basic_type();
2230   LIRItem src(x->object(), this);
2231   LIRItem off(x->offset(), this);
2232 
2233   off.load_item();
2234   src.load_item();
2235 
2236   LIR_Opr value = rlock_result(x, x->basic_type());
2237 
2238   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2239 
2240 #if INCLUDE_ALL_GCS
2241   // We might be reading the value of the referent field of a
2242   // Reference object in order to attach it back to the live
2243   // object graph. If G1 is enabled then we need to record
2244   // the value that is being returned in an SATB log buffer.
2245   //
2246   // We need to generate code similar to the following...
2247   //
2248   // if (offset == java_lang_ref_Reference::referent_offset) {
2249   //   if (src != NULL) {
2250   //     if (klass(src)->reference_type() != REF_NONE) {
2251   //       pre_barrier(..., value, ...);
2252   //     }
2253   //   }
2254   // }
2255 
2256   if (UseG1GC && type == T_OBJECT) {
2257     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.


2375 
2376 
2377 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2378   BasicType type = x->basic_type();
2379   LIRItem src(x->object(), this);
2380   LIRItem off(x->offset(), this);
2381   LIRItem data(x->value(), this);
2382 
2383   src.load_item();
2384   if (type == T_BOOLEAN || type == T_BYTE) {
2385     data.load_byte_item();
2386   } else {
2387     data.load_item();
2388   }
2389   off.load_item();
2390 
2391   set_no_result(x);
2392 
2393   if (x->is_volatile() && os::is_MP()) __ membar_release();
2394   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2395   if (x->is_volatile() && os::is_MP()) __ membar();
2396 }
2397 
2398 
2399 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2400   int lng = x->length();
2401 
2402   for (int i = 0; i < lng; i++) {
2403     SwitchRange* one_range = x->at(i);
2404     int low_key = one_range->low_key();
2405     int high_key = one_range->high_key();
2406     BlockBegin* dest = one_range->sux();
2407     if (low_key == high_key) {
2408       __ cmp(lir_cond_equal, value, low_key);
2409       __ branch(lir_cond_equal, T_INT, dest);
2410     } else if (high_key - low_key == 1) {
2411       __ cmp(lir_cond_equal, value, low_key);
2412       __ branch(lir_cond_equal, T_INT, dest);
2413       __ cmp(lir_cond_equal, value, high_key);
2414       __ branch(lir_cond_equal, T_INT, dest);
2415     } else {


New version (with the change applied):

1741     pre_barrier(LIR_OprFact::address(address),
1742                 LIR_OprFact::illegalOpr /* pre_val */,
1743                 true /* do_load*/,
1744                 needs_patching,
1745                 (info ? new CodeEmitInfo(info) : NULL));
1746   }
1747 
1748   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1749   if (needs_atomic_access && !needs_patching) {
1750     volatile_field_store(value.result(), address, info);
1751   } else {
1752     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1753     __ store(value.result(), address, info, patch_code);
1754   }
1755 
1756   if (is_oop) {
1757     // Store to object so mark the card of the header
1758     post_barrier(object.result(), value.result());
1759   }
1760 
1761   if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
1762     __ membar();
1763   }
1764 }
1765 
1766 
1767 void LIRGenerator::do_LoadField(LoadField* x) {
1768   bool needs_patching = x->needs_patching();
1769   bool is_volatile = x->field()->is_volatile();
1770   BasicType field_type = x->field_type();
1771 
1772   CodeEmitInfo* info = NULL;
1773   if (needs_patching) {
1774     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1775     info = state_for(x, x->state_before());
1776   } else if (x->needs_null_check()) {
1777     NullCheck* nc = x->explicit_null_check();
1778     if (nc == NULL) {
1779       info = state_for(x);
1780     } else {
1781       info = state_for(nc);


1802     if (stress_deopt) {
1803       obj = new_register(T_OBJECT);
1804       __ move(LIR_OprFact::oopConst(NULL), obj);
1805     }
1806     // emit an explicit null check because the offset is too large
1807     __ null_check(obj, new CodeEmitInfo(info));
1808   }
1809 
1810   LIR_Opr reg = rlock_result(x, field_type);
1811   LIR_Address* address;
1812   if (needs_patching) {
1813     // we need to patch the offset in the instruction so don't allow
1814     // generate_address to try to be smart about emitting the -1.
1815     // Otherwise the patching code won't know how to find the
1816     // instruction to patch.
1817     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1818   } else {
1819     address = generate_address(object.result(), x->offset(), field_type);
1820   }
1821 
1822   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
1823     __ membar();
1824   }
1825 
1826   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1827   if (needs_atomic_access && !needs_patching) {
1828     volatile_field_load(address, reg, info);
1829   } else {
1830     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1831     __ load(address, reg, info, patch_code);
1832   }
1833 
1834   if (is_volatile && os::is_MP()) {
1835     __ membar_acquire();
1836   }
1837 }
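
Taken together, the new do_LoadField and do_StoreField above establish the
following placement: a full membar before the volatile load when
support_IRIW_for_not_multiple_copy_atomic_cpu is set, and no trailing membar
after the volatile store in exactly that case. A hedged sketch of the
decision logic, with emit() as a hypothetical stand-in for the LIR emissions
(not a HotSpot function):

  #include <cstdio>

  static void emit(const char* op) { std::printf("  %s\n", op); }

  // Mirrors the guards in do_LoadField for a volatile, non-patched load.
  void volatile_load(bool support_iriw, bool is_mp) {
    if (support_iriw && is_mp) emit("membar");   // new: full fence before the load
    emit("load");
    if (is_mp) emit("membar_acquire");           // unchanged trailing acquire
  }

  // Mirrors the guards at the end of do_StoreField for a volatile store.
  void volatile_store(bool support_iriw, bool is_mp) {
    emit("store");
    if (!support_iriw && is_mp) emit("membar");  // now skipped on IRIW platforms
  }

  int main() {
    std::printf("IRIW platform (e.g. PPC64):\n");
    volatile_load(true, true);
    volatile_store(true, true);
    std::printf("multiple-copy-atomic platform (e.g. x86):\n");
    volatile_load(false, true);
    volatile_store(false, true);
    return 0;
  }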
1838 
1839 
1840 //------------------------java.nio.Buffer.checkIndex------------------------
1841 
1842 // int java.nio.Buffer.checkIndex(int)
1843 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1844   // NOTE: by the time we are in checkIndex() we are guaranteed that
1845   // the buffer is non-null (because checkIndex is package-private and


2222       index_op = tmp;
2223     }
2224   }
2225 
2226   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2227 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2228   __ move(value.result(), addr);
2229 }
2230 
2231 
2232 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2233   BasicType type = x->basic_type();
2234   LIRItem src(x->object(), this);
2235   LIRItem off(x->offset(), this);
2236 
2237   off.load_item();
2238   src.load_item();
2239 
2240   LIR_Opr value = rlock_result(x, x->basic_type());
2241 
2242   if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
2243     __ membar();
2244   }
2245 
2246   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2247 
2248 #if INCLUDE_ALL_GCS
2249   // We might be reading the value of the referent field of a
2250   // Reference object in order to attach it back to the live
2251   // object graph. If G1 is enabled then we need to record
2252   // the value that is being returned in an SATB log buffer.
2253   //
2254   // We need to generate code similar to the following...
2255   //
2256   // if (offset == java_lang_ref_Reference::referent_offset) {
2257   //   if (src != NULL) {
2258   //     if (klass(src)->reference_type() != REF_NONE) {
2259   //       pre_barrier(..., value, ...);
2260   //     }
2261   //   }
2262   // }
2263 
2264   if (UseG1GC && type == T_OBJECT) {
2265     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.


2383 
2384 
2385 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2386   BasicType type = x->basic_type();
2387   LIRItem src(x->object(), this);
2388   LIRItem off(x->offset(), this);
2389   LIRItem data(x->value(), this);
2390 
2391   src.load_item();
2392   if (type == T_BOOLEAN || type == T_BYTE) {
2393     data.load_byte_item();
2394   } else {
2395     data.load_item();
2396   }
2397   off.load_item();
2398 
2399   set_no_result(x);
2400 
2401   if (x->is_volatile() && os::is_MP()) __ membar_release();
2402   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2403   if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
2404 }
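
The Unsafe accessors above get the same treatment: do_UnsafeGetObject gains
the leading membar under the same guard, and do_UnsafePutObject keeps its
membar_release before the store but drops the trailing membar on IRIW
platforms. The net effect is that the expensive two-way fence moves from the
store side to the load side on non-multiple-copy-atomic CPUs, where it is the
readers that must be forced to agree on a single order of independent writes.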
2405 
2406 
2407 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2408   int lng = x->length();
2409 
2410   for (int i = 0; i < lng; i++) {
2411     SwitchRange* one_range = x->at(i);
2412     int low_key = one_range->low_key();
2413     int high_key = one_range->high_key();
2414     BlockBegin* dest = one_range->sux();
2415     if (low_key == high_key) {
2416       __ cmp(lir_cond_equal, value, low_key);
2417       __ branch(lir_cond_equal, T_INT, dest);
2418     } else if (high_key - low_key == 1) {
2419       __ cmp(lir_cond_equal, value, low_key);
2420       __ branch(lir_cond_equal, T_INT, dest);
2421       __ cmp(lir_cond_equal, value, high_key);
2422       __ branch(lir_cond_equal, T_INT, dest);
2423     } else {

