src/cpu/sparc/vm/sparc.ad
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 6954029 Sdiff src/cpu/sparc/vm

src/cpu/sparc/vm/sparc.ad

Print this page




1733 
1734 const bool Matcher::isSimpleConstant64(jlong value) {
1735   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
1736   // Depends on optimizations in MacroAssembler::setx.
1737   int hi = (int)(value >> 32);   // high 32 bits of the constant
1738   int lo = (int)(value & ~0);    // low 32 bits (~0 is an all-ones mask; the cast truncates)
1739   return (hi == 0) || (hi == -1) || (lo == 0);
1740 }
1741 
1742 // No scaling for the parameter to the ClearArray node.
1743 const bool Matcher::init_array_count_is_in_bytes = true;
1744 
1745 // Threshold size for cleararray.
1746 const int Matcher::init_array_short_size = 8 * BytesPerLong;
1747 
1748 // Should the Matcher clone shifts on addressing modes, expecting them to
1749 // be subsumed into complex addressing expressions or compute them into
1750 // registers?  True for Intel but false for most RISCs.
1751 const bool Matcher::clone_shift_expressions = false;
1752 






1753 // Is it better to copy float constants, or load them directly from memory?
1754 // Intel can load a float constant from a direct address, requiring no
1755 // extra registers.  Most RISCs will have to materialize an address into a
1756 // register first, so they would do better to copy the constant from stack.
1757 const bool Matcher::rematerialize_float_constants = false;  // SPARC: copy from the stack
1758 
1759 // If CPU can load and store mis-aligned doubles directly then no fixup is
1760 // needed.  Else we split the double into 2 integer pieces and move it
1761 // piece-by-piece.  Only happens when passing doubles into C code as the
1762 // Java calling convention forces doubles to be aligned.
1763 #ifdef _LP64
1764 const bool Matcher::misaligned_doubles_ok = true;   // 64-bit: no split needed
1765 #else
1766 const bool Matcher::misaligned_doubles_ok = false;  // 32-bit: split into two integer pieces
1767 #endif
1768 
1769 // No-op on SPARC: no platform-specific fixup is needed after an implicit null check.
1770 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
1771 }
1772 




1733 
1734 const bool Matcher::isSimpleConstant64(jlong value) {
1735   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
1736   // Depends on optimizations in MacroAssembler::setx.
1737   int hi = (int)(value >> 32);   // high 32 bits of the constant
1738   int lo = (int)(value & ~0);    // low 32 bits (~0 is an all-ones mask; the cast truncates)
1739   return (hi == 0) || (hi == -1) || (lo == 0);
1740 }
1741 
1742 // No scaling for the parameter to the ClearArray node.
1743 const bool Matcher::init_array_count_is_in_bytes = true;
1744 
1745 // Threshold size for cleararray.
1746 const int Matcher::init_array_short_size = 8 * BytesPerLong;
1747 
1748 // Should the Matcher clone shifts on addressing modes, expecting them to
1749 // be subsumed into complex addressing expressions or compute them into
1750 // registers?  True for Intel but false for most RISCs.
1751 const bool Matcher::clone_shift_expressions = false;
1752 
// Should DecodeN (narrow oop) inputs be matched into complex addressing
// expressions?  False on SPARC: presumably the decoded oop must be
// materialized in a register first — confirm against opto/matcher usage.
1753 bool Matcher::narrow_oop_use_complex_address() {
1754   NOT_LP64(ShouldNotCallThis());  // compressed oops exist only in 64-bit builds
1755   assert(UseCompressedOops, "only for compressed oops code");
1756   return false;
1757 }
1758 
1759 // Is it better to copy float constants, or load them directly from memory?
1760 // Intel can load a float constant from a direct address, requiring no
1761 // extra registers.  Most RISCs will have to materialize an address into a
1762 // register first, so they would do better to copy the constant from stack.
1763 const bool Matcher::rematerialize_float_constants = false;  // SPARC: copy from the stack
1764 
1765 // If CPU can load and store mis-aligned doubles directly then no fixup is
1766 // needed.  Else we split the double into 2 integer pieces and move it
1767 // piece-by-piece.  Only happens when passing doubles into C code as the
1768 // Java calling convention forces doubles to be aligned.
1769 #ifdef _LP64
1770 const bool Matcher::misaligned_doubles_ok = true;   // 64-bit: no split needed
1771 #else
1772 const bool Matcher::misaligned_doubles_ok = false;  // 32-bit: split into two integer pieces
1773 #endif
1774 
1775 // No-op on SPARC: no platform-specific fixup is needed after an implicit null check.
1776 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
1777 }
1778 


src/cpu/sparc/vm/sparc.ad
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File