src/cpu/x86/vm/x86_64.ad

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
  //return value == (int) value;  // Cf. storeImmL and immL32.

  // Probably always true, even if a temp register is required.
  return true;
}
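
The question in the comment comes down to x86-64 encoding: a store to memory can only carry a sign-extended 32-bit immediate, so a wider 64-bit constant first has to be materialized in a temporary register with movabs. A minimal C++ illustration of the two cases (the function and constants are made up for the example, not part of this file):

#include <cstdint>

// Illustration only: how 64-bit constant stores are encoded on x86-64.
void store_long_constants(int64_t* p) {
  p[0] = 42;                    // fits sign-extended imm32: mov qword ptr [rdi], 42
  p[1] = 0x1122334455667788LL;  // too wide: movabs rax, imm64; mov qword ptr [rdi+8], rax
}

Either way the value goes out as a single 64-bit store, which is why the hook answers "yes" even when a temp register is needed.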

// The rcx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for ClearArray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
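
With BytesPerLong being 8 on amd64, the threshold works out to 64 bytes: clears around that size or smaller are cheaper as a short run of direct quadword stores, while larger ones amortize the setup cost of a bulk clear. A rough C++ sketch of that tradeoff (illustrative only, not the actual C2 expansion; the 64-byte cutoff mirrors the constant above):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of the size tradeoff behind init_array_short_size.  The count is
// in words, matching init_array_count_is_in_bytes == false.
void clear_words(uint64_t* p, size_t words) {
  if (words * sizeof(uint64_t) <= 64) {
    for (size_t i = 0; i < words; i++) p[i] = 0;  // short: straight-line stores
  } else {
    memset(p, 0, words * sizeof(uint64_t));       // long: bulk clear (rep stos / vector loop)
  }
}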

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions, or compute them
// into registers?  True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = true;
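
Cloning pays off on Intel because the scaled-index addressing mode absorbs a shift by 0..3 for free; the shifted index never needs its own register. A plain C++ example of the effect (illustration only):

#include <cstdint>

// base[i] implies i << 3, but on x86-64 the whole access folds into one
// instruction: mov rax, qword ptr [rdi + rsi*8]
int64_t load_element(const int64_t* base, int64_t i) {
  return base[i];
}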

bool Matcher::narrow_oop_use_complex_address() {
  assert(UseCompressedOops, "only for compressed oops code");
  return (LogMinObjAlignmentInBytes <= 3);
}
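
This new predicate ties compressed-oop decoding to the same addressing-mode limit: a narrow oop decodes as heap_base + (narrow << shift), and the x86-64 scale factors 1, 2, 4 and 8 cover shifts 0 through 3, so while LogMinObjAlignmentInBytes <= 3 the decode can stay folded inside the memory operand of the instruction that uses the oop. A hedged sketch of the arithmetic (function and parameter names are illustrative, not HotSpot's):

#include <cassert>
#include <cstdint>

// Decoding a narrow oop:  wide = heap_base + ((uint64_t)narrow << shift).
// With shift <= 3 this matches an x86-64 address of the form
// [base + index*scale], so no separate decode instruction is required.
uint64_t decode_narrow_oop(uint32_t narrow, uint64_t heap_base, unsigned shift) {
  assert(shift <= 3 && "x86 address scales are limited to 1, 2, 4, 8");
  return heap_base + ((uint64_t)narrow << shift);
}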

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX
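
On x86-64 a float constant can be picked up from its address with a single instruction and no extra register, so re-loading it at each use is cheaper than keeping a copy alive across spills. For instance, this C++ function compiles to a single movss from a RIP-relative address (illustration only):

// Roughly:  movss xmm0, dword ptr [rip + .LC0] ; ret
float pi_approx() {
  return 3.14159265f;
}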

// If the CPU can load and store misaligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
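
amd64 handles 8-byte loads and stores at any alignment, so the flag is true and no fixup is ever generated here. For a port without that luxury, the fixup amounts to moving the double as two 32-bit pieces; a minimal C++ sketch of that fallback (illustrative, not HotSpot code):

#include <cstdint>
#include <cstring>

// Move a double as two aligned 32-bit pieces instead of one possibly
// misaligned 64-bit access.
void move_double_split(uint32_t* dst, const double* src) {
  uint32_t piece[2];
  memcpy(piece, src, sizeof(double));  // reinterpret the double as 2 ints
  dst[0] = piece[0];                   // low half
  dst[1] = piece[1];                   // high half
}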

// No-op on amd64
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;
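
"Explicit rounding" here means forcing an intermediate result out of the x87 FPU's 80-bit extended precision back down to a 64-bit double, typically by storing it to memory, which is what strictfp semantics require between operations. A small C++ illustration of the idea (only meaningful when the compiler targets the x87 FPU; with SSE math the store changes nothing):

// Force the product down to 64-bit double precision before the add.
double strict_mul_add(double a, double b, double c) {
  volatile double t = a * b;  // the store/reload rounds t to a true double
  return t + c;
}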