2170 // - how ImplicitNullCheck opportunities are recognized
2171 // If true, the matcher will try to remove all Decodes and match them
2172 // (as operands) into nodes. NullChecks are not prepared to deal with
2173 // Decodes by final_graph_reshaping().
2174 // If false, final_graph_reshaping() forces the decode behind the Cmp
2175 // for a NullCheck. The matcher matches the Decode node into a register.
2176 // Implicit_null_check optimization moves the Decode along with the
2177 // memory operation back up before the NullCheck.
// Returns whether the matcher may fold DecodeN nodes into the address
// operands of memory nodes (see the explanatory comment above). Disabled
// on this port: decodes are matched into registers instead.
bool Matcher::narrow_oop_use_complex_address() {
  // TODO: PPC port if (MatchDecodeNodes) return true;
  return false;
}
2182
// Same question as narrow_oop_use_complex_address(), but for DecodeNKlass
// (compressed class pointers). Also disabled on this port.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());  // compressed klass pointers are 64-bit only
  assert(UseCompressedClassPointers, "only for compressed klass code");
  // TODO: PPC port if (MatchDecodeNodes) return true;
  return false;
}
2189
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
// false => float constants are loaded from memory rather than rematerialized.
const bool Matcher::rematerialize_float_constants = false;
2195
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
// true: presumably this CPU handles misaligned double accesses in hardware,
// so no split is required — confirm against the target ISA manual.
const bool Matcher::misaligned_doubles_ok = true;
2201
// Platform hook to fix up a MachNode chosen for an implicit null check.
// Never expected to be reached on this port; Unimplemented() stops the VM
// if it is (NOTE(review): assumed standard HotSpot Unimplemented() semantics).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2205
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
// false => no extra rounding code is emitted for strictfp on this port.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2209
|
2170 // - how ImplicitNullCheck opportunities are recognized
2171 // If true, the matcher will try to remove all Decodes and match them
2172 // (as operands) into nodes. NullChecks are not prepared to deal with
2173 // Decodes by final_graph_reshaping().
2174 // If false, final_graph_reshaping() forces the decode behind the Cmp
2175 // for a NullCheck. The matcher matches the Decode node into a register.
2176 // Implicit_null_check optimization moves the Decode along with the
2177 // memory operation back up before the NullCheck.
// Returns whether the matcher may fold DecodeN nodes into the address
// operands of memory nodes (see the explanatory comment above). Disabled
// on this port: decodes are matched into registers instead.
bool Matcher::narrow_oop_use_complex_address() {
  // TODO: PPC port if (MatchDecodeNodes) return true;
  return false;
}
2182
// Same question as narrow_oop_use_complex_address(), but for DecodeNKlass
// (compressed class pointers). Also disabled on this port.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());  // compressed klass pointers are 64-bit only
  assert(UseCompressedClassPointers, "only for compressed klass code");
  // TODO: PPC port if (MatchDecodeNodes) return true;
  return false;
}
2189
// Should a constant oop be materialized as ConN + DecodeN rather than a
// ConP? Yes in "simple" compressed oops mode, i.e. when the heap base
// (Universe::narrow_oop_base()) is NULL.
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}
2194
// Analogue of const_oop_prefer_decode() for constant klass pointers:
// prefer the narrow form when the klass base (Universe::narrow_klass_base())
// is NULL.
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2199
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
// false => float constants are loaded from memory rather than rematerialized.
const bool Matcher::rematerialize_float_constants = false;
2205
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
// true: presumably this CPU handles misaligned double accesses in hardware,
// so no split is required — confirm against the target ISA manual.
const bool Matcher::misaligned_doubles_ok = true;
2211
// Platform hook to fix up a MachNode chosen for an implicit null check.
// Never expected to be reached on this port; Unimplemented() stops the VM
// if it is (NOTE(review): assumed standard HotSpot Unimplemented() semantics).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2215
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
// false => no extra rounding code is emitted for strictfp on this port.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2219
|