207 }
208 } else if (UseSHA512Intrinsics) {
209 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
210 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
211 }
212
213 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
214 FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
215 }
216 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
217 FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
218 }
219 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
220 FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
221 }
222 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
223 FLAG_SET_DEFAULT(UsePopCountInstruction, true);
224 }
225
226 // z/Architecture supports 8-byte compare-exchange operations
228 // (see Atomic::cmpxchg)
228 // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
229 _supports_cx8 = true;
230
231 _supports_atomic_getadd4 = VM_Version::has_LoadAndALUAtomicV1();
232 _supports_atomic_getadd8 = VM_Version::has_LoadAndALUAtomicV1();
233
234 // z/Architecture supports unaligned memory accesses.
235 // Performance penalty is negligible. An additional tick or so
236 // is lost if the accessed data spans a cache line boundary.
237 // Unaligned accesses are not atomic, of course.
238 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
239 FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
240 }
241 }
242
243
244 void VM_Version::set_features_string() {
245
246 unsigned int ambiguity = 0;
247 if (is_z13()) {
|
207 }
208 } else if (UseSHA512Intrinsics) {
209 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
210 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
211 }
212
213 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
214 FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
215 }
216 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
217 FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
218 }
219 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
220 FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
221 }
222 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
223 FLAG_SET_DEFAULT(UsePopCountInstruction, true);
224 }
225
226 // z/Architecture supports 8-byte compare-exchange operations
227 // (see Atomic::cmpxchg)
228 // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
229 _supports_cx8 = true;
230
231 _supports_atomic_getadd4 = VM_Version::has_LoadAndALUAtomicV1();
232 _supports_atomic_getadd8 = VM_Version::has_LoadAndALUAtomicV1();
233
234 // z/Architecture supports unaligned memory accesses.
235 // Performance penalty is negligible. An additional tick or so
236 // is lost if the accessed data spans a cache line boundary.
237 // Unaligned accesses are not atomic, of course.
238 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
239 FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
240 }
241 }
242
243
244 void VM_Version::set_features_string() {
245
246 unsigned int ambiguity = 0;
247 if (is_z13()) {
|