--- old/src/share/vm/opto/idealKit.hpp 2015-11-04 15:07:45.616543716 +0100
+++ new/src/share/vm/opto/idealKit.hpp 2015-11-04 15:07:45.258600209 +0100
@@ -229,7 +229,9 @@
                BasicType bt,
                int adr_idx,
                MemNode::MemOrd mo,
-               bool require_atomic_access = false);
+               bool require_atomic_access = false,
+               bool mismatched = false
+               );
 
   // Store a card mark ordered after store_oop
   Node* storeCM(Node* ctl,
--- /dev/null 2015-06-17 17:39:34.064603000 +0200
+++ new/test/compiler/intrinsics/unsafe/TestUnsafeUnalignedMismatchedAccesses.java 2015-11-04 15:07:45.288935001 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8136473
+ * @summary Mismatched stores on same slice possible with Unsafe.Put*Unaligned methods
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestUnsafeUnalignedMismatchedAccesses
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:+UnlockDiagnosticVMOptions -XX:-UseUnalignedAccesses TestUnsafeUnalignedMismatchedAccesses
+ *
+ */
+
+import java.lang.reflect.*;
+import sun.misc.Unsafe;
+
+public class TestUnsafeUnalignedMismatchedAccesses {
+
+    private static final Unsafe UNSAFE;
+
+    static {
+        try {
+            Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
+            unsafeField.setAccessible(true);
+            UNSAFE = (Unsafe) unsafeField.get(null);
+        }
+        catch (Exception e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    static void test1(byte[] array) {
+        array[0] = 0;
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET, 0);
+        array[0] = 0;
+    }
+
+    static void test2(byte[] array) {
+        array[0] = 0;
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET+1, 0);
+        array[0] = 0;
+    }
+
+    static void test3(byte[] array) {
+        array[0] = 0;
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET+2, 0);
+        array[0] = 0;
+    }
+
+    static void test4(byte[] array) {
+        array[0] = 0;
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET+3, 0);
+        array[0] = 0;
+    }
+
+    static void test5(byte[] array) {
+        array[0] = 0;
+        UNSAFE.putInt(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET, 0);
+        array[0] = 0;
+    }
+
+    // unaligned access and non escaping allocation
+    static void test6() {
+        byte[] array = new byte[10];
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET+1, -1);
+        array[0] = 0;
+    }
+
+    // unaligned access and non escaping allocation
+    static int test7() {
+        byte[] array = new byte[10];
+        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET+1, -1);
+        array[0] = 0;
+        array[2] = 0;
+        return array[0] + array[1] + array[2] + array[3] + array[4];
+    }
+
+    // unaligned access with vectorization
+    static void test8(int[] src1, int[] src2, int[] dst) {
+        for (int i = 0; i < dst.length-1; i++) {
+            int res = src1[i] + src2[i];
+            UNSAFE.putIntUnaligned(dst, UNSAFE.ARRAY_INT_BASE_OFFSET + i*4+1, res);
+        }
+    }
+
+    static public void main(String[] args) throws Exception {
+        byte[] byte_array = new byte[100];
+        int[] int_array = new int[100];
+        Object[] obj_array = new Object[100];
+        TestUnsafeUnalignedMismatchedAccesses test = new TestUnsafeUnalignedMismatchedAccesses();
+        for (int i = 0; i < 20000; i++) {
+            test1(byte_array);
+            test2(byte_array);
+            test3(byte_array);
+            test4(byte_array);
+            test5(byte_array);
+            test6();
+            test7();
+            test8(int_array, int_array, int_array);
+        }
+    }
+}
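
Note on the test: the failure mode it targets can be reproduced standalone along the following lines. This is a minimal sketch, not part of the change; it assumes a JDK where sun.misc.Unsafe exposes putIntUnaligned (as in the jdk9 stream this patch applies to), and the class name UnalignedOverlapDemo is made up for illustration.

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    public class UnalignedOverlapDemo {
        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe unsafe = (Unsafe) f.get(null);

            byte[] a = new byte[8];
            a[0] = 42;  // byte store on the byte[] slice
            // 4-byte store that overlaps a[0..3] but is issued through the
            // Unaligned intrinsic rather than as four byte stores:
            unsafe.putIntUnaligned(a, Unsafe.ARRAY_BYTE_BASE_OFFSET, -1);
            // Without the unaligned/mismatched tracking added below, C2 could
            // treat the int-sized store as independent of the byte stores on
            // the same slice and wrongly eliminate or reorder one of them.
            System.out.println(a[0]);  // must print -1 (0xFF), never 42
        }
    }

The committed test runs each pattern 20000 times with -XX:-BackgroundCompilation so compilation happens in the test thread, and covers offsets 0 through 3 so at least one variant is genuinely unaligned on every platform.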
--- old/src/share/vm/opto/memnode.cpp 2015-11-04 15:07:45.711982229 +0100
+++ new/src/share/vm/opto/memnode.cpp 2015-11-04 15:07:45.303735248 +0100
@@ -72,8 +72,15 @@
   dump_adr_type(this, _adr_type, st);
 
   Compile* C = Compile::current();
-  if( C->alias_type(_adr_type)->is_volatile() )
+  if (C->alias_type(_adr_type)->is_volatile()) {
     st->print(" Volatile!");
+  }
+  if (_unaligned_access) {
+    st->print(" unaligned");
+  }
+  if (_mismatched_access) {
+    st->print(" mismatched");
+  }
 }
 
 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
@@ -2393,7 +2400,8 @@
           st->Opcode() == Op_StoreVector ||
           Opcode() == Op_StoreVector ||
           phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
-          (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
+          (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
+          (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
           "no mismatched stores, except on raw memory: %s %s",
           NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
 
   if (st->in(MemNode::Address)->eqv_uncast(address) &&
@@ -3213,6 +3221,9 @@
 // within the initialized memory.
 intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
   const int FAIL = 0;
+  if (st->is_unaligned_access()) {
+    return FAIL;
+  }
   if (st->req() != MemNode::ValueIn + 1)
     return FAIL;                // an inscrutable StoreNode (card mark?)
   Node* ctl = st->in(MemNode::Control);
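
A note on the can_capture_store() change: the bail-out matters for the non-escaping-allocation patterns (test6/test7 in the new test). An unaligned store into a freshly allocated array does not line up with any element of the object, so folding it into the InitializeNode's initialization plan could interact badly with the zeroing of the remaining bytes. A sketch of the byte-level picture, assuming the UNSAFE field from the test class above (the helper name captureHazard is illustrative):

    // The 4-byte store below writes payload bytes 1..4 of the fresh array.
    static void captureHazard() {
        byte[] array = new byte[10];  // fresh allocation, all bytes zeroed
        UNSAFE.putIntUnaligned(array, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 1, -1);
        // With the store kept out of the capture (return FAIL above), it
        // stays ordered after the initialization, so bytes 0 and 5..9
        // still read as zero while bytes 1..4 read as 0xFF.
        if (array[0] != 0 || array[1] != -1 || array[4] != -1 || array[5] != 0) {
            throw new AssertionError("unaligned store handled incorrectly");
        }
    }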
--- old/src/share/vm/opto/library_call.cpp 2015-11-04 15:07:45.764242465 +0100
+++ new/src/share/vm/opto/library_call.cpp 2015-11-04 15:07:45.394843440 +0100
@@ -238,7 +238,7 @@
   // Generates the guards that check whether the result of
   // Unsafe.getObject should be recorded in an SATB log buffer.
   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
-  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
+  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_copyMemory();
@@ -544,72 +544,72 @@
   case vmIntrinsics::_inflateStringC:
   case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
 
-  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile);
-  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
-  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile);
-  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
-  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile);
-  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
-  case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile);
-  case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile);
-  case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile);
-  case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
-  case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile);
-  case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile);
-
-  case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile);
-  case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile);
-  case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile);
-  case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
-  case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
-
-  case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile);
-  case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile);
-  case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile);
-  case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile);
-  case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile);
-
-  case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile);
-  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile);
-  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile);
-  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile);
-  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile);
-  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile);
-  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile);
-  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile);
-  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile);
-
-  case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile);
-  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile);
-  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile);
-  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile);
-  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile);
-  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile);
-  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile);
-  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile);
-  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile);
-
-  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
-
-  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
-  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
-  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
-  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
+  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile, false);
+  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
+  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile, false);
+  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, false);
+  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, false);
+  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, false);
+  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, false);
+  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile, false);
+  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false);
+  case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile, false);
+  case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile, false);
+  case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile, false);
+  case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, false);
+  case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile, false);
+  case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, false);
+  case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, false);
+  case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile, false);
+  case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile, false);
+
+  case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile, false);
+  case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile, false);
+  case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile, false);
+  case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile, false);
+  case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile, false);
+  case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile, false);
+  case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false);
+  case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
+
+  case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile, false);
+  case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile, false);
+  case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile, false);
+  case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile, false);
+  case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile, false);
+  case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile, false);
+  case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile, false);
+  case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile, false);
+
+  case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile, false);
+  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile, false);
+  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile, false);
+  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile, false);
+  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile, false);
+  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile, false);
+  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile, false);
+  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile, false);
+  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile, false);
+
+  case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile, false);
+  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile, false);
+  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile, false);
+  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile, false);
+  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile, false);
+  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile, false);
+  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile, false);
+  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile, false);
+  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile, false);
+
+  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, true);
+  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, true);
+  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, true);
+  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, true);
+
+  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, true);
+  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile, true);
+  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, true);
+  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, true);
 
   case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
   case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg);
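
The wiring above is mechanical but worth spelling out: only the *Unaligned intrinsics pass true for the new is_unaligned parameter; the plain, _raw and Volatile variants all pass false. In Java terms (a sketch reusing the UNSAFE setup from the test; the helper name flagWiring is made up):

    static void flagWiring(byte[] bytes, int[] ints) {
        // _putIntUnaligned intrinsic -> is_unaligned == true
        UNSAFE.putIntUnaligned(bytes, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 1, 0);
        // _putInt intrinsic -> is_unaligned == false, although the access is
        // still mismatched on a byte[] slice (see inline_unsafe_access below)
        UNSAFE.putInt(bytes, (long) UNSAFE.ARRAY_BYTE_BASE_OFFSET, 0);
        // _putInt on an int[]: neither unaligned nor mismatched
        UNSAFE.putInt(ints, (long) UNSAFE.ARRAY_INT_BASE_OFFSET, 0);
    }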
@@ -2385,7 +2385,7 @@
   return NULL;
 }
 
-bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
+bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
   if (callee()->is_static())  return false;  // caller must have the capability!
 
 #ifndef PRODUCT
@@ -2527,7 +2527,24 @@
   // of safe & unsafe memory.
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
 
-  if (!is_store) {
+  assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
+         alias_type->field() != NULL || alias_type->element() != NULL, "field, array element or unknown");
+  bool mismatched = false;
+  if (alias_type->element() != NULL || alias_type->field() != NULL) {
+    BasicType bt;
+    if (alias_type->element() != NULL) {
+      const Type* element = alias_type->element();
+      bt = element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
+    } else {
+      bt = alias_type->field()->type()->basic_type();
+    }
+    if (bt != type) {
+      mismatched = true;
+    }
+  }
+  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
+
+  if (!is_store) {
     Node* p = NULL;
     // Try to constant fold a load from a constant field
     ciField* field = alias_type->field();
@@ -2543,7 +2560,7 @@
       MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
       // To be valid, unsafe loads may depend on other conditions than
       // the one that guards them: pin the Load node
-      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
+      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
       // load value
       switch (type) {
       case T_BOOLEAN:
@@ -2590,12 +2607,12 @@
     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
     if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
+      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
         // oop to Java heap.
-        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
       } else {
         // We can't tell at compile time if we are storing in the Java heap or outside
         // of it. So we need to emit code to conditionally do the proper type of
@@ -2607,11 +2624,11 @@
         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
           // Sync IdealKit and graphKit.
           sync_kit(ideal);
-          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
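
The mismatched computation above compares the basic type of the access against what alias analysis knows about the addressed memory: the array element type (narrow oops mapping back to T_OBJECT) or the declared field type; raw or unknown (BOTTOM) addresses are left unmarked. The new assert also records that object-typed accesses never arrive through the Unaligned intrinsics. Concretely, in Java terms (sketch, same UNSAFE setup as the test; mismatchExamples is a made-up name):

    static void mismatchExamples(byte[] bytes, int[] ints) {
        // access type T_LONG vs. element type T_BYTE -> mismatched = true
        long v = UNSAFE.getLong(bytes, (long) UNSAFE.ARRAY_BYTE_BASE_OFFSET);
        // access type T_INT vs. element type T_INT -> mismatched = false
        int w = UNSAFE.getInt(ints, (long) UNSAFE.ARRAY_INT_BASE_OFFSET);
    }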
--- old/src/share/vm/opto/idealKit.cpp 2015-11-04 15:07:45.829545984 +0100
+++ new/src/share/vm/opto/idealKit.cpp 2015-11-04 15:07:45.331540813 +0100
@@ -368,7 +368,8 @@
 
 Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
                       int adr_idx,
-                      MemNode::MemOrd mo, bool require_atomic_access) {
+                      MemNode::MemOrd mo, bool require_atomic_access,
+                      bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -379,6 +380,9 @@
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
+  if (mismatched) {
+    st->as_Store()->set_mismatched_access();
+  }
   st = transform(st);
   set_memory(st, adr_idx);
 
--- old/src/share/vm/opto/graphKit.cpp 2015-11-04 15:07:45.868026920 +0100
+++ new/src/share/vm/opto/graphKit.cpp 2015-11-04 15:07:45.355719900 +0100
@@ -1457,7 +1457,11 @@
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
+                          MemNode::MemOrd mo,
+                          LoadNode::ControlDependency control_dependency,
+                          bool require_atomic_access,
+                          bool unaligned,
+                          bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1470,6 +1474,12 @@
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
   }
+  if (unaligned) {
+    ld->as_Load()->set_unaligned_access();
+  }
+  if (mismatched) {
+    ld->as_Load()->set_mismatched_access();
+  }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
     // Improve graph before escape analysis and boxing elimination.
@@ -1481,7 +1491,9 @@
 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
                                 MemNode::MemOrd mo,
-                                bool require_atomic_access) {
+                                bool require_atomic_access,
+                                bool unaligned,
+                                bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1494,6 +1506,12 @@
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
+  if (unaligned) {
+    st->as_Store()->set_unaligned_access();
+  }
+  if (mismatched) {
+    st->as_Store()->set_mismatched_access();
+  }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
   // Back-to-back stores can only remove intermediate store with DU info
@@ -1587,7 +1605,8 @@
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           bool use_precise,
-                          MemNode::MemOrd mo) {
+                          MemNode::MemOrd mo,
+                          bool mismatched) {
   // Transformation of a value which could be NULL pointer (CastPP #NULL)
   // could be delayed during Parse (for example, in adjust_map_after_if()).
   // Execute transformation here to avoid barrier generation in such case.
@@ -1607,7 +1626,7 @@
               NULL /* pre_val */,
               bt);
 
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
@@ -1619,7 +1638,8 @@
                              const TypePtr* adr_type,
                              Node* val,
                              BasicType bt,
-                             MemNode::MemOrd mo) {
+                             MemNode::MemOrd mo,
+                             bool mismatched) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
@@ -1638,7 +1658,7 @@
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
 }
--- old/src/share/vm/opto/graphKit.hpp 2015-11-04 15:07:46.008497205 +0100
+++ new/src/share/vm/opto/graphKit.hpp 2015-11-04 15:07:45.429419845 +0100
@@ -513,23 +513,28 @@
   // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false) {
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     mo, control_dependency, require_atomic_access);
+                     mo, control_dependency, require_atomic_access,
+                     unaligned, mismatched);
   }
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false) {
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     mo, control_dependency, require_atomic_access);
+                     mo, control_dependency, require_atomic_access,
+                     unaligned, mismatched);
   }
   // This is the base version which is given an alias index.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false);
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false);
 
   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.
@@ -542,19 +547,24 @@
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         const TypePtr* adr_type,
                         MemNode::MemOrd mo,
-                        bool require_atomic_access = false) {
+                        bool require_atomic_access = false,
+                        bool unaligned = false,
+                        bool mismatched = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other store_to_memory factory");
     return store_to_memory(ctl, adr, val, bt, C->get_alias_index(adr_type),
-                           mo, require_atomic_access);
+                           mo, require_atomic_access,
+                           unaligned, mismatched);
   }
   // This is the base version which is given alias index
   // Return the new StoreXNode
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         int adr_idx,
                         MemNode::MemOrd,
-                        bool require_atomic_access = false);
+                        bool require_atomic_access = false,
+                        bool unaligned = false,
+                        bool mismatched = false);
 
   // All in one pre-barrier, store, post_barrier
@@ -577,7 +587,8 @@
                   const TypeOopPtr* val_type,
                   BasicType bt,
                   bool use_precise,
-                  MemNode::MemOrd mo);
+                  MemNode::MemOrd mo,
+                  bool mismatched = false);
 
   Node* store_oop_to_object(Node* ctl,
                             Node* obj,   // containing obj
@@ -608,7 +619,8 @@
                              const TypePtr* adr_type,
                              Node* val,
                              BasicType bt,
-                             MemNode::MemOrd mo);
+                             MemNode::MemOrd mo,
+                             bool mismatched = false);
 
   // For the few case where the barriers need special help
   void pre_barrier(bool do_load, Node* ctl,
--- old/src/share/vm/opto/memnode.hpp 2015-11-04 15:07:45.986660271 +0100
+++ new/src/share/vm/opto/memnode.hpp 2015-11-04 15:07:45.444623954 +0100
@@ -39,11 +39,14 @@
 //------------------------------MemNode----------------------------------------
 // Load or Store, possibly throwing a NULL pointer exception
 class MemNode : public Node {
+private:
+  bool _unaligned_access; // Unaligned access from unsafe
+  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
 protected:
 #ifdef ASSERT
   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 #endif
-  virtual uint size_of() const; // Size is bigger (ASSERT only)
+  virtual uint size_of() const;
 public:
   enum { Control,               // When is it safe to do this load?
          Memory,                // Chunk of memory is being loaded from
@@ -57,17 +60,17 @@
   } MemOrd;
 protected:
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
-    : Node(c0,c1,c2 ) {
+    : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
-    : Node(c0,c1,c2,c3) {
+    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
-    : Node(c0,c1,c2,c3,c4) {
+    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
@@ -127,6 +130,11 @@
   // the given memory state?  (The state may or may not be in(Memory).)
   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
 
+  void set_unaligned_access() { _unaligned_access = true; }
+  bool is_unaligned_access() const { return _unaligned_access; }
+  void set_mismatched_access() { _mismatched_access = true; }
+  bool is_mismatched_access() const { return _mismatched_access; }
+
 #ifndef PRODUCT
   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   virtual void dump_spec(outputStream *st) const;
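
The comment on _mismatched_access ("byte read in integer array for instance") corresponds to code like the following (a sketch, same UNSAFE setup as the test; byteFromIntArray is a made-up name):

    static byte byteFromIntArray(int[] words) {
        // access type T_BYTE vs. element type T_INT: the resulting LoadB is
        // marked mismatched and will not be folded against int-sized stores
        // on the same slice.
        return UNSAFE.getByte(words, (long) UNSAFE.ARRAY_INT_BASE_OFFSET);
    }

With the dump_spec() change in memnode.cpp, such nodes print with a trailing " unaligned" / " mismatched" in Ideal graph dumps (for example with -XX:+PrintIdeal on a debug build), which makes the new state visible when debugging.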