--- old/src/share/vm/utilities/copy.hpp  2016-02-05 13:29:45.105059830 -0800
+++ new/src/share/vm/utilities/copy.hpp  2016-02-05 13:29:44.969057461 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,6 +227,16 @@
     }
   }

+  /**
+   * Copy and *unconditionally* byte swap elements
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size);
+
   // Fill methods

   // Fill word-aligned words, not atomic on each word

--- old/src/share/vm/prims/unsafe.cpp  2016-02-05 13:29:45.105059830 -0800
+++ new/src/share/vm/prims/unsafe.cpp  2016-02-05 13:29:44.973057530 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -660,6 +660,36 @@
   Copy::conjoint_memory_atomic(src, dst, sz);
 UNSAFE_END

+// This function is a leaf: if the source and destination are both in native memory
+// the copy may potentially be very large, and we don't want to disable GC if we can
+// avoid it. If either the source or the destination (or both) are on the heap, the
+// function enters the VM using JVM_ENTRY_FROM_LEAF.
+JVM_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
+  UnsafeWrapper("Unsafe_CopySwapMemory0");
+
+  size_t sz = (size_t)size;
+  size_t esz = (size_t)elemSize;
+
+  if (srcObj == NULL && dstObj == NULL) {
+    // Both src & dst are in native memory
+    address src = (address)srcOffset;
+    address dst = (address)dstOffset;
+
+    Copy::conjoint_swap(src, dst, sz, esz);
+  } else {
+    // At least one of src/dst is on the heap; transition to the VM to access raw pointers
+
+    JVM_ENTRY_FROM_LEAF(env, void, Unsafe_CopySwapMemory0) {
+      oop srcp = JNIHandles::resolve(srcObj);
+      oop dstp = JNIHandles::resolve(dstObj);
+
+      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
+      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);
+
+      Copy::conjoint_swap(src, dst, sz, esz);
+    } JVM_END
+  }
+} JVM_END

 ////// Random queries

@@ -1363,6 +1393,7 @@
     {CC "getLoadAverage",   CC "([DI)I",               FN_PTR(Unsafe_Loadavg)},

     {CC "copyMemory",       CC "(" OBJ "J" OBJ "JJ)V", FN_PTR(Unsafe_CopyMemory)},
+    {CC "copySwapMemory0",  CC "(" OBJ "J" OBJ "JJJ)V", FN_PTR(Unsafe_CopySwapMemory0)},
     {CC "setMemory",        CC "(" OBJ "JJB)V",        FN_PTR(Unsafe_SetMemory)},

     {CC "defineAnonymousClass", CC "(" DAC_Args ")" CLS, FN_PTR(Unsafe_DefineAnonymousClass)},
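For context, a minimal standalone sketch (not HotSpot code; buffer contents and the bswap32 helper are illustrative) of the semantics the new copySwapMemory0 entry point exposes: each elemSize-sized element is copied while its byte order is reversed, shown here for 4-byte elements.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reverse the byte order of a 32-bit value.
static uint32_t bswap32(uint32_t x) {
  return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
         ((x << 8) & 0x00FF0000u) | (x << 24);
}

int main() {
  uint32_t src[2] = {0x11223344u, 0xAABBCCDDu};
  uint32_t dst[2];

  // Copy each 4-byte element, swapping its byte order on the way.
  for (size_t i = 0; i < 2; i++) {
    uint32_t tmp;
    memcpy(&tmp, &src[i], sizeof(tmp)); // memcpy tolerates unaligned sources
    tmp = bswap32(tmp);
    memcpy(&dst[i], &tmp, sizeof(tmp));
  }

  printf("%08x %08x\n", (unsigned)dst[0], (unsigned)dst[1]); // 44332211 ddccbbaa
  return 0;
}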
--- old/src/share/vm/utilities/copy.cpp  2016-02-05 13:29:45.109059900 -0800
+++ new/src/share/vm/utilities/copy.cpp  2016-02-05 13:29:44.981057668 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,168 @@
   }
 }

+class CopySwap : AllStatic {
+public:
+  /**
+   * Copy and byte swap elements
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+    address src_end = src + byte_count;
+
+    if (dst <= src || dst >= src_end) {
+      do_conjoint_swap<RIGHT>(src, dst, byte_count, elem_size);
+    } else {
+      do_conjoint_swap<LEFT>(src, dst, byte_count, elem_size);
+    }
+  }
+
+private:
+  /**
+   * Byte swap a 16-bit value
+   */
+  static uint16_t byte_swap(uint16_t x) {
+    return (x << 8) | (x >> 8);
+  }
+
+  /**
+   * Byte swap a 32-bit value
+   */
+  static uint32_t byte_swap(uint32_t x) {
+    uint16_t lo = (uint16_t)x;
+    uint16_t hi = (uint16_t)(x >> 16);
+
+    return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
+  }
+
+  /**
+   * Byte swap a 64-bit value
+   */
+  static uint64_t byte_swap(uint64_t x) {
+    uint32_t lo = (uint32_t)x;
+    uint32_t hi = (uint32_t)(x >> 32);
+
+    return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
+  }
+
+  enum CopyDirection {
+    RIGHT, // lower -> higher address
+    LEFT   // higher -> lower address
+  };
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <T> - type of element to copy
+   * <D> - copy direction
+   * <is_src_aligned> - true if src argument is aligned to element size
+   * <is_dst_aligned> - true if dst argument is aligned to element size
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   */
+  template <typename T, CopyDirection D, bool is_src_aligned, bool is_dst_aligned>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
+    address cur_src, cur_dst;
+
+    switch (D) {
+    case RIGHT:
+      cur_src = src;
+      cur_dst = dst;
+      break;
+    case LEFT:
+      cur_src = src + byte_count - sizeof(T);
+      cur_dst = dst + byte_count - sizeof(T);
+      break;
+    }
+
+    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
+      T tmp;
+
+      if (is_src_aligned) {
+        tmp = *(T*)cur_src;
+      } else {
+        memcpy(&tmp, cur_src, sizeof(T));
+      }
+
+      tmp = byte_swap(tmp);
+
+      if (is_dst_aligned) {
+        *(T*)cur_dst = tmp;
+      } else {
+        memcpy(cur_dst, &tmp, sizeof(T));
+      }
+
+      switch (D) {
+      case RIGHT:
+        cur_src += sizeof(T);
+        cur_dst += sizeof(T);
+        break;
+      case LEFT:
+        cur_src -= sizeof(T);
+        cur_dst -= sizeof(T);
+        break;
+      }
+    }
+  }
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <T> - type of element to copy
+   * <D> - copy direction
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   */
+  template <typename T, CopyDirection D>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
+    if (is_ptr_aligned(src, sizeof(T))) {
+      if (is_ptr_aligned(dst, sizeof(T))) {
+        do_conjoint_swap<T,D,true,true>(src, dst, byte_count);
+      } else {
+        do_conjoint_swap<T,D,true,false>(src, dst, byte_count);
+      }
+    } else {
+      if (is_ptr_aligned(dst, sizeof(T))) {
+        do_conjoint_swap<T,D,false,true>(src, dst, byte_count);
+      } else {
+        do_conjoint_swap<T,D,false,false>(src, dst, byte_count);
+      }
+    }
+  }
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <D> - copy direction
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  template <CopyDirection D>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+    switch (elem_size) {
+    case 2: do_conjoint_swap<uint16_t,D>(src, dst, byte_count); break;
+    case 4: do_conjoint_swap<uint32_t,D>(src, dst, byte_count); break;
+    case 8: do_conjoint_swap<uint64_t,D>(src, dst, byte_count); break;
+    default: guarantee(false, "do_conjoint_swap: Invalid elem_size %zd\n", elem_size);
+    }
+  }
+};
+
+void Copy::conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+  CopySwap::conjoint_swap(src, dst, byte_count, elem_size);
+}

 // Fill bytes; larger units are filled atomically if everything is aligned.
 void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
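A standalone sketch (again not HotSpot code; names are made up) of why CopySwap::conjoint_swap selects a copy direction: when dst lies inside [src, src + byte_count), a low-to-high pass would overwrite source elements before reading them, so the LEFT (high-to-low) direction is needed.

#include <cstdint>
#include <cstdio>

// Reverse the byte order of a 16-bit value.
static uint16_t bswap16(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

// Copy-swap 'count' elements from buf[0..count) to buf[dst_off..),
// walking from the highest element down (the LEFT direction above).
static void swap_backward(uint16_t* buf, size_t dst_off, size_t count) {
  for (size_t i = count; i-- > 0; ) {
    buf[dst_off + i] = bswap16(buf[i]);
  }
}

int main() {
  uint16_t buf[4] = {0x1122, 0x3344, 0x5566, 0};
  swap_backward(buf, 1, 3); // dst overlaps src, shifted one element right
  // buf[1..3] now holds the swapped originals; a forward pass would have
  // re-read buf[1] and buf[2] after overwriting them.
  printf("%04x %04x %04x\n", buf[1], buf[2], buf[3]); // 2211 4433 6655
  return 0;
}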
--- old/src/share/vm/runtime/interfaceSupport.hpp  2016-02-05 13:29:45.109059900 -0800
+++ new/src/share/vm/runtime/interfaceSupport.hpp  2016-02-05 13:29:44.981057668 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,6 +417,14 @@
   os::verify_stack_alignment();                                      \
   /* begin of body */

+#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
+  TRACE_CALL(result_type, header)                                    \
+  debug_only(ResetNoHandleMark __rnhm;)                              \
+  HandleMarkCleaner __hm(thread);                                    \
+  Thread* THREAD = thread;                                           \
+  os::verify_stack_alignment();                                      \
+  /* begin of body */
+

 // ENTRY routines may lock, GC and throw exceptions

@@ -584,6 +592,14 @@
     VM_LEAF_BASE(result_type, header)

+#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                 \
+  { {                                                                  \
+    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
+    ThreadInVMfromNative __tiv(thread);                                \
+    debug_only(VMNativeEntryWrapper __vew;)                            \
+    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)
+
+
 #define JVM_END } }

 #endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
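A standalone analogue (not the real macro expansion; FakeThread and ScopedVMTransition are stand-ins) of the control flow that JVM_LEAF plus JVM_ENTRY_FROM_LEAF give Unsafe_CopySwapMemory0: stay in the leaf (native) state when no heap object is involved, and perform the thread-state transition only when raw heap pointers must be resolved.

#include <cstdio>

struct FakeThread {
  bool in_vm;
  FakeThread() : in_vm(false) {}
};

// Stand-in for ThreadInVMfromNative: flips the state for one scope.
struct ScopedVMTransition {
  FakeThread& thread;
  explicit ScopedVMTransition(FakeThread& t) : thread(t) { thread.in_vm = true; }
  ~ScopedVMTransition() { thread.in_vm = false; }
};

static void copy_swap(FakeThread& t, const void* src_obj, const void* dst_obj) {
  if (src_obj == NULL && dst_obj == NULL) {
    // Both operands in native memory: no transition, GC stays enabled.
    printf("native/native copy, in_vm=%d (no transition)\n", t.in_vm);
  } else {
    // Heap involved: transition for the duration of the raw-pointer work.
    ScopedVMTransition tiv(t);
    printf("heap involved, in_vm=%d (transitioned)\n", t.in_vm);
  }
}

int main() {
  FakeThread t;
  copy_swap(t, NULL, NULL); // in_vm=0
  int heap_obj = 0;
  copy_swap(t, &heap_obj, NULL); // in_vm=1 inside the scope
  return 0;
}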