--- old/src/hotspot/cpu/aarch64/aarch64.ad 2018-08-20 11:53:22.166380268 +0100 +++ new/src/hotspot/cpu/aarch64/aarch64.ad 2018-08-20 11:53:21.867379377 +0100 @@ -8925,6 +8925,44 @@ // ---------------- end of volatile loads and stores ---------------- +instruct cacheWB(indirect addr) +%{ + match(CacheWB addr); + + ins_cost(100); + format %{"cache wb $addr" %} + ins_encode %{ + assert($addr->index_position() < 0, "should be"); + assert($addr$$disp == 0, "should be"); + __ cache_wb(Address($addr$$base$$Register, 0)); + %} + ins_pipe(pipe_slow); // XXX +%} + +instruct cacheWBPreSync() +%{ + match(CacheWBPreSync); + + ins_cost(100); + format %{"cache wb presync" %} + ins_encode %{ + __ cache_wbsync(true); + %} + ins_pipe(pipe_slow); // XXX +%} + +instruct cacheWBPostSync() +%{ + match(CacheWBPostSync); + + ins_cost(100); + format %{"cache wb postsync" %} + ins_encode %{ + __ cache_wbsync(false); + %} + ins_pipe(pipe_slow); // XXX +%} + // ============================================================================ // BSWAP Instructions --- old/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-08-20 11:53:23.378383879 +0100 +++ new/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-08-20 11:53:23.085383006 +0100 @@ -5820,3 +5820,21 @@ pop(saved_regs, sp); } + +void MacroAssembler::cache_wb(Address line) +{ + assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); + assert(line.index() == noreg, "index should be noreg"); + assert(line.offset() == 0, "offset should be 0"); + // would like to assert this + // assert(line._ext.shift == 0, "shift should be zero"); + dc(Assembler::CVAC, line.base()); +} + +void MacroAssembler::cache_wbsync(bool is_pre) +{ + // we only need a barrier post sync + if (!is_pre) { + membar(Assembler::AnyAny); + } +} --- old/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp 2018-08-20 11:53:24.454387084 +0100 +++ new/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp 2018-08-20 11:53:24.160386208 +0100 @@ -1356,6 +1356,9 @@ spill(tmp1, true, dst_offset+8); } } + + void cache_wb(Address line); + void cache_wbsync(bool is_pre); }; #ifdef ASSERT --- old/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp 2018-08-20 11:53:25.483390150 +0100 +++ new/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp 2018-08-20 11:53:25.202389313 +0100 @@ -2365,6 +2365,44 @@ return start; } + address generate_data_cache_writeback() { + const Register line = c_rarg0; // address of line to write back + + __ align(CodeEntryAlignment); + + StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); + + address start = __ pc(); + __ enter(); + __ cache_wb(Address(line, 0)); + __ leave(); + __ ret(lr); + + return start; + } + + address generate_data_cache_writeback_sync() { + const Register is_pre = c_rarg0; // pre or post sync + + __ align(CodeEntryAlignment); + + StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); + + // pre wbsync is a no-op + // post wbsync translates to an sfence + + Label skip; + address start = __ pc(); + __ enter(); + __ cbnz(is_pre, skip); + __ cache_wbsync(false); + __ bind(skip); + __ leave(); + __ ret(lr); + + return start; + } + void generate_arraycopy_stubs() { address entry; address entry_jbyte_arraycopy; @@ -5824,6 +5862,10 @@ StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); } + // data cache line writeback + StubRoutines::_data_cache_writeback = generate_data_cache_writeback(); + StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync(); + if (UseAESIntrinsics) { 
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); --- old/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp 2018-08-20 11:53:26.321392646 +0100 +++ new/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp 2018-08-20 11:53:26.029391776 +0100 @@ -129,6 +129,9 @@ int dcache_line = VM_Version::dcache_line_size(); + // publish data cache line flush size via generic field + _data_cache_line_flush_size = dcache_line; + if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) FLAG_SET_DEFAULT(AllocatePrefetchDistance, 3*dcache_line); if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize)) --- old/src/hotspot/cpu/x86/assembler_x86.cpp 2018-08-20 11:53:26.869394279 +0100 +++ new/src/hotspot/cpu/x86/assembler_x86.cpp 2018-08-20 11:53:26.588393442 +0100 @@ -2192,6 +2192,14 @@ emit_int8((unsigned char)0xF0); } +// Emit sfence instruction +void Assembler::sfence() { + NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) + emit_int8(0x0F); + emit_int8((unsigned char)0xAE); + emit_int8((unsigned char)0xF8); +} + void Assembler::mov(Register dst, Register src) { LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); } @@ -8426,12 +8434,47 @@ } void Assembler::clflush(Address adr) { + assert(VM_Version::supports_clflush(), "should do"); + prefix(adr); + emit_int8(0x0F); + emit_int8((unsigned char)0xAE); + emit_operand(rdi, adr); +} + +#ifdef _LP64 +void Assembler::clflushopt(Address adr) { + assert(VM_Version::supports_clflushopt(), "should do!"); + // adr should be base reg only with no index or offset + assert(adr.index() == noreg, "index should be noreg"); + assert(adr.scale() == Address::no_scale, "scale should be no_scale"); + assert(adr.disp() == 0, "displacement should be 0"); + // instruction prefix is 0x66 + emit_int8(0x66); prefix(adr); + // opcode family is 0x0f 0xAE emit_int8(0x0F); emit_int8((unsigned char)0xAE); + // extended opcode byte is 7 == rdi emit_operand(rdi, adr); } +void Assembler::clwb(Address adr) { + assert(VM_Version::supports_clwb(), "should do!"); + // adr should be base reg only with no index or offset + assert(adr.index() == noreg, "index should be noreg"); + assert(adr.scale() == Address::no_scale, "scale should be no_scale"); + assert(adr.disp() == 0, "displacement should be 0"); + // instruction prefix is 0x66 + emit_int8(0x66); + prefix(adr); + // opcode family is 0x0f 0xAE + emit_int8(0x0F); + emit_int8((unsigned char)0xAE); + // extended opcode byte is 6 == rsi + emit_operand(rsi, adr); +} +#endif + void Assembler::cmovq(Condition cc, Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); emit_int8(0x0F); --- old/src/hotspot/cpu/x86/assembler_x86.hpp 2018-08-20 11:53:27.746396891 +0100 +++ new/src/hotspot/cpu/x86/assembler_x86.hpp 2018-08-20 11:53:27.453396018 +0100 @@ -1019,6 +1019,8 @@ void cld(); void clflush(Address adr); + void clflushopt(Address adr); + void clwb(Address adr); void cmovl(Condition cc, Register dst, Register src); void cmovl(Condition cc, Register dst, Address src); @@ -1380,6 +1382,7 @@ } void mfence(); + void sfence(); // Moves --- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-08-20 11:53:28.788399995 +0100 +++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-08-20 11:53:28.503399146 +0100 @@ -10946,6 +10946,44 @@ bind(done); } +void MacroAssembler::cache_wb(Address line) +{ + // 64 bit cpus always support clflush + assert(VM_Version::supports_clflush(), "should not reach here on 32-bit"); + bool optimized = 
VM_Version::supports_clflushopt();
+  bool no_evict = VM_Version::supports_clwb();
+
+  // pick the correct implementation
+
+  if (optimized) {
+    if (no_evict) {
+      clwb(line);
+    } else {
+      clflushopt(line);
+    }
+  } else {
+    // no need for fence when using CLFLUSH
+    clflush(line);
+  }
+}
+
+
+void MacroAssembler::cache_wbsync(bool is_pre)
+{
+  assert(VM_Version::supports_clflush(), "should not reach here on 32-bit");
+  bool optimized = VM_Version::supports_clflushopt();
+  bool no_evict = VM_Version::supports_clwb();
+
+  // pick the correct implementation
+
+  if (!is_pre && (optimized || no_evict)) {
+    // need an sfence for post flush when using clflushopt or clwb
+    // otherwise no need for any synchronization
+
+    sfence();
+  }
+}
+
 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
   switch (cond) {
     // Note some conditions are synonyms for others
--- old/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2018-08-20 11:53:29.920403368 +0100
+++ new/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2018-08-20 11:53:29.634402516 +0100
@@ -1743,6 +1743,8 @@
   void byte_array_inflate(Register src, Register dst, Register len,
                           XMMRegister tmp1, Register tmp2);
 
+  void cache_wb(Address line);
+  void cache_wbsync(bool isPre);
 };
 
 /**
--- old/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp 2018-08-20 11:53:30.720405751 +0100
+++ new/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp 2018-08-20 11:53:30.437404908 +0100
@@ -2785,6 +2785,49 @@
     return start;
   }
 
+  address generate_data_cache_writeback() {
+    bool optimized = VM_Version::supports_clflushopt();
+    bool no_evict = VM_Version::supports_clwb();
+
+    const Register src = c_rarg0;  // source address
+
+    __ align(CodeEntryAlignment);
+
+    StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");
+
+    address start = __ pc();
+    __ enter();
+    const Address line(src, 0);
+    __ cache_wb(line);
+    __ leave();
+    __ ret(0);
+
+    return start;
+  }
+
+  address generate_data_cache_writeback_sync() {
+    const Register is_pre = c_rarg0;  // pre or post sync
+
+    __ align(CodeEntryAlignment);
+
+    StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");
+
+    // pre wbsync is a no-op
+    // post wbsync translates to an sfence
+
+    Label skip;
+    address start = __ pc();
+    __ enter();
+    __ cmpl(is_pre, 0);
+    __ jcc(Assembler::notEqual, skip);
+    __ cache_wbsync(false);
+    __ bind(skip);
+    __ leave();
+    __ ret(0);
+
+    return start;
+  }
+
   void generate_arraycopy_stubs() {
     address entry;
     address entry_jbyte_arraycopy;
@@ -5777,6 +5820,10 @@
     // support for verify_oop (must happen after universe_init)
     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
 
+    // data cache line writeback
+    StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
+    StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
+
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
--- old/src/hotspot/cpu/x86/vm_version_x86.cpp 2018-08-20 11:53:31.336407586 +0100
+++ new/src/hotspot/cpu/x86/vm_version_x86.cpp 2018-08-20 11:53:31.052406740 +0100
@@ -609,6 +609,9 @@
   guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8,
             "such clflush size is not supported");
 #endif
+  // publish data cache line flush size to generic field
+  _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
+
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
   if (!os::supports_sse())
     _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
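As background for the `* 8` in the vm_version_x86.cpp hunk above: CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, measured in 8-byte words, and the adjacent guarantee() pins that value at 8 on 64-bit builds, i.e. a 64-byte line. A minimal Java sketch of the decoding, illustrative names only, not JDK code:

    class FlushLineSize {
        // CPUID.01H:EBX[15:8] holds the CLFLUSH line size in 8-byte units;
        // multiplying by 8 yields the size in bytes (typically 8 * 8 = 64).
        static int flushLineSizeBytes(int cpuid1Ebx) {
            int words = (cpuid1Ebx >> 8) & 0xff; // extract EBX[15:8]
            return words * 8;                    // 8-byte units -> bytes
        }
    }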
--- old/src/hotspot/cpu/x86/vm_version_x86.hpp 2018-08-20 11:53:31.910409296 +0100
+++ new/src/hotspot/cpu/x86/vm_version_x86.hpp 2018-08-20 11:53:31.618408426 +0100
@@ -218,7 +218,10 @@
                avx512dq : 1,
                         : 1,
                     adx : 1,
-                        : 6,
+                        : 3,
+             clflushopt : 1,
+                   clwb : 1,
+                        : 1,
                avx512pf : 1,
                avx512er : 1,
                avx512cd : 1,
@@ -337,7 +340,10 @@
 #define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) // Vector carryless multiplication
 #define CPU_VAES ((uint64_t)UCONST64(0x8000000000))       // Vector AES instructions
 
-  enum Extended_Family {
+#define CPU_FLUSHOPT ((uint64_t)UCONST64(0x10000000000))  // clflushopt instruction
+#define CPU_CLWB ((uint64_t)UCONST64(0x20000000000))      // clwb instruction
+
+  enum Extended_Family {
     // AMD
     CPU_FAMILY_AMD_11H = 0x11,
     // ZX
@@ -572,6 +578,8 @@
       result |= CPU_SHA;
     if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
       result |= CPU_FMA;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
+      result |= CPU_FLUSHOPT;
 
     // AMD features.
     if (is_amd()) {
@@ -591,6 +599,9 @@
       if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
         result |= CPU_3DNOW_PREFETCH;
       }
+      if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
+        result |= CPU_CLWB;
+      }
     }
 
     // ZX features.
@@ -925,6 +936,32 @@
   // that can be used for efficient implementation of
   // the intrinsic for java.lang.Thread.onSpinWait()
   static bool supports_on_spin_wait() { return supports_sse2(); }
+
+  // There are several instructions which force a cache line to be
+  // written back to memory and which we can use to ensure that mapped
+  // persistent memory is up to date with pending in-cache changes.
+  //
+  // 64-bit CPUs always support clflush, which writes back and evicts
+  // the line.
+  //
+  // clflushopt is optional and acts like clflush except that it does
+  // not synchronize with other memory operations. It needs a preceding
+  // and trailing StoreStore fence.
+  //
+  // clwb is an optional, Intel-specific instruction which writes the
+  // line back without evicting it. It also does not synchronize with
+  // other memory operations, so it likewise needs a preceding and
+  // trailing StoreStore fence.
+
+#ifdef _LP64
+  static bool supports_clflush() { return true; }
+  static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
+  static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
+#else
+  static bool supports_clflush() { return true; }
+  static bool supports_clflushopt() { return false; }
+  static bool supports_clwb() { return false; }
+#endif // _LP64
+
 };
 
 #endif // CPU_X86_VM_VM_VERSION_X86_HPP
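The comment block above describes the selection policy that MacroAssembler::cache_wb and cache_wbsync implement earlier in this patch. A minimal Java sketch of that policy, with illustrative names that are not JDK API:

    class WritebackPolicy {
        enum Op { CLWB, CLFLUSHOPT, CLFLUSH }

        // Mirrors MacroAssembler::cache_wb: prefer clwb (writes back without
        // evicting), then clflushopt, else fall back to clflush, which every
        // 64-bit x86 provides.
        static Op select(boolean hasClflushopt, boolean hasClwb) {
            if (hasClflushopt) {
                return hasClwb ? Op.CLWB : Op.CLFLUSHOPT;
            }
            return Op.CLFLUSH;
        }

        // Mirrors MacroAssembler::cache_wbsync: only the weakly ordered
        // variants need an sfence, and only for the post-writeback sync.
        static boolean needsPostSfence(Op op) {
            return op != Op.CLFLUSH; // clflush is already strongly ordered
        }
    }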
--- old/src/hotspot/cpu/x86/x86_64.ad 2018-08-20 11:53:32.474410976 +0100
+++ new/src/hotspot/cpu/x86/x86_64.ad 2018-08-20 11:53:32.190410130 +0100
@@ -6237,6 +6237,44 @@
   ins_pipe(pipe_slow); // XXX
 %}
 
+instruct cacheWB(indirect addr)
+%{
+  match(CacheWB addr);
+
+  ins_cost(100);
+  format %{ "cache wb $addr" %}
+  ins_encode %{
+    assert($addr->index_position() < 0, "should be");
+    assert($addr$$disp == 0, "should be");
+    __ cache_wb(Address($addr$$base$$Register, 0));
+  %}
+  ins_pipe(pipe_slow); // XXX
+%}
+
+instruct cacheWBPreSync()
+%{
+  match(CacheWBPreSync);
+
+  ins_cost(100);
+  format %{ "cache wb presync" %}
+  ins_encode %{
+    __ cache_wbsync(true);
+  %}
+  ins_pipe(pipe_slow); // XXX
+%}
+
+instruct cacheWBPostSync()
+%{
+  match(CacheWBPostSync);
+
+  ins_cost(100);
+  format %{ "cache wb postsync" %}
+  ins_encode %{
+    __ cache_wbsync(false);
+  %}
+  ins_pipe(pipe_slow); // XXX
+%}
+
 //----------BSWAP Instructions-------------------------------------------------
 instruct bytes_reverse_int(rRegI dst) %{
   match(Set dst (ReverseBytesI dst));
--- old/src/hotspot/share/adlc/formssel.cpp 2018-08-20 11:53:33.361413619 +0100
+++ new/src/hotspot/share/adlc/formssel.cpp 2018-08-20 11:53:33.077412773 +0100
@@ -3517,6 +3517,12 @@
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
   if( strcmp(_opType,"PrefetchAllocation")==0 )
     return 1;
+  if( strcmp(_opType,"CacheWB")==0 )
+    return 1;
+  if( strcmp(_opType,"CacheWBPreSync")==0 )
+    return 1;
+  if( strcmp(_opType,"CacheWBPostSync")==0 )
+    return 1;
   if( _lChild ) {
     const char *opType = _lChild->_opType;
     for( int i=0; i<cnt; i++ )
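The C2 intrinsics in the next section compile Unsafe.writeback0, writebackPreSync0 and writebackPostSync0 into the CacheWB, CacheWBPreSync and CacheWBPostSync nodes matched by the AD rules above. The intended calling protocol, sketched in Java mirroring the Unsafe.writebackMemory loop that appears later in this patch (the no-op methods are stand-ins for the private natives so the sketch compiles):

    class WritebackProtocol {
        // Stand-ins for the patch's private Unsafe natives.
        static void writebackPreSync0() {}
        static void writeback0(long line) {}
        static void writebackPostSync0() {}

        // One pre-sync, one flush per cache line, one post-sync.
        static void writebackRegion(long address, long length, long lineSize) {
            writebackPreSync0();                    // order against earlier stores
            long line = address & ~(lineSize - 1);  // align down to a flush line
            for (long end = address + length; line < end; line += lineSize) {
                writeback0(line);                   // flush one cache line
            }
            writebackPostSync0();                   // order against later stores
        }
    }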
"found match rule for CacheWBPreSync but not CacheWB" + : "found match rule for CacheWBPostSync but not CacheWB")); + +#endif + null_check_receiver(); // null-check, then ignore + Node *sync; + if (isPre) { + sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM)); + } else { + sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM)); + } + sync = _gvn.transform(sync); + set_memory(sync, TypeRawPtr::BOTTOM); + return true; +} + //----------------------------inline_unsafe_allocate--------------------------- // public native Object Unsafe.allocateInstance(Class cls); bool LibraryCallKit::inline_unsafe_allocate() { --- old/src/hotspot/share/opto/memnode.hpp 2018-08-20 11:53:37.708426569 +0100 +++ new/src/hotspot/share/opto/memnode.hpp 2018-08-20 11:53:37.418425705 +0100 @@ -1605,4 +1605,40 @@ virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } }; +// cachewb node for guaranteeing writeback of the cache line at a +// given address to (persistent) storage +class CacheWBNode : public Node { +public: + CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return NotAMachineReg; } + virtual uint match_edge(uint idx) const { return (idx == 2); } + virtual const TypePtr *adr_type() const { return TypeRawPtr::BOTTOM; } + virtual const Type *bottom_type() const { return Type::MEMORY; } +}; + +// cachewb pre sync node for ensuring that writebacks are serialised +// relative to preceding or following stores +class CacheWBPreSyncNode : public Node { +public: + CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return NotAMachineReg; } + virtual uint match_edge(uint idx) const { return false; } + virtual const TypePtr *adr_type() const { return TypeRawPtr::BOTTOM; } + virtual const Type *bottom_type() const { return Type::MEMORY; } +}; + +// cachewb pre sync node for ensuring that writebacks are serialised +// relative to preceding or following stores +class CacheWBPostSyncNode : public Node { +public: + CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return NotAMachineReg; } + virtual uint match_edge(uint idx) const { return false; } + virtual const TypePtr *adr_type() const { return TypeRawPtr::BOTTOM; } + virtual const Type *bottom_type() const { return Type::MEMORY; } +}; + #endif // SHARE_VM_OPTO_MEMNODE_HPP --- old/src/hotspot/share/prims/unsafe.cpp 2018-08-20 11:53:38.508428952 +0100 +++ new/src/hotspot/share/prims/unsafe.cpp 2018-08-20 11:53:38.225428109 +0100 @@ -42,6 +42,7 @@ #include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.hpp" #include "runtime/reflection.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.hpp" #include "runtime/vm_version.hpp" @@ -442,6 +443,74 @@ } } UNSAFE_END +UNSAFE_LEAF (void, Unsafe_WriteBack0(JNIEnv *env, jobject unsafe, jlong line)) { +#ifndef PRODUCT + if (TraceMemoryWriteback) { + tty->print_cr("Unsafe: writeback 0x%p", addr_from_java(line)); + } +#endif + + // guard against currently unimplemented cases +#if !defined(LINUX) || !(defined(AARCH64) || defined(AMD64)) + // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit + JNU_ThrowRuntimeException(env, "writeback is not implemented"); + return IOS_THROWN; +#else + void (*wb)(void *); + void *a = 
--- old/src/hotspot/share/prims/unsafe.cpp 2018-08-20 11:53:38.508428952 +0100
+++ new/src/hotspot/share/prims/unsafe.cpp 2018-08-20 11:53:38.225428109 +0100
@@ -42,6 +42,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vm_version.hpp"
@@ -442,6 +443,74 @@
   }
 } UNSAFE_END
 
+UNSAFE_LEAF (void, Unsafe_WriteBack0(JNIEnv *env, jobject unsafe, jlong line)) {
+#ifndef PRODUCT
+  if (TraceMemoryWriteback) {
+    tty->print_cr("Unsafe: writeback 0x%p", addr_from_java(line));
+  }
+#endif
+
+  // guard against currently unimplemented cases
+#if !defined(LINUX) || !(defined(AARCH64) || defined(AMD64))
+  // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
+  JNU_ThrowRuntimeException(env, "writeback is not implemented");
+  return;
+#else
+  void (*wb)(void *);
+  void *a = addr_from_java(line);
+  wb = (void (*)(void *)) StubRoutines::data_cache_writeback();
+  assert(wb != NULL, "generate writeback stub!");
+  (*wb)(a);
+#endif
+} UNSAFE_END
+
+static void doWriteBackSync0(bool isPre)
+{
+  void (*wbsync)(int);
+  wbsync = (void (*)(int)) StubRoutines::data_cache_writeback_sync();
+  assert(wbsync != NULL, "generate writeback sync stub!");
+  (*wbsync)(isPre);
+}
+
+UNSAFE_LEAF (void, Unsafe_WriteBackPreSync0(JNIEnv *env, jobject unsafe)) {
+#ifndef PRODUCT
+  if (TraceMemoryWriteback) {
+    tty->print_cr("Unsafe: writeback pre-sync");
+  }
+#endif
+#if !defined(LINUX) || !(defined(AARCH64) || defined(AMD64))
+  // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
+  JNU_ThrowRuntimeException(env, "writeback sync is not implemented");
+  return;
+#else
+  doWriteBackSync0(true);
+#endif
+} UNSAFE_END
+
+UNSAFE_LEAF (void, Unsafe_WriteBackPostSync0(JNIEnv *env, jobject unsafe)) {
+#ifndef PRODUCT
+  if (TraceMemoryWriteback) {
+    tty->print_cr("Unsafe: writeback post-sync");
+  }
+#endif
+#if !defined(LINUX) || !(defined(AARCH64) || defined(AMD64))
+  // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
+  JNU_ThrowRuntimeException(env, "writeback sync is not implemented");
+  return;
+#else
+  doWriteBackSync0(false);
+#endif
+} UNSAFE_END
+
+UNSAFE_LEAF(jint, Unsafe_DataCacheLineFlushSize0()) {
+  jint size = (jint)VM_Version::data_cache_line_flush_size();
+  // TODO -- ensure every CPU actually sets this
+  if (size == 0) {
+    size = (jint)DEFAULT_CACHE_LINE_SIZE;
+  }
+  return size;
+} UNSAFE_END
+
 ////// Random queries
 
 UNSAFE_LEAF(jint, Unsafe_AddressSize0(JNIEnv *env, jobject unsafe)) {
@@ -1061,6 +1130,7 @@
     {CC "ensureClassInitialized0", CC "(" CLS ")V", FN_PTR(Unsafe_EnsureClassInitialized0)},
     {CC "arrayBaseOffset0",        CC "(" CLS ")I", FN_PTR(Unsafe_ArrayBaseOffset0)},
     {CC "arrayIndexScale0",        CC "(" CLS ")I", FN_PTR(Unsafe_ArrayIndexScale0)},
+    {CC "dataCacheLineFlushSize0", CC "()I",        FN_PTR(Unsafe_DataCacheLineFlushSize0)},
     {CC "addressSize0",            CC "()I",        FN_PTR(Unsafe_AddressSize0)},
     {CC "pageSize",                CC "()I",        FN_PTR(Unsafe_PageSize)},
 
@@ -1081,6 +1151,9 @@
     {CC "copyMemory0",             CC "(" OBJ "J" OBJ "JJ)V",  FN_PTR(Unsafe_CopyMemory0)},
     {CC "copySwapMemory0",         CC "(" OBJ "J" OBJ "JJJ)V", FN_PTR(Unsafe_CopySwapMemory0)},
+    {CC "writeback0",              CC "(" "J" ")V",            FN_PTR(Unsafe_WriteBack0)},
+    {CC "writebackPreSync0",       CC "()V",                   FN_PTR(Unsafe_WriteBackPreSync0)},
+    {CC "writebackPostSync0",      CC "()V",                   FN_PTR(Unsafe_WriteBackPostSync0)},
     {CC "setMemory0",              CC "(" OBJ "JJB)V",         FN_PTR(Unsafe_SetMemory0)},
 
     {CC "defineAnonymousClass0",   CC "(" DAC_Args ")" CLS,    FN_PTR(Unsafe_DefineAnonymousClass0)},
--- old/src/hotspot/share/runtime/globals.hpp 2018-08-20 11:53:39.313431350 +0100
+++ new/src/hotspot/share/runtime/globals.hpp 2018-08-20 11:53:39.018430472 +0100
@@ -2617,6 +2617,9 @@
   experimental(bool, UseSwitchProfiling, true,                              \
           "leverage profiling for table/lookup switch")                     \
                                                                             \
+  notproduct(bool, TraceMemoryWriteback, false,                             \
+          "Trace memory writeback operations")                              \
+                                                                            \
   JFR_ONLY(product(bool, FlightRecorder, false,                             \
           "Enable Flight Recorder"))                                        \
                                                                             \
--- old/src/hotspot/share/runtime/stubRoutines.cpp 2018-08-20 11:53:40.130433784 +0100
+++ new/src/hotspot/share/runtime/stubRoutines.cpp 2018-08-20 11:53:39.846432938 +0100
@@ -109,6 +109,9 @@
 address StubRoutines::_zero_aligned_words = CAST_FROM_FN_PTR(address, Copy::zero_to_words);
 
+address StubRoutines::_data_cache_writeback      = NULL;
+address StubRoutines::_data_cache_writeback_sync = NULL;
+
 address
StubRoutines::_checkcast_arraycopy = NULL; address StubRoutines::_checkcast_arraycopy_uninit = NULL; address StubRoutines::_unsafe_arraycopy = NULL; --- old/src/hotspot/share/runtime/stubRoutines.hpp 2018-08-20 11:53:40.681435426 +0100 +++ new/src/hotspot/share/runtime/stubRoutines.hpp 2018-08-20 11:53:40.396434577 +0100 @@ -151,6 +151,10 @@ static address _arrayof_jlong_disjoint_arraycopy; static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit; + // cache line writeback + static address _data_cache_writeback; + static address _data_cache_writeback_sync; + // these are recommended but optional: static address _checkcast_arraycopy, _checkcast_arraycopy_uninit; static address _unsafe_arraycopy; @@ -329,6 +333,9 @@ return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy; } + static address data_cache_writeback() { return _data_cache_writeback; } + static address data_cache_writeback_sync() { return _data_cache_writeback_sync; } + static address checkcast_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy; } --- old/src/hotspot/share/runtime/vm_version.cpp 2018-08-20 11:53:41.268437175 +0100 +++ new/src/hotspot/share/runtime/vm_version.cpp 2018-08-20 11:53:40.980436317 +0100 @@ -43,6 +43,7 @@ bool Abstract_VM_Version::_supports_atomic_getadd8 = false; unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U; unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0; +unsigned int Abstract_VM_Version::_data_cache_line_flush_size = 0; #ifndef HOTSPOT_VERSION_STRING #error HOTSPOT_VERSION_STRING must be defined --- old/src/hotspot/share/runtime/vm_version.hpp 2018-08-20 11:53:41.834438861 +0100 +++ new/src/hotspot/share/runtime/vm_version.hpp 2018-08-20 11:53:41.552438021 +0100 @@ -58,6 +58,7 @@ static int _vm_build_number; static unsigned int _parallel_worker_threads; static bool _parallel_worker_threads_initialized; + static unsigned int _data_cache_line_flush_size; static unsigned int nof_parallel_worker_threads(unsigned int num, unsigned int dem, @@ -139,6 +140,12 @@ return _L1_data_cache_line_size; } + // the size in bytes of a data cache line flushed by a flush + // operation which should be a power of two + static unsigned int data_cache_line_flush_size() { + return _data_cache_line_flush_size; + } + // ARCH specific policy for the BiasedLocking static bool use_biased_locking() { return true; } --- old/src/java.base/share/classes/java/nio/Direct-X-Buffer.java.template 2018-08-20 11:53:42.625441217 +0100 +++ new/src/java.base/share/classes/java/nio/Direct-X-Buffer.java.template 2018-08-20 11:53:42.342440374 +0100 @@ -167,15 +167,16 @@ // protected Direct$Type$Buffer$RW$(int cap, long addr, FileDescriptor fd, - Runnable unmapper) + Runnable unmapper, + boolean isPersistent) { #if[rw] - super(-1, 0, cap, cap, fd); + super(-1, 0, cap, cap, fd, isPersistent); address = addr; cleaner = Cleaner.create(this, unmapper); att = null; #else[rw] - super(cap, addr, fd, unmapper); + super(cap, addr, fd, unmapper, isPersistent); this.isReadOnly = true; #end[rw] } --- old/src/java.base/share/classes/java/nio/MappedByteBuffer.java 2018-08-20 11:53:43.430443616 +0100 +++ new/src/java.base/share/classes/java/nio/MappedByteBuffer.java 2018-08-20 11:53:43.146442769 +0100 @@ -77,18 +77,33 @@ // operations if valid; null if the buffer is not mapped. 
private final FileDescriptor fd;
 
+    // A flag which is true if this buffer is mapped against persistent
+    // memory using one of the persistent FileChannel.MapMode modes,
+    // MapMode.READ_ONLY_PERSISTENT or MapMode.READ_WRITE_PERSISTENT,
+    // and false if it is mapped using any of the other modes. This flag
+    // only determines the behaviour of force operations.
+    private final boolean isPersistent;
+
     // This should only be invoked by the DirectByteBuffer constructors
     //
     MappedByteBuffer(int mark, int pos, int lim, int cap, // package-private
-                     FileDescriptor fd)
-    {
+                     FileDescriptor fd, boolean isPersistent) {
         super(mark, pos, lim, cap);
         this.fd = fd;
+        this.isPersistent = isPersistent;
+    }
+
+    MappedByteBuffer(int mark, int pos, int lim, int cap, // package-private
+                     boolean isPersistent) {
+        super(mark, pos, lim, cap);
+        this.fd = null;
+        this.isPersistent = isPersistent;
     }
 
     MappedByteBuffer(int mark, int pos, int lim, int cap) { // package-private
         super(mark, pos, lim, cap);
         this.fd = null;
+        this.isPersistent = false;
     }
 
     // Returns the distance (in bytes) of the buffer from the page aligned address
@@ -108,6 +123,31 @@
     }
 
     /**
+     * Tells whether this buffer was mapped against a non-volatile
+     * memory device by passing one of the persistent map modes {@link
+     * java.nio.channels.FileChannel.MapMode#READ_ONLY_PERSISTENT
+     * MapMode#READ_ONLY_PERSISTENT} or {@link
+     * java.nio.channels.FileChannel.MapMode#READ_WRITE_PERSISTENT
+     * MapMode#READ_WRITE_PERSISTENT} in the call to {@link
+     * java.nio.channels.FileChannel#map FileChannel.map}, or mapped
+     * against some other form of device file by passing one of the
+     * other map modes.
+     *
+     * @return true if the file was mapped against a non-volatile
+     * memory device by passing one of the persistent map modes
+     * {@link java.nio.channels.FileChannel.MapMode#READ_ONLY_PERSISTENT
+     * MapMode#READ_ONLY_PERSISTENT} or {@link
+     * java.nio.channels.FileChannel.MapMode#READ_WRITE_PERSISTENT
+     * MapMode#READ_WRITE_PERSISTENT} in the call to {@link
+     * java.nio.channels.FileChannel#map FileChannel.map}, otherwise
+     * false.
+     */
+    public boolean isPersistent() {
+        return isPersistent;
+    }
+
+    /**
      * Tells whether or not this buffer's content is resident in physical
      * memory.
      *
@@ -129,6 +169,10 @@
         if (fd == null) {
             return true;
         }
+        // a persistent mapped buffer is always loaded
+        if (isPersistent()) {
+            return true;
+        }
         if ((address == 0) || (capacity() == 0))
             return true;
         long offset = mappingOffset();
@@ -153,6 +197,10 @@
         if (fd == null) {
             return this;
         }
+        // no need to load a persistent mapped buffer
+        if (isPersistent()) {
+            return this;
+        }
         if ((address == 0) || (capacity() == 0))
             return this;
         long offset = mappingOffset();
@@ -202,12 +250,61 @@
      * @return This buffer
      */
     public final MappedByteBuffer force() {
+        return force(0, capacity());
+    }
+
+    /**
+     * Forces any changes made to some region of this buffer's content
+     * to be written to the storage device containing the mapped file.
+     *

+     * <p> If the file mapped into this buffer resides on a local storage
+     * device then when this method returns it is guaranteed that all changes
+     * made to the buffer since it was created, or since this method was last
+     * invoked, will have been written to that device.
+     *
+     * <p> If the file does not reside on a local device then no such guarantee
+     * is made.
+     *
+     * <p> If this buffer was not mapped in read/write mode ({@link
+     * java.nio.channels.FileChannel.MapMode#READ_WRITE}) then invoking this
+     * method has no effect. </p>

+ * + * @param from + * The offset to the first byte in the buffer region that + * is to be written back to storage + * + * @param to + * The offset to the first byte beyond the buffer region + * that is to be written back to storage + * + * @return This buffer + * + * @since 12 + */ + public final MappedByteBuffer force(long from, long to) { if (fd == null) { return this; } if ((address != 0) && (capacity() != 0)) { + // check inputs + if (from < 0 || from >= capacity()) { + throw new IllegalArgumentException(); + } + if (to < from || to > capacity()) { + throw new IllegalArgumentException(); + } + long offset = mappingOffset(); - force0(fd, mappingAddress(offset), mappingLength(offset)); + long a = mappingAddress(offset) + from; + long length = to - from; + if (isPersistent) { + // simply force writeback of associated cache lines + Unsafe unsafe = Unsafe.getUnsafe(); + unsafe.writebackMemory(a, length); + } else { + // writeback using device associated with fd + force0(fd, a, length); + } } return this; } --- old/src/java.base/share/classes/java/nio/channels/FileChannel.java 2018-08-20 11:53:44.025445388 +0100 +++ new/src/java.base/share/classes/java/nio/channels/FileChannel.java 2018-08-20 11:53:43.714444461 +0100 @@ -817,6 +817,18 @@ public static final MapMode PRIVATE = new MapMode("PRIVATE"); + /** + * Mode for a read-only mapping from a non-volatile device. + */ + public static final MapMode READ_ONLY_PERSISTENT + = new MapMode("READ_ONLY_PERSISTENT"); + + /** + * Mode for a read/write mapping from a non-volatile device. + */ + public static final MapMode READ_WRITE_PERSISTENT + = new MapMode("READ_WRITE_PERSISTENT"); + private final String name; private MapMode(String name) { @@ -889,10 +901,12 @@ * * @param mode * One of the constants {@link MapMode#READ_ONLY READ_ONLY}, {@link - * MapMode#READ_WRITE READ_WRITE}, or {@link MapMode#PRIVATE - * PRIVATE} defined in the {@link MapMode} class, according to + * MapMode#READ_WRITE READ_WRITE}, {@link MapMode#PRIVATE + * PRIVATE}, {@link MapMode#READ_ONLY_PERSISTENT READ_ONLY_PERSISTENT} + * or {@link MapMode#READ_WRITE_PERSISTENT READ_WRITE_PERSISTENT} defined in the {@link MapMode} class, according to * whether the file is to be mapped read-only, read/write, or - * privately (copy-on-write), respectively + * privately (copy-on-write), read-only from a non-volatile + * device or read-write from a non-volatile device, respectively * * @param position * The position within the file at which the mapped region @@ -905,13 +919,14 @@ * @return The mapped byte buffer * * @throws NonReadableChannelException - * If the {@code mode} is {@link MapMode#READ_ONLY READ_ONLY} but + * If the {@code mode} is {@link MapMode#READ_ONLY READ_ONLY} or + * {@link MapMode#READ_ONLY_PERSISTENT READ_ONLY_PERSISTENT} but * this channel was not opened for reading * * @throws NonWritableChannelException - * If the {@code mode} is {@link MapMode#READ_WRITE READ_WRITE} or - * {@link MapMode#PRIVATE PRIVATE} but this channel was not opened - * for both reading and writing + * If the {@code mode} is {@link MapMode#READ_WRITE READ_WRITE}, + * {@link MapMode#PRIVATE PRIVATE} or {@link MapMode#READ_WRITE_PERSISTENT READ_WRITE_PERSISTENT} + * but this channel was not opened for both reading and writing * * @throws IllegalArgumentException * If the preconditions on the parameters do not hold --- old/src/java.base/share/classes/jdk/internal/misc/Unsafe.java 2018-08-20 11:53:44.610447131 +0100 +++ new/src/java.base/share/classes/jdk/internal/misc/Unsafe.java 2018-08-20 
11:53:44.320446267 +0100 @@ -918,6 +918,89 @@ checkPointer(null, address); } + /** + * ensure writeback of a specified virtual memory address range + * from cache to physical memory. all bytes in the address range + * are guaranteed to have been written back to physical memory on + * return from this call i.e. subsequently executed store + * instructions are guaranteed not to be visible before the + * writeback is completed. + * + * @param address + * the lowest byte address that must be guaranteed written + * back to memory. bytes at lower addresses may also be + * written back. + * + * @param length + * the length in bytes of the region starting at address + * that must be guaranteed written back to memory. + * + * @throws RuntimeException if the arguments are invalid + * (Note: after optimization, invalid inputs may + * go undetected, which will lead to unpredictable + * behavior) + * + * @since 12 + */ + + public void writebackMemory(long address, long length) + { + checkWritebackMemory(address, length); + + // perform any required pre-writeback barrier + writebackPreSync0(); + + // write back one cache line at a time + long line = (address & CACHE_LINE_MASK); + long end = address + length; + while (line < end) { + writeback0(line); + line += CACHE_LINE_FLUSH_SIZE; + } + + // perform any required post-writeback barrier + writebackPostSync0(); + } + + /** + * Validate the arguments to writebackMemory + * + * @throws RuntimeException if the arguments are invalid + * (Note: after optimization, invalid inputs may + * go undetected, which will lead to unpredictable + * behavior) + */ + private void checkWritebackMemory(long address, long length) + { + checkNativeAddress(address); + checkSize(length); + } + + /** + * The size of an L1 data cache line which will be a power of two. + */ + private static final long CACHE_LINE_FLUSH_SIZE = (long)theUnsafe.dataCacheLineFlushSize0(); + private static final long CACHE_LINE_MASK = ~(CACHE_LINE_FLUSH_SIZE - 1); + + /** + * primitive operation forcing writeback of a single cache line. 
+ * + * @param address + * the start address of the cache line to be written back + */ + // native used to write back an individual cache line starting at + // the supplied address + @HotSpotIntrinsicCandidate + private native void writeback0(long address); + // native used to serialise writeback operations relative to + // preceding memory writes + @HotSpotIntrinsicCandidate + private native void writebackPreSync0(); + // native used to serialise writeback operations relative to + // following memory writes + @HotSpotIntrinsicCandidate + private native void writebackPostSync0(); + /// random queries /** @@ -1172,7 +1255,6 @@ */ public native int pageSize(); - /// random trusted operations from JNI: /** @@ -3712,6 +3794,7 @@ private native int arrayBaseOffset0(Class arrayClass); private native int arrayIndexScale0(Class arrayClass); private native int addressSize0(); + private native int dataCacheLineFlushSize0(); private native Class defineAnonymousClass0(Class hostClass, byte[] data, Object[] cpPatches); private native int getLoadAverage0(double[] loadavg, int nelems); private native boolean unalignedAccess0(); --- old/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java 2018-08-20 11:53:45.237448999 +0100 +++ new/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java 2018-08-20 11:53:44.936448102 +0100 @@ -866,20 +866,15 @@ // -- Memory-mapped buffers -- - private static class Unmapper + private static abstract class Unmapper implements Runnable { // may be required to close file private static final NativeDispatcher nd = new FileDispatcherImpl(); - // keep track of mapped buffer usage - static volatile int count; - static volatile long totalSize; - static volatile long totalCapacity; - private volatile long address; - private final long size; - private final int cap; + protected final long size; + protected final int cap; private final FileDescriptor fd; private Unmapper(long address, long size, int cap, @@ -890,12 +885,6 @@ this.size = size; this.cap = cap; this.fd = fd; - - synchronized (Unmapper.class) { - count++; - totalSize += size; - totalCapacity += cap; - } } public void run() { @@ -913,14 +902,70 @@ } } - synchronized (Unmapper.class) { + decrement_stats(); + } + protected abstract void increment_stats(); + protected abstract void decrement_stats(); + } + + private static class DefaultUnmapper extends Unmapper { + + // keep track of non-persistent mapped buffer usage + static volatile int count; + static volatile long totalSize; + static volatile long totalCapacity; + + public DefaultUnmapper(long address, long size, int cap, + FileDescriptor fd) { + super(address, size, cap, fd); + increment_stats(); + } + + protected void increment_stats() { + synchronized (DefaultUnmapper.class) { + count++; + totalSize += size; + totalCapacity += cap; + } + } + protected void decrement_stats() { + synchronized (DefaultUnmapper.class) { count--; totalSize -= size; totalCapacity -= cap; } } } + + private static class PersistentUnmapper extends Unmapper { + // keep track of mapped buffer usage + static volatile int count; + static volatile long totalSize; + static volatile long totalCapacity; + + public PersistentUnmapper(long address, long size, int cap, + FileDescriptor fd) { + super(address, size, cap, fd); + increment_stats(); + } + + protected void increment_stats() { + synchronized (PersistentUnmapper.class) { + count++; + totalSize += size; + totalCapacity += cap; + } + } + protected void decrement_stats() { + synchronized (PersistentUnmapper.class) { + count--; + 
totalSize -= size; + totalCapacity -= cap; + } + } + } + private static void unmap(MappedByteBuffer bb) { Cleaner cl = ((DirectBuffer)bb).cleaner(); if (cl != null) @@ -947,12 +992,21 @@ throw new IllegalArgumentException("Size exceeds Integer.MAX_VALUE"); int imode = -1; + boolean isPersistent = false; if (mode == MapMode.READ_ONLY) imode = MAP_RO; else if (mode == MapMode.READ_WRITE) imode = MAP_RW; - else if (mode == MapMode.PRIVATE) + else if (mode == MapMode.PRIVATE) { imode = MAP_PV; + } else if (mode == MapMode.READ_ONLY_PERSISTENT) { + imode = MAP_RO; + isPersistent = true; + } else if (mode == MapMode.READ_WRITE_PERSISTENT) { + imode = MAP_RW; + isPersistent = true; + } + assert (imode >= 0); if ((mode != MapMode.READ_ONLY) && !writable) throw new NonWritableChannelException(); @@ -995,9 +1049,9 @@ // a valid file descriptor is not required FileDescriptor dummy = new FileDescriptor(); if ((!writable) || (imode == MAP_RO)) - return Util.newMappedByteBufferR(0, 0, dummy, null); + return Util.newMappedByteBufferR(0, 0, dummy, null, isPersistent); else - return Util.newMappedByteBuffer(0, 0, dummy, null); + return Util.newMappedByteBuffer(0, 0, dummy, null, isPersistent); } pagePosition = (int)(position % allocationGranularity); @@ -1005,7 +1059,7 @@ mapSize = size + pagePosition; try { // If map0 did not throw an exception, the address is valid - addr = map0(imode, mapPosition, mapSize); + addr = map0(imode, mapPosition, mapSize, isPersistent); } catch (OutOfMemoryError x) { // An OutOfMemoryError may indicate that we've exhausted // memory so force gc and re-attempt map @@ -1016,7 +1070,7 @@ Thread.currentThread().interrupt(); } try { - addr = map0(imode, mapPosition, mapSize); + addr = map0(imode, mapPosition, mapSize, isPersistent); } catch (OutOfMemoryError y) { // After a second OOME, fail throw new IOException("Map failed", y); @@ -1037,17 +1091,21 @@ assert (IOStatus.checkAll(addr)); assert (addr % allocationGranularity == 0); int isize = (int)size; - Unmapper um = new Unmapper(addr, mapSize, isize, mfd); + Unmapper um = (isPersistent + ? new PersistentUnmapper(addr, mapSize, isize, mfd) + : new DefaultUnmapper(addr, mapSize, isize, mfd)); if ((!writable) || (imode == MAP_RO)) { return Util.newMappedByteBufferR(isize, addr + pagePosition, mfd, - um); + um, + isPersistent); } else { return Util.newMappedByteBuffer(isize, addr + pagePosition, mfd, - um); + um, + isPersistent); } } finally { threads.remove(ti); @@ -1067,15 +1125,40 @@ } @Override public long getCount() { - return Unmapper.count; + return DefaultUnmapper.count; + } + @Override + public long getTotalCapacity() { + return DefaultUnmapper.totalCapacity; + } + @Override + public long getMemoryUsed() { + return DefaultUnmapper.totalSize; + } + }; + } + + /** + * Invoked by sun.management.ManagementFactoryHelper to create the management + * interface for persistent mapped buffers. 
+ */ + public static JavaNioAccess.BufferPool getPersistentMappedBufferPool() { + return new JavaNioAccess.BufferPool() { + @Override + public String getName() { + return "mapped_persistent"; + } + @Override + public long getCount() { + return PersistentUnmapper.count; } @Override public long getTotalCapacity() { - return Unmapper.totalCapacity; + return PersistentUnmapper.totalCapacity; } @Override public long getMemoryUsed() { - return Unmapper.totalSize; + return PersistentUnmapper.totalSize; } }; } @@ -1201,7 +1284,7 @@ // -- Native methods -- // Creates a new mapping - private native long map0(int prot, long position, long length) + private native long map0(int prot, long position, long length, boolean isPersistent) throws IOException; // Removes an existing mapping --- old/src/java.base/share/classes/sun/nio/ch/Util.java 2018-08-20 11:53:45.822450742 +0100 +++ new/src/java.base/share/classes/sun/nio/ch/Util.java 2018-08-20 11:53:45.525449857 +0100 @@ -415,7 +415,8 @@ new Class[] { int.class, long.class, FileDescriptor.class, - Runnable.class }); + Runnable.class, + boolean.class }); ctor.setAccessible(true); directByteBufferConstructor = ctor; } catch (ClassNotFoundException | @@ -430,7 +431,8 @@ static MappedByteBuffer newMappedByteBuffer(int size, long addr, FileDescriptor fd, - Runnable unmapper) + Runnable unmapper, + boolean isPersistent) { MappedByteBuffer dbb; if (directByteBufferConstructor == null) @@ -440,7 +442,8 @@ new Object[] { size, addr, fd, - unmapper }); + unmapper, + isPersistent}); } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { @@ -460,7 +463,8 @@ new Class[] { int.class, long.class, FileDescriptor.class, - Runnable.class }); + Runnable.class, + boolean.class }); ctor.setAccessible(true); directByteBufferRConstructor = ctor; } catch (ClassNotFoundException | @@ -475,7 +479,8 @@ static MappedByteBuffer newMappedByteBufferR(int size, long addr, FileDescriptor fd, - Runnable unmapper) + Runnable unmapper, + boolean isPersistent) { MappedByteBuffer dbb; if (directByteBufferRConstructor == null) @@ -485,7 +490,8 @@ new Object[] { size, addr, fd, - unmapper }); + unmapper, + isPersistent}); } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { --- old/src/java.base/unix/native/libnio/ch/FileChannelImpl.c 2018-08-20 11:53:46.387452425 +0100 +++ new/src/java.base/unix/native/libnio/ch/FileChannelImpl.c 2018-08-20 11:53:46.099451567 +0100 @@ -47,6 +47,7 @@ #include "nio_util.h" #include "sun_nio_ch_FileChannelImpl.h" #include "java_lang_Integer.h" +#include static jfieldID chan_fd; /* jobject 'fd' in sun.nio.ch.FileChannelImpl */ @@ -72,7 +73,7 @@ JNIEXPORT jlong JNICALL Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this, - jint prot, jlong off, jlong len) + jint prot, jlong off, jlong len, jboolean map_sync) { void *mapAddress = 0; jobject fdo = (*env)->GetObjectField(env, this, chan_fd); @@ -80,6 +81,9 @@ int protections = 0; int flags = 0; + // should never be called with map_sync and prot == PRIVATE + assert((prot != sun_nio_ch_FileChannelImpl_MAP_PV) ||! map_sync); + if (prot == sun_nio_ch_FileChannelImpl_MAP_RO) { protections = PROT_READ; flags = MAP_SHARED; @@ -91,6 +95,33 @@ flags = MAP_PRIVATE; } + // if MAP_SYNC and MAP_SHARED_VALIDATE are not defined then it is + // best to define them here. This ensures the code compiles on old + // OS releases which do not provide the relevant headers. 
If run + // on the same machine then it will work if the kernel contains + // the necessary support otherwise mmap should fail with an + // invalid argument error + +#ifndef MAP_SYNC +#define MAP_SYNC 0x80000 +#endif +#ifndef MAP_SHARED_VALIDATE +#define MAP_SHARED_VALIDATE 0x03 +#endif + + if (map_sync) { + // ensure + // 1) this is Linux on AArch64 or x86_64 + // 2) the mmap APIs are available/ at compile time +#if !defined(LINUX) || ! (defined(aarch64) || (defined(amd64) && defined(_LP64))) + // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit + JNU_ThrowIOException(env, "map with persistent mode is not implemented"); + return IOS_THROWN; +#else + flags |= MAP_SYNC | MAP_SHARED_VALIDATE; +#endif + } + mapAddress = mmap64( 0, /* Let OS decide location */ len, /* Number of bytes to map */ @@ -100,6 +131,11 @@ off); /* Offset into file */ if (mapAddress == MAP_FAILED) { + if (map_sync && errno == ENOTSUP) { + JNU_ThrowIOExceptionWithLastError(env, "map with persistent mode is not supported"); + return IOS_THROWN; + } + if (errno == ENOMEM) { JNU_ThrowOutOfMemoryError(env, "Map failed"); return IOS_THROWN; --- old/src/java.management/share/classes/sun/management/ManagementFactoryHelper.java 2018-08-20 11:53:46.957454123 +0100 +++ new/src/java.management/share/classes/sun/management/ManagementFactoryHelper.java 2018-08-20 11:53:46.657453229 +0100 @@ -345,6 +345,8 @@ .getDirectBufferPool())); bufferPools.add(createBufferPoolMXBean(sun.nio.ch.FileChannelImpl .getMappedBufferPool())); + bufferPools.add(createBufferPoolMXBean(sun.nio.ch.FileChannelImpl + .getPersistentMappedBufferPool())); } return bufferPools; }
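A hypothetical end-to-end use of the API this patch adds. The path assumes a DAX-mounted persistent-memory filesystem; per the FileChannelImpl.c hunk above, mapping an ordinary file with a persistent mode fails with an IOException because the kernel rejects MAP_SYNC:

    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public class PersistentMapExample {
        public static void main(String[] args) throws Exception {
            Path p = Path.of("/mnt/pmem/example.dat"); // illustrative DAX path
            try (FileChannel fc = FileChannel.open(p,
                    StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                MappedByteBuffer mbb =
                    fc.map(FileChannel.MapMode.READ_WRITE_PERSISTENT, 0, 4096);
                mbb.putLong(0, System.nanoTime());
                // force(from, to) writes back just the dirty range using the
                // Unsafe.writebackMemory cache-line flush path added above.
                mbb.force(0, Long.BYTES);
            }
        }
    }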