# HG changeset patch # Parent 656931ff4345f255de14cb2cefa600405b3c576e diff -r 656931ff4345 make/hotspot/symbols/symbols-unix --- a/make/hotspot/symbols/symbols-unix Fri May 18 11:52:53 2018 +0100 +++ b/make/hotspot/symbols/symbols-unix Sun May 20 17:57:55 2018 +0100 @@ -163,6 +163,7 @@ JVM_RawMonitorDestroy JVM_RawMonitorEnter JVM_RawMonitorExit +JVM_RegisterContinuationMethods JVM_RegisterSignal JVM_ReleaseUTF JVM_ResumeThread diff -r 656931ff4345 src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -221,6 +221,23 @@ } } +void LIR_Assembler::getfp(LIR_Opr opr) { + __ lea(opr->as_register_lo(), Address(rsp, initial_frame_size_in_bytes() + wordSize)); // + wordSize accounts for the push of rbp that precedes the sub of rsp +} + +void LIR_Assembler::getsp(LIR_Opr opr) { + __ movptr(opr->as_register_lo(), rsp); +} + +#if 0 +void LIR_Assembler::getpc(LIR_Opr opr) { + const char *name = "cont_getPC"; + address entry = StubRoutines::cont_getPC(); + __ call_VM_leaf(entry, 0); + __ movptr(opr->as_register_lo(), rax); +} +#endif + bool LIR_Assembler::is_literal_address(LIR_Address* addr) { return addr->base()->is_illegal() && addr->index()->is_illegal(); } diff -r 656931ff4345 src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -325,6 +325,51 @@ set_result(x, round_item(reg)); } +void LIRGenerator::do_continuation_getFP(Intrinsic* x) { + LIR_Opr result_reg = rlock_result(x); + __ getfp(result_reg); +} + +void LIRGenerator::do_continuation_getSP(Intrinsic* x) { + LIR_Opr result_reg = rlock_result(x); + __ getsp(result_reg); +} + +void LIRGenerator::do_continuation_getPC(Intrinsic* x) { + BasicTypeList signature(0); + //signature.append(T_LONG); + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + const LIR_Opr result_reg = result_register_for(x->type()); + address entry = StubRoutines::cont_getPC(); + LIR_Opr result = rlock_result(x); + __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args()); + __ move(result_reg, result); +} + +void LIRGenerator::do_continuation_doContinue(Intrinsic* x) { + BasicTypeList signature(0); + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + //const LIR_Opr result_reg = result_register_for(x->type()); + address entry = StubRoutines::cont_thaw(2); + CodeEmitInfo* info = state_for(x, x->state()); + __ call_runtime(entry, getThreadTemp(), getThreadTemp(), cc->args(), info); +} + +void LIRGenerator::do_continuation_doYield(Intrinsic* x) { + BasicTypeList signature(1); + signature.append(T_OBJECT); + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + LIRItem value(x->argument_at(0), this); + value.load_item(); + __ move(value.result(), cc->at(0)); + + address entry = StubRoutines::cont_doYield(); + CodeEmitInfo* info = state_for(x, x->state()); + __ call_runtime(entry, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, cc->args(), info); +} // for _fadd, _fmul, _fsub, _fdiv, _frem // _dadd, _dmul, _dsub, _ddiv, _drem diff -r 656931ff4345 src/hotspot/cpu/x86/frame_x86.cpp --- a/src/hotspot/cpu/x86/frame_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/frame_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -29,6 +29,7 @@ #include "oops/method.hpp" #include
"oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/continuation.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -328,7 +329,7 @@ BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset); // make sure the pointer points inside the frame assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer"); - assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer"); + assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer: result: %p fp: %p", result, fp()); return result; } @@ -456,6 +457,8 @@ intptr_t* sender_sp = unextended_sp() + _cb->frame_size(); intptr_t* unextended_sp = sender_sp; + assert (sender_sp == real_fp(), "sender_sp: %p real_fp: %p", sender_sp, real_fp()); + // On Intel the return_address is always the word on the stack address sender_pc = (address) *(sender_sp-1); @@ -490,12 +493,12 @@ // update it accordingly map->set_include_argument_oops(false); - if (is_entry_frame()) return sender_for_entry_frame(map); - if (is_interpreted_frame()) return sender_for_interpreter_frame(map); + if (is_entry_frame()) return Continuation::fix_continuation_bottom_sender(*this, sender_for_entry_frame(map), map); + if (is_interpreted_frame()) return Continuation::fix_continuation_bottom_sender(*this, sender_for_interpreter_frame(map), map); assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); if (_cb != NULL) { - return sender_for_compiled_frame(map); + return Continuation::fix_continuation_bottom_sender(*this, sender_for_compiled_frame(map), map); } // Must be native-compiled frame, i.e. the marshaling code for native // methods that exists in the core system. 
diff -r 656931ff4345 src/hotspot/cpu/x86/frame_x86.hpp --- a/src/hotspot/cpu/x86/frame_x86.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/frame_x86.hpp Sun May 20 17:57:55 2018 +0100 @@ -142,6 +142,7 @@ // accessors for the instance variables // Note: not necessarily the real 'frame pointer' (see real_fp) intptr_t* fp() const { return _fp; } + void set_fp(intptr_t* newfp) { _fp = newfp; } inline address* sender_pc_addr() const; diff -r 656931ff4345 src/hotspot/cpu/x86/frame_x86.inline.hpp --- a/src/hotspot/cpu/x86/frame_x86.inline.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp Sun May 20 17:57:55 2018 +0100 @@ -74,11 +74,13 @@ address original_pc = CompiledMethod::get_deopt_original_pc(this); if (original_pc != NULL) { + assert(_cb != NULL, "no cb 1 pc: %p orig_pc: %p", pc, original_pc); _pc = original_pc; assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); _deopt_state = is_deoptimized; } else { + assert(_cb != NULL, "no cb 2 sp: %p usp: %p fp: %p pc: %p orig_pc: %p", sp, unextended_sp, fp, pc, original_pc); if (_cb->is_deoptimization_stub()) { _deopt_state = is_deoptimized; } else { diff -r 656931ff4345 src/hotspot/cpu/x86/interp_masm_x86.cpp --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -798,6 +798,64 @@ jmp(Address(method, Method::from_interpreted_offset())); } +// void InterpreterMacroAssembler::resolve_special(Register rmethod, LinkInfo link_info) { +// CallInfo callinfo; +// LinkResolver::resolve_special_call(callinfo, Handle(), link_info, Thread::current()); +// methodHandle methodh = callinfo.selected_method(); +// assert(methodh.not_null(), "should have thrown exception"); +// Method* method = methodh(); +// tty->print_cr("call_Java_final method: %p name: %s", method, method->name()->as_C_string()); +// // tty->print_cr("call_Java_final const: %p, params: %d locals %d", method->constMethod(), method->constMethod()->_size_of_parameters, method->constMethod()->_max_locals); + +// movptr(rmethod, AddressLiteral((address)method, RelocationHolder::none).addr()); +// } + +// void InterpreterMacroAssembler::get_entry(Register entry, Register method) { +// // TODO: see InterpreterMacroAssembler::jump_from_interpreted for special cases +// Label done; +// // if (JvmtiExport::can_post_interpreter_events()) { +// // Register temp; +// // Label run_compiled_code; +// // // JVMTI events, such as single-stepping, are implemented partly by avoiding running +// // // compiled code in threads for which the event is enabled. Check here for +// // // interp_only_mode if these events CAN be enabled. +// // // interp_only is an int, on little endian it is sufficient to test the byte only +// // // Is a cmpl faster?
+// // LP64_ONLY(temp = r15_thread;) +// // NOT_LP64(get_thread(temp);) +// // cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0); +// // jccb(Assembler::zero, run_compiled_code); +// // movptr(entry, Address(method, Method::interpreter_entry_offset())); +// // bind(run_compiled_code); +// // } +// movptr(entry, Address(method, Method::from_interpreted_offset())); +// bind(done); +// } + +// // loads method into rbx +// void InterpreterMacroAssembler::get_entry(Register entry, LinkInfo link_info) { +// resolve_special(rbx, link_info); +// get_entry(entry, rbx); +// } + +// void InterpreterMacroAssembler::call_Java_final(LinkInfo link_info) { +// Register rentry = rax; +// get_entry(rentry, link_info); + +// // profile_call(rax); // ?? rax +// // profile_arguments_type(rax, rbx, rbcp, false); +// call(rentry); +// } + +// void InterpreterMacroAssembler::jump_Java_final(LinkInfo link_info) { +// Register rentry = rax; +// get_entry(rentry, link_info); + +// // profile_call(rax); // ?? rax +// // profile_arguments_type(rax, rbx, rbcp, false); +// jmp(rentry); +// } + // The following two routines provide a hook so that an implementation // can schedule the dispatch in two parts. x86 does not do this. void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { diff -r 656931ff4345 src/hotspot/cpu/x86/interp_masm_x86.hpp --- a/src/hotspot/cpu/x86/interp_masm_x86.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp Sun May 20 17:57:55 2018 +0100 @@ -197,7 +197,12 @@ // jump to an invoked target void prepare_to_jump_from_interpreted(); void jump_from_interpreted(Register method, Register temp); - + // void resolve_special(Register rmethod, LinkInfo link_info); + // void get_entry(Register entry, Register method); + // void get_entry(Register entry, LinkInfo link_info); + // void call_Java_final(LinkInfo link_info); + // void jump_Java_final(LinkInfo link_info); + // narrow int return value void narrow(Register result); diff -r 656931ff4345 src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -993,6 +993,26 @@ } } +void MacroAssembler::push_f(XMMRegister r) { + subptr(rsp, wordSize); + movflt(Address(rsp, 0), r); +} + +void MacroAssembler::pop_f(XMMRegister r) { + movflt(r, Address(rsp, 0)); + addptr(rsp, wordSize); +} + +void MacroAssembler::push_d(XMMRegister r) { + subptr(rsp, 2 * wordSize); + movdbl(Address(rsp, 0), r); +} + +void MacroAssembler::pop_d(XMMRegister r) { + movdbl(r, Address(rsp, 0)); + addptr(rsp, 2 * Interpreter::stackElementSize); +} + void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) { // Used in sign-masking with aligned address. 
assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); @@ -3686,6 +3706,15 @@ addptr(rsp, FPUStateSizeInWords * wordSize); } +void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { + Label no_cont; + movptr(cont, Address(r15_thread, in_bytes(JavaThread::continuation_offset()))); + testptr(cont, cont); + jcc(Assembler::zero, no_cont); + stop(name); + bind(no_cont); +} + void MacroAssembler::pop_IU_state() { popa(); LP64_ONLY(addq(rsp, 8)); diff -r 656931ff4345 src/hotspot/cpu/x86/macroAssembler_x86.hpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp Sun May 20 17:57:55 2018 +0100 @@ -495,6 +495,8 @@ void push_CPU_state(); void pop_CPU_state(); + void stop_if_in_cont(Register cont_reg, const char* name); + // Round up to a power of two void round_to(Register reg, int modulus); @@ -869,6 +871,11 @@ // Floating + void push_f(XMMRegister r); + void pop_f(XMMRegister r); + void push_d(XMMRegister r); + void pop_d(XMMRegister r); + void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } void andpd(XMMRegister dst, AddressLiteral src); void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } diff -r 656931ff4345 src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Sun May 20 17:57:55 2018 +0100 @@ -3493,6 +3493,8 @@ map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); + // __ stop_if_in_cont(r10, "CONT 3"); + int frame_complete = __ offset(); __ set_last_Java_frame(noreg, noreg, NULL); diff -r 656931ff4345 src/hotspot/cpu/x86/stubGenerator_x86_64.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Sun May 20 17:57:55 2018 +0100 @@ -35,6 +35,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/continuation.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" @@ -4788,6 +4789,207 @@ } +void push_FrameInfo(MacroAssembler* _masm, Register fi, Register sp, Register fp, address pc) { + if (!sp->is_valid()) __ push(0); else { + if (sp == rsp) { + __ movptr(fi, rsp); + __ push(fi); + } else { + __ push(sp); + } + } + + if (!fp->is_valid()) __ push(0); else __ push(fp); + + __ lea(fi, ExternalAddress(pc)); + __ push(fi); + + __ movptr(fi, rsp); // make fi point to the beginning of FrameInfo +} + +void push_FrameInfo(MacroAssembler* _masm, Register fi, Register sp, Register fp, Register pc) { + if (!sp->is_valid()) __ push(0); else { + if (sp == rsp) { + __ movptr(fi, rsp); + __ push(fi); + } else { + __ push(sp); + } + } + + if (!fp->is_valid()) __ push(0); else __ push(fp); + + if (!pc->is_valid()) __ push(0); else __ push(pc); + + __ movptr(fi, rsp); // make fi point to the beginning of FrameInfo +} + +void pop_FrameInfo(MacroAssembler* _masm, Register sp, Register fp, Register pc) { + if (!pc->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(pc); + if (!fp->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(fp); + if (!sp->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(sp); +} + + // c_rarg1 ContinuationScope +address generate_cont_doYield() { + const char *name = "cont_doYield"; + + // enum layout { + // rbp_off =
frame::arg_reg_save_area_bytes/BytesPerInt, + // rbp_off2, + // return_off, + // return_off2, + // framesize // inclusive of return address + // }; + // assert(is_even(framesize/2), "sp not 16-byte aligned"); + // int insts_size = 512; + // int locs_size = 64; + // CodeBuffer code(name, insts_size, locs_size); + // OopMapSet* oop_maps = new OopMapSet(); + // MacroAssembler* masm = new MacroAssembler(&code); + // MacroAssembler* _masm = masm; + // OopMap* map = new OopMap(framesize, 0); + + MacroAssembler* masm = _masm; + StubCodeMark mark(this, "StubRoutines", name); + + address start = __ pc(); + + Register fi = c_rarg1; + + __ movptr(c_rarg2, c_rarg1); // scope argument + __ movptr(rax, Address(rsp, 0)); // use return address as the frame pc // __ lea(rax, InternalAddress(pcxxxx)); + __ lea(fi, Address(rsp, wordSize)); // skip return address + __ movptr(c_rarg3, rbp); + + __ enter(); + + // // return address and rbp are already in place + // __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog + + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + + push_FrameInfo(masm, fi, fi, c_rarg3, rax); + //__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); // __ reset_last_Java_frame(thread, true); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, Continuation::freeze), fi, c_rarg2, false); // do NOT check exceptions; they'll get forwarded to the caller + + Label pinned; + __ pop(rax); // read the pc from the FrameInfo + __ testq(rax, rax); + __ jcc(Assembler::zero, pinned); + + __ pop(rbp); // not pinned -- jump to Continuation.run (the entry frame) + __ pop(fi); + __ movptr(rsp, fi); + __ jmp(rax); + + __ bind(pinned); // pinned -- return to caller + __ lea(rsp, Address(rsp, wordSize*2)); // "pop" the rest of the FrameInfo struct + + __ leave(); + __ ret(0); + + return start; + + // oop_maps->add_gc_map(the_pc - start, map); + + // RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + // RuntimeStub::new_runtime_stub(name, + // &code, + // frame_complete, + // (framesize >> (LogBytesPerWord - LogBytesPerInt)), + // oop_maps, false); + // return stub->entry_point(); + } + + // clobbers r11 + address generate_cont_thaw(const char *stub_name, int frames) { + StubCodeMark mark(this, "StubRoutines", stub_name); + address start = __ pc(); + + // TODO: Handle Valhalla return types. May require generating different return barriers. + + Register fi = r11; + + if (frames > 1) { // not return barrier + __ pop(c_rarg3); // pop return address. if we don't do this, we get a drift, where the bottom-most frozen frame continuously grows + // __ lea(rsp, Address(rsp, wordSize)); // pop return address. 
if we don't do this, we get a drift, where the bottom-most frozen frame continuously grows + } + + Label thaw_fail; + __ movptr(fi, rsp); + __ push(rax); __ push_d(xmm0); // preserve possible return value from a method returning to the return barrier + __ movl(c_rarg2, frames); + push_FrameInfo(_masm, fi, fi, rbp, c_rarg3); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), fi, c_rarg2); + __ testq(rax, rax); // rax contains the size of the frames to thaw, 0 if overflow or no more frames + __ jcc(Assembler::zero, thaw_fail); + + pop_FrameInfo(_masm, noreg, rbp, c_rarg3); // c_rarg3 would still be our return address + __ pop_d(xmm0); __ pop(rdx); // TEMPORARILY restore return value (we're going to push it again, but rsp is about to move) + + __ subq(rsp, rax); // make room for the thawed frames + __ movptr(fi, rsp); // where we'll start copying frame (the lowest address) + __ push(rdx); __ push_d(xmm0); // save original return value -- again + __ movl(c_rarg2, frames); + push_FrameInfo(_masm, fi, fi, rbp, c_rarg3); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::thaw), fi, c_rarg2); + + __ bind(thaw_fail); + pop_FrameInfo(_masm, fi, rbp, rdx); + // __ movl(rbp, 0); + __ pop_d(xmm0); __ pop(rax); // restore return value (no safepoint in the call to thaw, so even an oop return value should be OK) + __ movptr(rsp, fi); // we're now on the yield frame (which is above us b/c rsp has been pushed down) + __ jmp(rdx); + + return start; + } + + address generate_cont_returnBarrier() { + // TODO: will probably need multiple return barriers depending on return type + StubCodeMark mark(this, "StubRoutines", "cont return barrier"); + address start = __ pc(); + + if (CONT_FULL_STACK) + __ stop("RETURN BARRIER -- UNREACHABLE 0"); + + __ jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw(1)))); + + return start; + } + + address generate_cont_getPC() { + StubCodeMark mark(this, "StubRoutines", "GetPC"); + address start = __ pc(); + + __ movptr(rax, Address(rsp, 0)); + __ ret(0); + + return start; + } + + address generate_cont_getSP() { + StubCodeMark mark(this, "StubRoutines", "getSP"); + address start = __ pc(); + + __ lea(rax, Address(rsp, wordSize)); + __ ret(0); + + return start; + } + + address generate_cont_getFP() { + StubCodeMark mark(this, "StubRoutines", "GetFP"); + address start = __ pc(); + + __ stop("WHAT?"); + __ lea(rax, Address(rsp, wordSize)); + __ ret(0); + + return start; + } + #undef __ #define __ masm-> @@ -5019,6 +5221,16 @@ } } + void generate_phase1() { + // Continuation stubs: + StubRoutines::_cont_thaw2 = generate_cont_thaw("Cont thaw 2", 2); + StubRoutines::_cont_thaw1 = generate_cont_thaw("Cont thaw 1", 1); + StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); + StubRoutines::_cont_doYield = generate_cont_doYield(); // StubRoutines::_cont_doYield_blob->entry_point(); // code_begin(); // + StubRoutines::_cont_getSP = generate_cont_getSP(); + StubRoutines::_cont_getPC = generate_cont_getPC(); + } + void generate_all() { // Generates all stubs and initializes the entry points @@ -5141,15 +5353,17 @@ } public: - StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { - if (all) { + StubGenerator(CodeBuffer* code, int phase) : StubCodeGenerator(code) { + if (phase == 0) { + generate_initial(); + } else if (phase == 1) { + generate_phase1(); + } else { generate_all(); - } else { - generate_initial(); } } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, bool all) { - StubGenerator 
g(code, all); +void StubGenerator_generate(CodeBuffer* code, int phase) { + StubGenerator g(code, phase); } diff -r 656931ff4345 src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -38,6 +38,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" +#include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" @@ -173,8 +174,13 @@ } address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { + return generate_return_entry_for(state, step, index_size, false); +} + +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size, bool X) { address entry = __ pc(); +// if(X) __ stop("XXXXXXXX 000"); #ifndef _LP64 #ifdef COMPILER2 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases @@ -199,14 +205,20 @@ } #endif // _LP64 + // if(X) __ stop("XXXXXXXX 111"); + // Restore stack bottom in case i2c adjusted stack __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); // and NULL it as marker that esp is now tos until next java call __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); + // if(X) __ stop("XXXXXXXX 222"); + __ restore_bcp(); __ restore_locals(); + // if(X) __ stop("XXXXXXXX 333"); // rbcp = r13 locals = r14 + if (state == atos) { Register mdp = rbx; Register tmp = rcx; @@ -232,6 +244,8 @@ __ check_and_handle_earlyret(java_thread); } + if(X) __ stop("XXXXXXXX 444"); + __ dispatch_next(state, step); return entry; @@ -695,6 +709,55 @@ // End of helpers +// return current sp +address TemplateInterpreterGenerator::generate_Continuation_getSP_entry(void) { + address entry = __ pc(); + + __ lea(rax, Address(rsp, wordSize)); // skip return address + __ ret(0); + + return entry; +} + +// return current fp +address TemplateInterpreterGenerator::generate_Continuation_getFP_entry(void) { + address entry = __ pc(); + + __ movptr(rax, rbp); + __ ret(0); + + return entry; +} + +// return current pc +address TemplateInterpreterGenerator::generate_Continuation_getPC_entry(void) { + address entry = __ pc(); + + __ movptr(rax, Address(rsp, 0)); + __ ret(0); + + return entry; +} + +address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) { + address entry = __ pc(); + assert(StubRoutines::cont_doYield() != NULL, "stub not yet generated"); + + __ movptr(c_rarg1, Address(rsp, wordSize)); // ContinuationScope + __ jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_doYield()))); + + return entry; +} + +address TemplateInterpreterGenerator::generate_Continuation_doContinue_entry(void) { + address entry = __ pc(); + assert(StubRoutines::cont_thaw(2) != NULL, "stub not yet generated"); + + __ jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw(2)))); + + return entry; +} + // Method entry for java.lang.ref.Reference.get. 
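The cont_doYield and cont_thaw stubs that the doYield and doContinue entries above tail-call into communicate with Continuation::freeze, Continuation::prepare_thaw and Continuation::thaw through the record built by push_FrameInfo/pop_FrameInfo in stubGenerator_x86_64.cpp. The record itself is never declared in this patch (it presumably lives in runtime/continuation.hpp); a layout consistent with the push order there (sp pushed first, pc last, fi left pointing at the lowest address) would be:

    // Assumed declaration; pc sits at offset 0 because it is pushed last,
    // and pop_FrameInfo pops it first.
    struct FrameInfo {
      address   pc;
      intptr_t* fp;
      intptr_t* sp;
    };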
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) { // Code: _aload_0, _getfield, _areturn @@ -1337,12 +1400,12 @@ bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; // ebx: Method* - // rbcp: sender sp + // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub) address entry_point = __ pc(); const Address constMethod(rbx, Method::const_offset()); const Address access_flags(rbx, Method::access_flags_offset()); - const Address size_of_parameters(rdx, + const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); diff -r 656931ff4345 src/hotspot/cpu/x86/templateTable_x86.cpp --- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Sun May 20 17:57:55 2018 +0100 @@ -2612,6 +2612,17 @@ void TemplateTable::_return(TosState state) { transition(state, state); + // { + // Label not_rb; + // Register aa = rcx, bb = rdi; + // __ movptr(aa, Address(rsp, 0)); + // __ lea(bb, ExternalAddress(StubRoutines::cont_returnBarrier())); + // __ cmpq(aa, bb); + // // __ cmpq(ExternalAddress(StubRoutines::cont_returnBarrier()).addr(), aa); + // __ jcc(Assembler::notZero, not_rb); + // __ stop("WQWWQWQW"); + // __ bind(not_rb); + // } assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation diff -r 656931ff4345 src/hotspot/cpu/x86/x86_64.ad --- a/src/hotspot/cpu/x86/x86_64.ad Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/cpu/x86/x86_64.ad Sun May 20 17:57:55 2018 +0100 @@ -11004,6 +11004,20 @@ ins_pipe( pipe_slow ); %} +instruct getFP(rRegL dst) %{ + match(Set dst (GetFP)); + effect(DEF dst); + ins_cost(1); + + ins_encode %{ + // Remove wordSize for return addr which is already pushed. 
+ int framesize = Compile::current()->frame_size_in_bytes() - wordSize; + Address base(rsp, framesize); + __ lea($dst$$Register, base); + %} + ins_pipe(ialu_reg_reg_long); +%} + //----------Overflow Math Instructions----------------------------------------- instruct overflowAddI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2) diff -r 656931ff4345 src/hotspot/share/c1/c1_Compiler.cpp --- a/src/hotspot/share/c1/c1_Compiler.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_Compiler.cpp Sun May 20 17:57:55 2018 +0100 @@ -222,6 +222,12 @@ case vmIntrinsics::_compareAndSetObject: case vmIntrinsics::_getCharStringU: case vmIntrinsics::_putCharStringU: + case vmIntrinsics::_Continuation_getFP: + case vmIntrinsics::_Continuation_getSP: + case vmIntrinsics::_Continuation_getPC: + case vmIntrinsics::_Continuation_doContinue: + case vmIntrinsics::_Continuation_doYield: + case vmIntrinsics::_Continuation_runLevel: #ifdef TRACE_HAVE_INTRINSICS case vmIntrinsics::_counterTime: case vmIntrinsics::_getBufferWriter: diff -r 656931ff4345 src/hotspot/share/c1/c1_LIR.cpp --- a/src/hotspot/share/c1/c1_LIR.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIR.cpp Sun May 20 17:57:55 2018 +0100 @@ -453,6 +453,16 @@ break; } + case lir_getfp: // result always valid + case lir_getsp: // result always valid + { + assert(op->as_Op0() != NULL, "must be"); + if (op->_info) do_info(op->_info); + if (op->_result->is_valid()) do_output(op->_result); + break; + } + + // LIR_OpLabel case lir_label: // result and info always invalid @@ -1676,6 +1686,8 @@ case lir_monaddr: s = "mon_addr"; break; case lir_pack64: s = "pack64"; break; case lir_unpack64: s = "unpack64"; break; + case lir_getsp: s = "getsp"; break; + case lir_getfp: s = "getfp"; break; // LIR_Op2 case lir_cmp: s = "cmp"; break; case lir_cmp_l2i: s = "cmp_l2i"; break; diff -r 656931ff4345 src/hotspot/share/c1/c1_LIR.hpp --- a/src/hotspot/share/c1/c1_LIR.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIR.hpp Sun May 20 17:57:55 2018 +0100 @@ -901,6 +901,8 @@ , lir_membar_storeload , lir_get_thread , lir_on_spin_wait + , lir_getfp + , lir_getsp , end_op0 , begin_op1 , lir_fxch @@ -2134,6 +2136,8 @@ void push(LIR_Opr opr) { append(new LIR_Op1(lir_push, opr)); } void pop(LIR_Opr reg) { append(new LIR_Op1(lir_pop, reg)); } + void getfp(LIR_Opr reg) { append(new LIR_Op0(lir_getfp, reg)); } + void getsp(LIR_Opr reg) { append(new LIR_Op0(lir_getsp, reg)); } void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_cmp, condition, left, right, info)); diff -r 656931ff4345 src/hotspot/share/c1/c1_LIRAssembler.cpp --- a/src/hotspot/share/c1/c1_LIRAssembler.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp Sun May 20 17:57:55 2018 +0100 @@ -683,6 +683,15 @@ on_spin_wait(); break; + case lir_getfp: + getfp(op->result_opr()); + break; + + case lir_getsp: + getsp(op->result_opr()); + break; + + default: ShouldNotReachHere(); break; diff -r 656931ff4345 src/hotspot/share/c1/c1_LIRAssembler.hpp --- a/src/hotspot/share/c1/c1_LIRAssembler.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp Sun May 20 17:57:55 2018 +0100 @@ -113,6 +113,9 @@ void push(LIR_Opr opr); void pop(LIR_Opr opr); + void getsp(LIR_Opr opr); + void getfp(LIR_Opr opr); + // patching void append_patching_stub(PatchingStub* stub); void patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info); diff -r 656931ff4345 
src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Sun May 20 17:57:55 2018 +0100 @@ -3071,10 +3071,34 @@ do_vectorizedMismatch(x); break; + case vmIntrinsics::_Continuation_getFP: + do_continuation_getFP(x); + break; + case vmIntrinsics::_Continuation_getSP: + do_continuation_getSP(x); + break; + case vmIntrinsics::_Continuation_getPC: + do_continuation_getPC(x); + break; + case vmIntrinsics::_Continuation_doContinue: + do_continuation_doContinue(x); + break; + case vmIntrinsics::_Continuation_doYield: + do_continuation_doYield(x); + break; + case vmIntrinsics::_Continuation_runLevel: + do_continuation_runLevel(x); + break; + default: ShouldNotReachHere(); break; } } +void LIRGenerator::do_continuation_runLevel(Intrinsic* x) { + LIR_Opr result = rlock_result(x); + __ move(LIR_OprFact::intConst(1), result); +} + void LIRGenerator::profile_arguments(ProfileCall* x) { if (compilation()->profile_arguments()) { int bci = x->bci_of_invoke(); diff -r 656931ff4345 src/hotspot/share/c1/c1_LIRGenerator.hpp --- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Sun May 20 17:57:55 2018 +0100 @@ -264,6 +264,12 @@ void do_update_CRC32(Intrinsic* x); void do_update_CRC32C(Intrinsic* x); void do_vectorizedMismatch(Intrinsic* x); + void do_continuation_getPC(Intrinsic* x); + void do_continuation_getSP(Intrinsic* x); + void do_continuation_getFP(Intrinsic* x); + void do_continuation_doYield(Intrinsic* x); + void do_continuation_doContinue(Intrinsic* x); + void do_continuation_runLevel(Intrinsic* x); public: LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info); diff -r 656931ff4345 src/hotspot/share/c1/c1_Runtime1.cpp --- a/src/hotspot/share/c1/c1_Runtime1.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_Runtime1.cpp Sun May 20 17:57:55 2018 +0100 @@ -333,6 +333,9 @@ FUNCTION_CASE(entry, StubRoutines::dsin()); FUNCTION_CASE(entry, StubRoutines::dcos()); FUNCTION_CASE(entry, StubRoutines::dtan()); + FUNCTION_CASE(entry, StubRoutines::cont_getPC()); + FUNCTION_CASE(entry, StubRoutines::cont_thaw(2)); + FUNCTION_CASE(entry, StubRoutines::cont_doYield()); #undef FUNCTION_CASE diff -r 656931ff4345 src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/javaClasses.cpp Sun May 20 17:57:55 2018 +0100 @@ -1565,6 +1565,7 @@ int java_lang_Thread::_stillborn_offset = 0; int java_lang_Thread::_stackSize_offset = 0; int java_lang_Thread::_tid_offset = 0; +int java_lang_Thread::_continuation_offset = 0; int java_lang_Thread::_thread_status_offset = 0; int java_lang_Thread::_park_blocker_offset = 0; int java_lang_Thread::_park_event_offset = 0 ; @@ -1582,7 +1583,8 @@ macro(_tid_offset, k, "tid", long_signature, false); \ macro(_thread_status_offset, k, "threadStatus", int_signature, false); \ macro(_park_blocker_offset, k, "parkBlocker", object_signature, false); \ - macro(_park_event_offset, k, "nativeParkEventPointer", long_signature, false) + macro(_park_event_offset, k, "nativeParkEventPointer", long_signature, false); \ + macro(_continuation_offset, k, "cont", continuation_signature, false) void java_lang_Thread::compute_offsets() { assert(_group_offset == 0, "offsets should be initialized only once"); @@ -1718,6 +1720,14 @@ } } +oop 
java_lang_Thread::continuation(oop java_thread) { + return java_thread->obj_field(_continuation_offset); +} + +void java_lang_Thread::set_continuation(oop java_thread, oop continuation) { + return java_thread->obj_field_put(_continuation_offset, continuation); +} + oop java_lang_Thread::park_blocker(oop java_thread) { assert(JDK_Version::current().supports_thread_park_blocker() && _park_blocker_offset != 0, "Must support parkBlocker field"); @@ -4237,6 +4247,19 @@ int java_lang_ref_Reference::discovered_offset; int java_lang_ref_SoftReference::timestamp_offset; int java_lang_ref_SoftReference::static_clock_offset; +int java_lang_Continuation::scope_offset; +int java_lang_Continuation::target_offset; +int java_lang_Continuation::stack_offset; +int java_lang_Continuation::refStack_offset; +int java_lang_Continuation::parent_offset; +int java_lang_Continuation::entrySP_offset; +int java_lang_Continuation::entryFP_offset; +int java_lang_Continuation::entryPC_offset; +int java_lang_Continuation::fp_offset; +int java_lang_Continuation::sp_offset; +int java_lang_Continuation::pc_offset; +int java_lang_Continuation::refSP_offset; +Method* java_lang_Continuation::stack_method; int java_lang_ClassLoader::parent_offset; int java_lang_System::static_in_offset; int java_lang_System::static_out_offset; @@ -4384,6 +4407,30 @@ o->bool_field_put(deflt_offset, val); } +// Support for java.lang.Continuation + +void java_lang_Continuation::compute_offsets() { + InstanceKlass* k = SystemDictionary::Continuation_klass(); + compute_offset(scope_offset, k, vmSymbols::scope_name(), vmSymbols::continuationscope_signature()); + compute_offset(target_offset, k, vmSymbols::target_name(), vmSymbols::runnable_signature()); + compute_offset(parent_offset, k, vmSymbols::parent_name(), vmSymbols::continuation_signature()); + compute_offset(stack_offset, k, vmSymbols::stack_name(), vmSymbols::int_array_signature()); + compute_offset(refStack_offset, k, vmSymbols::refStack_name(), vmSymbols::object_array_signature()); + compute_offset(entrySP_offset, k, vmSymbols::entrySP_name(), vmSymbols::long_signature()); + compute_offset(entryFP_offset, k, vmSymbols::entryFP_name(), vmSymbols::long_signature()); + compute_offset(entryPC_offset, k, vmSymbols::entryPC_name(), vmSymbols::long_signature()); + compute_offset(fp_offset, k, vmSymbols::fp_name(), vmSymbols::long_signature()); + compute_offset(sp_offset, k, vmSymbols::sp_name(), vmSymbols::int_signature()); + compute_offset(pc_offset, k, vmSymbols::pc_name(), vmSymbols::long_signature()); + compute_offset(refSP_offset, k, vmSymbols::refSP_name(), vmSymbols::int_signature()); +} + +bool java_lang_Continuation::on_local_stack(oop ref, address adr) { + arrayOop s = stack(ref); + void* base = s->base(T_INT); + return adr >= base && (char*)adr < ((char*)base + (s->length() * 4)); +} + // Support for intrinsification of java.nio.Buffer.checkIndex int java_nio_Buffer::limit_offset() { @@ -4437,6 +4484,16 @@ java_lang_ref_Reference::queue_offset = member_offset(java_lang_ref_Reference::hc_queue_offset); java_lang_ref_Reference::next_offset = member_offset(java_lang_ref_Reference::hc_next_offset); java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset); + + // // java_lang_Continuation Class + // java_lang_Continuation::target_offset = member_offset(java_lang_Continuation::hc_target_offset); + // java_lang_Continuation::parent_offset = member_offset(java_lang_Continuation::hc_parent_offset); + // java_lang_Continuation::entrySP_offset = 
member_offset(java_lang_Continuation::hc_entrySP_offset); + // java_lang_Continuation::entryFP_offset = member_offset(java_lang_Continuation::hc_entryFP_offset); + // java_lang_Continuation::entryPC_offset = member_offset(java_lang_Continuation::hc_entryPC_offset); + // java_lang_Continuation::stack_offset = member_offset(java_lang_Continuation::hc_stack_offset); + // java_lang_Continuation::lastFP_offset = member_offset(java_lang_Continuation::hc_lastFP_offset); + // java_lang_Continuation::lastSP_offset = member_offset(java_lang_Continuation::hc_lastSP_offset); } @@ -4479,6 +4536,7 @@ java_lang_StackTraceElement::compute_offsets(); java_lang_StackFrameInfo::compute_offsets(); java_lang_LiveStackFrameInfo::compute_offsets(); + java_lang_Continuation::compute_offsets(); // generated interpreter code wants to know about the offsets we just computed: AbstractAssembler::update_delayed_values(); @@ -4544,6 +4602,17 @@ // Fake field //CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;"); + // java.lang.Continuation + + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, target, "Ljava/lang/Runnable;"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, stack, "[I"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, parent, "Ljava/lang/Continuation;"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, entrySP, "J"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, entryFP, "J"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, entryPC, "J"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, lastFP, "I"); + // CHECK_OFFSET("java/lang/Continuation", java_lang_Continuation, lastSP, "I"); + if (!valid) vm_exit_during_initialization("Hard-coded field offset verification failed"); } diff -r 656931ff4345 src/hotspot/share/classfile/javaClasses.hpp --- a/src/hotspot/share/classfile/javaClasses.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/javaClasses.hpp Sun May 20 17:57:55 2018 +0100 @@ -309,6 +309,7 @@ static int _stillborn_offset; static int _stackSize_offset; static int _tid_offset; + static int _continuation_offset; static int _thread_status_offset; static int _park_blocker_offset; static int _park_event_offset ; @@ -349,6 +350,9 @@ static jlong stackSize(oop java_thread); // Thread ID static jlong thread_id(oop java_thread); + // Continuation + static oop continuation(oop java_thread); + static void set_continuation(oop java_thread, oop continuation); // Blocker object responsible for thread parking static oop park_blocker(oop java_thread); @@ -946,6 +950,59 @@ static void serialize(SerializeClosure* f) NOT_CDS_RETURN; }; +// Interface to java.lang.Continuation objects +class java_lang_Continuation: AllStatic { + friend class JavaClasses; + private: + static void compute_offsets(); +public: + enum { + hc_fp_offset = 0, + hc_sp_offset = 11, + hc_entrySP_offset = 1, + hc_entryFP_offset = 3, + hc_target_offset = 13, + hc_parent_offset = 14, + hc_stack_offset = 15, + }; + static int scope_offset; + static int target_offset; + static int parent_offset; + static int entrySP_offset; + static int entryFP_offset; + static int entryPC_offset; + static int stack_offset; + static int refStack_offset; + static int fp_offset; + static int sp_offset; + static int pc_offset; + static int refSP_offset; + static Method* stack_method; + // Accessors + static inline oop scope(oop ref); + static inline oop target(oop ref); 
+ static inline oop parent(oop ref); + static inline typeArrayOop stack(oop ref); + static inline objArrayOop refStack(oop ref); + static inline jlong fp(oop ref); + static inline void set_fp(oop ref, const jlong i); + static inline jint sp(oop ref); + static inline void set_sp(oop ref, const jint i); + static inline void* pc(oop ref); + static inline void set_pc(oop ref, const void* pc); + static inline jint refSP(oop ref); + static inline void set_refSP(oop ref, jint i); + static inline intptr_t* entrySP(oop ref); + static inline intptr_t* entryFP(oop ref); + static inline void set_entryFP(oop ref, intptr_t* fp); + static inline address entryPC(oop ref); + static inline void set_entryPC(oop ref, address pc); + static inline int stack_size(oop ref); + static inline void* stack_base(oop ref); + static inline HeapWord* refStack_base(oop ref); + static bool on_local_stack(oop ref, address adr); +}; + // Interface to java.lang.invoke.MethodHandle objects class MethodHandleEntry; diff -r 656931ff4345 src/hotspot/share/classfile/javaClasses.inline.hpp --- a/src/hotspot/share/classfile/javaClasses.inline.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp Sun May 20 17:57:55 2018 +0100 @@ -29,6 +29,7 @@ #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oopsHierarchy.hpp" +#include "oops/typeArrayOop.inline.hpp" void java_lang_String::set_coder(oop string, jbyte coder) { assert(initialized && (coder_offset > 0), "Must be initialized"); @@ -131,6 +132,72 @@ return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM; } +inline oop java_lang_Continuation::scope(oop ref) { + return ref->obj_field(scope_offset); +} +inline oop java_lang_Continuation::target(oop ref) { + return ref->obj_field(target_offset); +} +inline oop java_lang_Continuation::parent(oop ref) { + return ref->obj_field(parent_offset); +} +inline typeArrayOop java_lang_Continuation::stack(oop ref) { + oop a = ref->obj_field(stack_offset); + return (typeArrayOop)a; +} +inline objArrayOop java_lang_Continuation::refStack(oop ref) { + oop a = ref->obj_field(refStack_offset); + return (objArrayOop)a; +} +inline jlong java_lang_Continuation::fp(oop ref) { + return ref->long_field(fp_offset); +} +inline void java_lang_Continuation::set_fp(oop ref, const jlong i) { + ref->long_field_put(fp_offset, i); +} +inline jint java_lang_Continuation::sp(oop ref) { + return ref->int_field(sp_offset); +} +inline void java_lang_Continuation::set_sp(oop ref, const jint i) { + ref->int_field_put(sp_offset, i); +} +inline void* java_lang_Continuation::pc(oop ref) { + return (void*)ref->long_field(pc_offset); +} +inline void java_lang_Continuation::set_pc(oop ref, const void* pc) { + ref->long_field_put(pc_offset, (jlong)pc); +} +inline jint java_lang_Continuation::refSP(oop ref) { + return ref->int_field(refSP_offset); +} +inline void java_lang_Continuation::set_refSP(oop ref, jint i) { + ref->int_field_put(refSP_offset, i); +} +inline intptr_t* java_lang_Continuation::entrySP(oop ref) { + return (intptr_t*)ref->long_field(entrySP_offset); +} +inline intptr_t* java_lang_Continuation::entryFP(oop ref) { + return (intptr_t*)ref->long_field(entryFP_offset); +} +inline void java_lang_Continuation::set_entryFP(oop ref, intptr_t* fp) { + ref->long_field_put(entryFP_offset, (jlong)fp); +} +inline address java_lang_Continuation::entryPC(oop ref) { + return (address)ref->long_field(entryPC_offset); +} +inline void java_lang_Continuation::set_entryPC(oop ref, address pc) { + ref->long_field_put(entryPC_offset, (jlong)pc); +} +inline int java_lang_Continuation::stack_size(oop ref) { + return stack(ref)->length() * 4; +} +inline void* java_lang_Continuation::stack_base(oop ref) { + return stack(ref)->base(T_INT); +} +inline HeapWord* java_lang_Continuation::refStack_base(oop ref) { + return refStack(ref)->base(); +} + inline void java_lang_invoke_CallSite::set_target_volatile(oop site, oop target) { site->obj_field_put_volatile(_target_offset, target); } diff -r 656931ff4345 src/hotspot/share/classfile/systemDictionary.hpp --- a/src/hotspot/share/classfile/systemDictionary.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/systemDictionary.hpp Sun May 20 17:57:55 2018 +0100 @@ -146,6 +146,10 @@ do_klass(reflect_Method_klass, java_lang_reflect_Method, Pre ) \ do_klass(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre ) \ \ + do_klass(Runnable_klass, java_lang_Runnable, Pre ) \ + do_klass(ContinuationScope_klass, java_lang_ContinuationScope, Pre ) \ + do_klass(Continuation_klass, java_lang_Continuation, Pre ) \ + \ /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \ /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ do_klass(reflect_MagicAccessorImpl_klass, reflect_MagicAccessorImpl, Opt ) \ diff -r 656931ff4345 src/hotspot/share/classfile/vmSymbols.cpp --- a/src/hotspot/share/classfile/vmSymbols.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/vmSymbols.cpp Sun May 20 17:57:55 2018 +0100 @@ -373,6 +373,8 @@ case vmIntrinsics::_dpow: case vmIntrinsics::_checkIndex: case vmIntrinsics::_Reference_get: + case vmIntrinsics::_Continuation_doContinue: + case vmIntrinsics::_Continuation_doYield: case vmIntrinsics::_updateCRC32: case vmIntrinsics::_updateBytesCRC32: case vmIntrinsics::_updateByteBufferCRC32: @@ -533,6 +535,8 @@ case vmIntrinsics::_fullFence: case vmIntrinsics::_hasNegatives: case vmIntrinsics::_Reference_get: + case vmIntrinsics::_Continuation_doContinue: + case vmIntrinsics::_Continuation_doYield: break; default: return true; diff -r 656931ff4345 src/hotspot/share/classfile/vmSymbols.hpp --- a/src/hotspot/share/classfile/vmSymbols.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Sun May 20 17:57:55 2018 +0100 @@ -66,6 +66,9 @@ template(java_lang_ClassLoader, "java/lang/ClassLoader") \ template(java_lang_ClassLoader_NativeLibrary, "java/lang/ClassLoader\x024NativeLibrary") \ template(java_lang_ThreadDeath, "java/lang/ThreadDeath") \ + template(java_lang_Runnable, "java/lang/Runnable") \ + /*template(java_lang_Continuation, "java/lang/Continuation") */\ + template(java_lang_ContinuationScope, "java/lang/ContinuationScope") \ template(java_lang_Boolean, "java/lang/Boolean") \ template(java_lang_Character, "java/lang/Character") \ template(java_lang_Character_CharacterCache, "java/lang/Character$CharacterCache") \ @@ -360,6 +363,26 @@ template(run_finalization_name, "runFinalization") \ template(dispatchUncaughtException_name, "dispatchUncaughtException") \ template(loadClass_name, "loadClass") \ + template(doYield_name, "doYield") \ + template(doContinue_name, "doContinue") \ + template(getSP_name, "getSP") \ + template(getFP_name, "getFP") \ + template(runLevel_name, "runLevel") \ + template(getPC_name, "getPC") \ + template(enter_name, "enter") \ + template(onContinue_name, "onContinue0") \ + template(getStacks_name, "getStacks") \ + template(onPinned_name, "onPinned0") \ + template(scope_name, "scope") \ +
template(entrySP_name, "entrySP") \ + template(entryFP_name, "entryFP") \ + template(entryPC_name, "entryPC") \ + template(stack_name, "stack") \ + template(fp_name, "fp") \ + template(sp_name, "sp") \ + template(pc_name, "pc") \ + template(refStack_name, "refStack") \ + template(refSP_name, "refSP") \ template(get_name, "get") \ template(put_name, "put") \ template(type_name, "type") \ @@ -478,6 +501,9 @@ template(byte_array_signature, "[B") \ template(char_array_signature, "[C") \ template(int_array_signature, "[I") \ + template(runnable_signature, "Ljava/lang/Runnable;") \ + template(continuation_signature, "Ljava/lang/Continuation;") \ + template(continuationscope_signature, "Ljava/lang/ContinuationScope;") \ template(object_void_signature, "(Ljava/lang/Object;)V") \ template(object_int_signature, "(Ljava/lang/Object;)I") \ template(object_boolean_signature, "(Ljava/lang/Object;)Z") \ @@ -1044,7 +1070,25 @@ do_intrinsic(_updateByteBufferAdler32, java_util_zip_Adler32, updateByteBuffer_A_name, updateByteBuffer_signature, F_SN) \ do_name( updateByteBuffer_A_name, "updateByteBuffer") \ \ - /* support for Unsafe */ \ + /* java/lang/Continuation */ \ + do_class(java_lang_Continuation, "java/lang/Continuation") \ + do_alias(continuationEnter_signature, void_method_signature) \ + do_signature(continuationGetStacks_signature, "(III)V") \ + do_alias(continuationOnPinned_signature, int_void_signature) \ + do_intrinsic(_Continuation_getSP, java_lang_Continuation, getSP_name, continuationGetSP_signature, F_S) \ + do_alias(continuationGetSP_signature, void_long_signature) \ + do_intrinsic(_Continuation_getFP, java_lang_Continuation, getFP_name, continuationGetFP_signature, F_S) \ + do_alias(continuationGetFP_signature, void_long_signature) \ + do_intrinsic(_Continuation_getPC, java_lang_Continuation, getPC_name, continuationGetPC_signature, F_S) \ + do_alias(continuationGetPC_signature, void_long_signature) \ + do_intrinsic(_Continuation_doContinue, java_lang_Continuation, doContinue_name, continuationDoContinue_signature, F_R) \ + do_alias(continuationDoContinue_signature, void_method_signature) \ + do_intrinsic(_Continuation_doYield, java_lang_Continuation, doYield_name, continuationDoYield_signature, F_S) \ + do_signature(continuationDoYield_signature, "(Ljava/lang/ContinuationScope;)V") \ + do_intrinsic(_Continuation_runLevel, java_lang_Continuation, runLevel_name, continuationrunLevel_signature, F_S) \ + do_alias(continuationrunLevel_signature, void_int_signature) \ + \ +/* support for Unsafe */ \ do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \ \ do_intrinsic(_allocateInstance, jdk_internal_misc_Unsafe, allocateInstance_name, allocateInstance_signature, F_RN) \ @@ -1347,7 +1391,7 @@ do_name( getAndSetObject_name, "getAndSetObject") \ do_signature(getAndSetObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;" ) \ \ - /* (2) Bytecode intrinsics */ \ + /* (2) Bytecode intrinsics */ \ \ do_intrinsic(_park, jdk_internal_misc_Unsafe, park_name, park_signature, F_R) \ do_name( park_name, "park") \ diff -r 656931ff4345 src/hotspot/share/compiler/oopMap.cpp --- a/src/hotspot/share/compiler/oopMap.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/compiler/oopMap.cpp Sun May 20 17:57:55 2018 +0100 @@ -380,8 +380,9 @@ continue; } #ifdef ASSERT - if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) || - !Universe::heap()->is_in_or_null(*loc)) { + if (reg_map->validate_oops() && + ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) || + 
!Universe::heap()->is_in_or_null(*loc))) { tty->print_cr("# Found non oop pointer. Dumping state at failure"); // try to dump out some helpful debugging information trace_codeblob_maps(fr, reg_map); @@ -389,6 +390,9 @@ tty->print_cr("register r"); omv.reg()->print(); tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc); + // os::print_location(tty, (intptr_t)*loc); + tty->print("pc: "); os::print_location(tty, (intptr_t)fr->pc()); + fr->print_value_on(tty, NULL); // do the real assert. assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer"); } diff -r 656931ff4345 src/hotspot/share/gc/shared/space.inline.hpp --- a/src/hotspot/share/gc/shared/space.inline.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/gc/shared/space.inline.hpp Sun May 20 17:57:55 2018 +0100 @@ -166,7 +166,7 @@ assert(!space->scanned_block_is_obj(cur_obj) || oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() || oop(cur_obj)->mark_raw()->has_bias_pattern(), - "these are the only valid states during a mark sweep"); + "these are the only valid states during a mark sweep (%p)", cur_obj); if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) { // prefetch beyond cur_obj Prefetch::write(cur_obj, interval); diff -r 656931ff4345 src/hotspot/share/include/jvm.h --- a/src/hotspot/share/include/jvm.h Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/include/jvm.h Sun May 20 17:57:55 2018 +0100 @@ -263,6 +263,12 @@ JVM_DumpThreads(JNIEnv *env, jclass threadClass, jobjectArray threads); /* + * java.lang.Continuation + */ +JNIEXPORT void JNICALL +JVM_RegisterContinuationMethods(JNIEnv *env, jclass cls); + +/* * java.lang.SecurityManager */ JNIEXPORT jobjectArray JNICALL diff -r 656931ff4345 src/hotspot/share/interpreter/abstractInterpreter.cpp --- a/src/hotspot/share/interpreter/abstractInterpreter.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp Sun May 20 17:57:55 2018 +0100 @@ -185,6 +185,16 @@ case vmIntrinsics::_Reference_get : return java_lang_ref_reference_get; + case vmIntrinsics::_Continuation_getSP + : return java_lang_continuation_getSP; + case vmIntrinsics::_Continuation_getFP + : return java_lang_continuation_getFP; + case vmIntrinsics::_Continuation_getPC + : return java_lang_continuation_getPC; + case vmIntrinsics::_Continuation_doContinue + : return java_lang_continuation_doContinue; + case vmIntrinsics::_Continuation_doYield + : return java_lang_continuation_doYield; default : break; } @@ -197,6 +207,14 @@ return accessor; } + // Symbol* kname = m->klass_name(); + // Symbol* name = m->name(); + // if (kname == vmSymbols::java_lang_Continuation()) { + // if (name == vmSymbols::enter_name()) { + // return java_lang_continuation_enter; + // } + // } + // Note: for now: zero locals for all non-empty methods return zerolocals; } diff -r 656931ff4345 src/hotspot/share/interpreter/abstractInterpreter.hpp --- a/src/hotspot/share/interpreter/abstractInterpreter.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp Sun May 20 17:57:55 2018 +0100 @@ -80,6 +80,11 @@ java_lang_math_fmaF, // implementation of java.lang.Math.fma (x, y, z) java_lang_math_fmaD, // implementation of java.lang.Math.fma (x, y, z) java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get() + java_lang_continuation_getSP, // implementation of java.lang.Continuation.getSP() + java_lang_continuation_getFP, // implementation of java.lang.Continuation.getFP() + 
java_lang_continuation_getPC, // implementation of java.lang.Continuation.getPC() + java_lang_continuation_doContinue, // implementation of java.lang.Continuation.doContinue() + java_lang_continuation_doYield, // implementation of java.lang.Continuation.doYield() java_util_zip_CRC32_update, // implementation of java.util.zip.CRC32.update() java_util_zip_CRC32_updateBytes, // implementation of java.util.zip.CRC32.updateBytes() java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer() diff -r 656931ff4345 src/hotspot/share/interpreter/templateInterpreter.cpp --- a/src/hotspot/share/interpreter/templateInterpreter.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/templateInterpreter.cpp Sun May 20 17:57:55 2018 +0100 @@ -193,12 +193,14 @@ EntryPoint TemplateInterpreter::_trace_code; #endif // !PRODUCT EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries]; +EntryPoint TemplateInterpreter::_return_entryX[TemplateInterpreter::number_of_return_entries]; EntryPoint TemplateInterpreter::_earlyret_entry; EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ]; address TemplateInterpreter::_deopt_reexecute_return_entry; EntryPoint TemplateInterpreter::_safept_entry; address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invoke_return_entryX[TemplateInterpreter::number_of_return_addrs]; address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs]; address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs]; @@ -235,6 +237,10 @@ * Returns the return entry address for the given top-of-stack state and bytecode. */ address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { + return return_entry(state, length, code, false); +} + +address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code, bool X) { guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length"); const int index = TosState_as_index(state); switch (code) { @@ -242,14 +248,14 @@ case Bytecodes::_invokespecial: case Bytecodes::_invokevirtual: case Bytecodes::_invokehandle: - return _invoke_return_entry[index]; + return X ? _invoke_return_entryX[index] : _invoke_return_entry[index]; case Bytecodes::_invokeinterface: return _invokeinterface_return_entry[index]; case Bytecodes::_invokedynamic: return _invokedynamic_return_entry[index]; default: assert(!Bytecodes::is_invoke(code), "invoke instructions should be handled separately: %s", Bytecodes::name(code)); - address entry = _return_entry[length].entry(state); + address entry = (X ? 
_return_entryX[length] : _return_entry[length]).entry(state); vmassert(entry != NULL, "unsupported return entry requested, length=%d state=%d", length, index); return entry; } diff -r 656931ff4345 src/hotspot/share/interpreter/templateInterpreter.hpp --- a/src/hotspot/share/interpreter/templateInterpreter.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/templateInterpreter.hpp Sun May 20 17:57:55 2018 +0100 @@ -119,12 +119,14 @@ static EntryPoint _trace_code; #endif // !PRODUCT static EntryPoint _return_entry[number_of_return_entries]; // entry points to return to from a call + static EntryPoint _return_entryX[number_of_return_entries]; // entry points to return to from a call static EntryPoint _earlyret_entry; // entry point to return early from a call static EntryPoint _deopt_entry[number_of_deopt_entries]; // entry points to return to from a deoptimization static address _deopt_reexecute_return_entry; static EntryPoint _safept_entry; static address _invoke_return_entry[number_of_return_addrs]; // for invokestatic, invokespecial, invokevirtual return entries + static address _invoke_return_entryX[number_of_return_addrs]; // for invokestatic, invokespecial, invokevirtual return entries static address _invokeinterface_return_entry[number_of_return_addrs]; // for invokeinterface return entries static address _invokedynamic_return_entry[number_of_return_addrs]; // for invokedynamic return entries @@ -178,6 +180,7 @@ static address deopt_entry(TosState state, int length); static address deopt_reexecute_return_entry() { return _deopt_reexecute_return_entry; } static address return_entry(TosState state, int length, Bytecodes::Code code); + static address return_entry(TosState state, int length, Bytecodes::Code code, bool X); // Safepoint support static void notice_safepoints(); // stops the thread when reaching a safepoint diff -r 656931ff4345 src/hotspot/share/interpreter/templateInterpreterGenerator.cpp --- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Sun May 20 17:57:55 2018 +0100 @@ -102,6 +102,23 @@ generate_return_entry_for(vtos, i, index_size) ); } + Interpreter::_return_entryX[0] = EntryPoint(); + for (int i = 1; i < Interpreter::number_of_return_entries; i++) { + address return_itos = generate_return_entry_for(itos, i, index_size, true); + Interpreter::_return_entryX[i] = + EntryPoint( + return_itos, + return_itos, + return_itos, + return_itos, + generate_return_entry_for(atos, i, index_size, true), + return_itos, + generate_return_entry_for(ltos, i, index_size, true), + generate_return_entry_for(ftos, i, index_size, true), + generate_return_entry_for(dtos, i, index_size, true), + generate_return_entry_for(vtos, i, index_size, true) + ); + } } { CodeletMark cm(_masm, "invoke return entry points"); @@ -116,6 +133,7 @@ TosState state = states[i]; assert(state != ilgl, "states array is wrong above"); Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2)); + Interpreter::_invoke_return_entryX[i] = generate_return_entry_for(state, invoke_length, sizeof(u2), true); Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2)); Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4)); } @@ -227,6 +245,12 @@ method_entry(java_lang_Double_longBitsToDouble); 
method_entry(java_lang_Double_doubleToRawLongBits); + method_entry(java_lang_continuation_getSP) + method_entry(java_lang_continuation_getFP) + method_entry(java_lang_continuation_getPC) + method_entry(java_lang_continuation_doContinue) + method_entry(java_lang_continuation_doYield) + #undef method_entry // Bytecodes @@ -431,6 +455,16 @@ case Interpreter::java_lang_math_fmaF : entry_point = generate_math_entry(kind); break; case Interpreter::java_lang_ref_reference_get : entry_point = generate_Reference_get_entry(); break; + case Interpreter::java_lang_continuation_getSP + : entry_point = generate_Continuation_getSP_entry(); break; + case Interpreter::java_lang_continuation_getFP + : entry_point = generate_Continuation_getFP_entry(); break; + case Interpreter::java_lang_continuation_getPC + : entry_point = generate_Continuation_getPC_entry(); break; + case Interpreter::java_lang_continuation_doContinue + : entry_point = generate_Continuation_doContinue_entry(); break; + case Interpreter::java_lang_continuation_doYield + : entry_point = generate_Continuation_doYield_entry(); break; case Interpreter::java_util_zip_CRC32_update : native = true; entry_point = generate_CRC32_update_entry(); break; case Interpreter::java_util_zip_CRC32_updateBytes diff -r 656931ff4345 src/hotspot/share/interpreter/templateInterpreterGenerator.hpp --- a/src/hotspot/share/interpreter/templateInterpreterGenerator.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.hpp Sun May 20 17:57:55 2018 +0100 @@ -53,6 +53,7 @@ address generate_ClassCastException_handler(); address generate_ArrayIndexOutOfBounds_handler(const char* name); address generate_return_entry_for(TosState state, int step, size_t index_size); + address generate_return_entry_for(TosState state, int step, size_t index_size, bool X); address generate_earlyret_entry_for(TosState state); address generate_deopt_entry_for(TosState state, int step, address continuation = NULL); address generate_safept_entry_for(TosState state, address runtime_entry); @@ -91,9 +92,15 @@ address generate_abstract_entry(void); address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_Reference_get_entry(); + address generate_Continuation_doYield_entry(); + address generate_Continuation_doContinue_entry(); + address generate_Continuation_getSP_entry(); + address generate_Continuation_getFP_entry(); + address generate_Continuation_getPC_entry(); address generate_CRC32_update_entry(); address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind); address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind); + #ifdef IA32 address generate_Float_intBitsToFloat_entry(); address generate_Float_floatToRawIntBits_entry(); diff -r 656931ff4345 src/hotspot/share/logging/logTag.hpp --- a/src/hotspot/share/logging/logTag.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/logging/logTag.hpp Sun May 20 17:57:55 2018 +0100 @@ -82,6 +82,7 @@ LOG_TAG(itables) \ LOG_TAG(jit) \ LOG_TAG(jni) \ + LOG_TAG(jvmcont) \ LOG_TAG(jvmti) \ LOG_TAG(liveness) \ LOG_TAG(load) /* Trace all classes loaded */ \ diff -r 656931ff4345 src/hotspot/share/oops/method.cpp --- a/src/hotspot/share/oops/method.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/oops/method.cpp Sun May 20 17:57:55 2018 +0100 @@ -1590,7 +1590,7 @@ } // Exposed so field engineers can debug VM -void Method::print_short_name(outputStream* st) { +void Method::print_short_name(outputStream* st) const { ResourceMark rm; #ifdef 
PRODUCT st->print(" %s::", method_holder()->external_name()); @@ -1655,7 +1655,7 @@ }; -void Method::print_name(outputStream* st) { +void Method::print_name(outputStream* st) const { Thread *thread = Thread::current(); ResourceMark rm(thread); st->print("%s ", is_static() ? "static" : "virtual"); diff -r 656931ff4345 src/hotspot/share/oops/method.hpp --- a/src/hotspot/share/oops/method.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/oops/method.hpp Sun May 20 17:57:55 2018 +0100 @@ -958,11 +958,11 @@ static bool has_unloaded_classes_in_signature(const methodHandle& m, TRAPS); // Printing - void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM + void print_short_name(outputStream* st = tty) const; // prints as klassname::methodname; Exposed so field engineers can debug VM #if INCLUDE_JVMTI - void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses + void print_name(outputStream* st = tty) const; // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses #else - void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)" + void print_name(outputStream* st = tty) const PRODUCT_RETURN; // prints as "virtual void foo(int)" #endif // Helper routine used for method sorting diff -r 656931ff4345 src/hotspot/share/oops/oop.cpp --- a/src/hotspot/share/oops/oop.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/oops/oop.cpp Sun May 20 17:57:55 2018 +0100 @@ -38,6 +38,10 @@ void oopDesc::print_on(outputStream* st) const { if (this == NULL) { st->print_cr("NULL"); + } else if (*((juint*)this) == badHeapWordVal) { + st->print("BAD WORD"); + } else if (*((juint*)this) == badMetaWordVal) { + st->print("BAD META WORD"); } else { klass()->oop_print_on(oop(this), st); } diff -r 656931ff4345 src/hotspot/share/opto/c2compiler.cpp --- a/src/hotspot/share/opto/c2compiler.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/c2compiler.cpp Sun May 20 17:57:55 2018 +0100 @@ -593,6 +593,12 @@ case vmIntrinsics::_profileBoolean: case vmIntrinsics::_isCompileConstant: case vmIntrinsics::_Preconditions_checkIndex: + case vmIntrinsics::_Continuation_getSP: + case vmIntrinsics::_Continuation_getFP: + case vmIntrinsics::_Continuation_getPC: + case vmIntrinsics::_Continuation_doContinue: + case vmIntrinsics::_Continuation_doYield: + case vmIntrinsics::_Continuation_runLevel: break; default: return false; diff -r 656931ff4345 src/hotspot/share/opto/classes.hpp --- a/src/hotspot/share/opto/classes.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/classes.hpp Sun May 20 17:57:55 2018 +0100 @@ -134,6 +134,7 @@ macro(ConvL2I) macro(CountedLoop) macro(CountedLoopEnd) +macro(GetFP) macro(OuterStripMinedLoop) macro(OuterStripMinedLoopEnd) macro(CountLeadingZerosI) diff -r 656931ff4345 src/hotspot/share/opto/intrinsicnode.hpp --- a/src/hotspot/share/opto/intrinsicnode.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/intrinsicnode.hpp Sun May 20 17:57:55 2018 +0100 @@ -164,6 +164,16 @@ virtual const Type* bottom_type() const { return TypeInt::BOOL; } }; +//------------------------------GetFP --------------------------------- + class GetFPNode: public Node { + public: + GetFPNode(Node* ctrl): + Node(ctrl) {} + + virtual int Opcode() const; + virtual uint ideal_reg() const { return Op_RegL; } + virtual const Type* bottom_type() const { return TypeLong::LONG; } + }; 
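+ +// NOTE: an ideal node is only usable if the platform matcher can select it, so this patch presumably pairs GetFPNode with a matching rule in the x86-64 AD file (not shown in this hunk). Conceptually, such a rule would be a sketch along these lines (assumption, for illustration only): +// +// instruct getFP(rRegL dst) %{ +// match(Set dst (GetFP)); +// ins_encode %{ __ movq($dst$$Register, rbp); %} // assumption: read rbp directly +// ins_pipe(ialu_reg); +// %}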
//------------------------------EncodeISOArray-------------------------------- // encode char[] to byte[] in ISO_8859_1 diff -r 656931ff4345 src/hotspot/share/opto/library_call.cpp --- a/src/hotspot/share/opto/library_call.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/library_call.cpp Sun May 20 17:57:55 2018 +0100 @@ -329,6 +329,11 @@ bool inline_profileBoolean(); bool inline_isCompileConstant(); + bool inline_continuation(vmIntrinsics::ID id); + bool inline_get_frame_pointer(); + bool inline_continuation_do_yield(); + bool inline_continuation_runLevel(); + bool inline_continuation_do_continue(); void clear_upper_avx() { #ifdef X86 if (UseAVX >= 2) { @@ -870,6 +875,19 @@ case vmIntrinsics::_fmaF: return inline_fma(intrinsic_id()); + case vmIntrinsics::_Continuation_getFP: + return inline_get_frame_pointer(); + + case vmIntrinsics::_Continuation_getSP: + case vmIntrinsics::_Continuation_getPC: + return inline_continuation(intrinsic_id()); + case vmIntrinsics::_Continuation_doContinue: + return inline_continuation_do_continue(); + case vmIntrinsics::_Continuation_doYield: + return inline_continuation_do_yield(); + case vmIntrinsics::_Continuation_runLevel: + return inline_continuation_runLevel(); + default: // If you get here, it may be that someone has added a new intrinsic // to the list in vmSymbols.hpp without implementing it here. @@ -6889,6 +6907,56 @@ return instof_false; // even if it is NULL } +// long Continuations::getFP() ()J +bool LibraryCallKit::inline_get_frame_pointer() { + Node *frame = _gvn.transform(new GetFPNode(control())); + set_result(frame); + return true; +} + +bool LibraryCallKit::inline_continuation(vmIntrinsics::ID id) { + address call_addr = NULL; + const char *name = NULL; + + switch (id) { + case vmIntrinsics::_Continuation_getSP: call_addr = StubRoutines::cont_getSP(); name = "getSP"; break; + case vmIntrinsics::_Continuation_getPC: call_addr = StubRoutines::cont_getPC(); name = "getPC"; break; + case vmIntrinsics::_Continuation_getFP: call_addr = OptoRuntime::continuation_getFP_Java(); name = "getFP"; break; + default: fatal("error"); return false; + } + + const TypeFunc* tf = OptoRuntime::void_long_Type(); + const TypePtr* no_memory_effects = NULL; + Node* call = make_runtime_call(RC_LEAF, tf, call_addr, name, no_memory_effects); + Node* value = _gvn.transform(new ProjNode(call, TypeFunc::Parms+0)); +#ifdef ASSERT + Node* value_top = _gvn.transform(new ProjNode(call, TypeFunc::Parms+1)); + assert(value_top == top(), "second value must be top"); +#endif + set_result(value); + return true; +} + +bool LibraryCallKit::inline_continuation_runLevel() { + set_result(intcon(2)); + return true; +} + +bool LibraryCallKit::inline_continuation_do_continue() { + address call_addr = StubRoutines::cont_thaw(2); + const TypeFunc* tf = OptoRuntime::void_void_Type(); + Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doContinue", TypeRawPtr::BOTTOM); + return true; +} + +bool LibraryCallKit::inline_continuation_do_yield() { + address call_addr = StubRoutines::cont_doYield(); + Node* arg0 = argument(0); + const TypeFunc* tf = OptoRuntime::continuation_doYield_Type(); + Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doYield", TypeRawPtr::BOTTOM, arg0); + return true; +} + //-------------inline_fma----------------------------------- bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) { Node *a = NULL; diff -r 656931ff4345 src/hotspot/share/opto/node.hpp --- a/src/hotspot/share/opto/node.hpp Fri May 18 11:52:53 2018 +0100 +++ 
b/src/hotspot/share/opto/node.hpp Sun May 20 17:57:55 2018 +0100 @@ -72,6 +72,7 @@ class EncodePKlassNode; class FastLockNode; class FastUnlockNode; +class GetFPNode; class IfNode; class IfFalseNode; class IfTrueNode; @@ -706,6 +707,7 @@ DEFINE_CLASS_ID(Mul, Node, 12) DEFINE_CLASS_ID(Vector, Node, 13) DEFINE_CLASS_ID(ClearArray, Node, 14) + DEFINE_CLASS_ID(GetFP, Node, 15) - _max_classes = ClassMask_ClearArray + _max_classes = ClassMask_GetFP }; diff -r 656931ff4345 src/hotspot/share/opto/runtime.cpp --- a/src/hotspot/share/opto/runtime.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/runtime.cpp Sun May 20 17:57:55 2018 +0100 @@ -105,6 +105,7 @@ address OptoRuntime::_slow_arraycopy_Java = NULL; address OptoRuntime::_register_finalizer_Java = NULL; +address OptoRuntime::_continuation_getFP_Java = NULL; ExceptionBlob* OptoRuntime::_exception_blob; @@ -152,6 +153,7 @@ gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false, false); gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false, false); + gen(env, _continuation_getFP_Java , void_long_Type , SharedRuntime::continuation_getFP, 0 , false, false, false); return true; } @@ -725,6 +727,27 @@ return TypeFunc::make(domain, range); } +const TypeFunc* OptoRuntime::void_void_Type() { + // create input type (domain) + const Type **fields = TypeTuple::fields(0); + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + return TypeFunc::make(domain, range); +} + +const TypeFunc* OptoRuntime::continuation_doYield_Type() { + const Type** fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // doYield's object argument + const TypeTuple *args = TypeTuple::make(TypeFunc::Parms+1, fields); + + fields = TypeTuple::fields(0); + const TypeTuple *result = TypeTuple::make(TypeFunc::Parms+0, fields); + return TypeFunc::make(args, result); +} + // arraycopy stub variations: enum ArrayCopyType { ac_fast, // void(ptr, ptr, size_t) diff -r 656931ff4345 src/hotspot/share/opto/runtime.hpp --- a/src/hotspot/share/opto/runtime.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/opto/runtime.hpp Sun May 20 17:57:55 2018 +0100 @@ -151,6 +151,7 @@ static address _slow_arraycopy_Java; static address _register_finalizer_Java; + static address _continuation_getFP_Java; // // Implementation of runtime methods @@ -232,6 +233,7 @@ static address slow_arraycopy_Java() { return _slow_arraycopy_Java; } static address register_finalizer_Java() { return _register_finalizer_Java; } + static address continuation_getFP_Java() { return _continuation_getFP_Java; } static ExceptionBlob* exception_blob() { return _exception_blob; } @@ -270,6 +272,8 @@ static const TypeFunc* modf_Type(); static const TypeFunc* l2f_Type(); static const TypeFunc* void_long_Type(); + static const TypeFunc* void_void_Type(); + static const TypeFunc* continuation_doYield_Type(); static const TypeFunc* flush_windows_Type(); diff -r 656931ff4345 src/hotspot/share/prims/jvm.cpp --- a/src/hotspot/share/prims/jvm.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/prims/jvm.cpp Sun May 20 17:57:55 2018 +0100 @@ -72,6 +72,7 @@ #include "runtime/vframe.inline.hpp" #include "runtime/vm_operations.hpp" #include "runtime/vm_version.hpp" +#include "runtime/continuation.hpp" #include "services/attachListener.hpp" #include "services/management.hpp" #include
"services/threadService.hpp" @@ -680,6 +681,12 @@ return JNIHandles::make_local(env, new_obj()); JVM_END +// java.lang.Continuation ///////////////////////////////////////////////////// + +JVM_ENTRY(void, JVM_RegisterContinuationMethods(JNIEnv *env, jclass cls)) + CONT_RegisterNativeMethods(env, cls); +JVM_END + // java.io.File /////////////////////////////////////////////////////////////// JVM_LEAF(char*, JVM_NativePath(char* path)) diff -r 656931ff4345 src/hotspot/share/runtime/continuation.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/continuation.cpp Sun May 20 17:57:55 2018 +0100 @@ -0,0 +1,1751 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.inline.hpp" +#include "classfile/vmSymbols.hpp" +#include "code/scopeDesc.hpp" +#include "code/vmreg.inline.hpp" +#include "interpreter/interpreter.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" +#include "oops/access.inline.hpp" +#include "oops/objArrayOop.inline.hpp" +#include "runtime/continuation.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/frame.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/vframe_hp.hpp" +#include "utilities/copy.hpp" +#include "utilities/macros.hpp" + +// TODO +// +// !!! Keep an eye out for deopt, and patch_pc +// +// Add: +// - method/nmethod metadata +// 0. Precise monitor detection +// 1. Exceptions +// 2. stack walking (+ exceptions) +// 3. special native methods: Method.invoke, doPrivileged +// 4. compiled->intrepreted for serialization (look at scopeDesc) +// 5. 
caching h-stacks in thread stacks +// +// Things to compress in interpreted frames: return address, monitors, last_sp + +#define RBP_OFFSET 0 + +JVM_ENTRY(void, CONT_Foo(JNIEnv* env, jobject c)) { + tty->print_cr("Hello, World!"); +} +JVM_END + +#define CC (char*) /*cast a literal from (const char*)*/ +#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) + +static JNINativeMethod CONT_methods[] = { + {CC"foo", CC"()V", FN_PTR(CONT_Foo)}, +}; + +void CONT_RegisterNativeMethods(JNIEnv *env, jclass cls) { + int status = env->RegisterNatives(cls, CONT_methods, sizeof(CONT_methods)/sizeof(JNINativeMethod)); + guarantee(status == JNI_OK && !env->ExceptionOccurred(), "register java.lang.Continuation natives"); +} + +static void print_oop(void *p, oop obj, outputStream* st = tty); +static void print_vframe(frame f, RegisterMap* map = NULL, outputStream* st = tty); +static void print_frames(JavaThread* thread, outputStream* st = tty); +#ifdef ASSERT +static VMReg find_register_spilled_here(void* p, RegisterMap* map); +#endif + +struct HFrameMetadata { + int num_oops; + unsigned short frame_size; + unsigned short uncompressed_size; +}; + +#define METADATA_SIZE sizeof(HFrameMetadata) // bytes + +#define ELEM_SIZE sizeof(jint) // stack is int[] +static inline int to_index(size_t x) { return x >> 2; } // stack is int[] +static inline int to_bytes(int x) { return x << 2; } // stack is int[] + + +static inline HFrameMetadata* metadata(intptr_t* hsp) { + return (HFrameMetadata*)((address)hsp - METADATA_SIZE); +} + +static inline intptr_t* to_haddress(const void* base, const int index) { + return (intptr_t*)((address)base + to_bytes(index)); +} + +static inline int to_index(void* base, void* ptr) { + return to_index((char*)ptr - (char*)base); +} + +static oop get_continuation(JavaThread* thread) { + return java_lang_Thread::continuation(thread->threadObj()); +} + +static void set_continuation(JavaThread* thread, oop cont) { + java_lang_Thread::set_continuation(thread->threadObj(), cont); +} + +class ContMirror; + +// Represents a stack frame on the horizontal stack, analogous to the frame class, for vertical-stack frames. 
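+// +// A sketch of the h-stack layout implied by the accessors below (illustrative, derived from meta() and sender(); not normative): +// +// h-stack (jint[]; higher indices hold older frames): +// ... [ HFrameMetadata | frame data ] [ HFrameMetadata | frame data ] ... +// ^ _sp indexes the frame data; the frame's metadata sits METADATA_SIZE bytes below it, and its sender begins at _sp + to_index(size + METADATA_SIZE). +// +// _fp is likewise an h-stack index for interpreted frames, but holds the raw rbp contents for compiled frames (see ContMirror::new_hframe).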
+class hframe { +private: + bool _write; + int _sp; + long _fp; + address _pc; + bool _is_interpreted; + CodeBlob* _cb; + +private: + inline HFrameMetadata* meta(ContMirror& cont); + inline intptr_t* real_fp(ContMirror& cont); + inline int real_fp_index(ContMirror& cont); + inline long* link_address(ContMirror& cont); + inline int link_index(ContMirror& cont); + inline address* return_pc_address(ContMirror& cont); + +public: + hframe() : _write(false), _sp(-1), _fp(0), _pc(NULL), _is_interpreted(true), _cb(NULL) {} + hframe(const hframe& hf) : _write(hf._write), _sp(hf._sp), _fp(hf._fp), _pc(hf._pc), _is_interpreted(hf._is_interpreted), _cb(hf._cb) {} + + hframe(int sp, long fp, address pc, bool write = false) + : _write(write), _sp(sp), _fp(fp), _pc(pc), _is_interpreted(Interpreter::contains(pc)) { _cb = NULL; } + hframe(int sp, long fp, address pc, CodeBlob* cb, bool is_interpreted, bool write = false) + : _write(write), _sp(sp), _fp(fp), _pc(pc), _is_interpreted(is_interpreted), _cb(cb) {} // initializers kept in declaration order + hframe(int sp, long fp, address pc, bool is_interpreted, bool write = false) + : _write(write), _sp(sp), _fp(fp), _pc(pc), _is_interpreted(is_interpreted) { _cb = NULL; } + + bool operator==(const hframe& other) const { return _write == other._write && _sp == other._sp && _fp == other._fp && _pc == other._pc; } + bool is_empty() { return _pc == NULL && _sp < 0; } + + inline bool is_interpreted_frame() { return _is_interpreted; } + inline int sp() { return _sp; } + inline long fp() { return _fp; } + inline address pc() { return _pc; } + inline CodeBlob* cb(); + + inline bool write() { return _write; } + + size_t size(ContMirror& cont) { return meta(cont)->frame_size; } + size_t uncompressed_size(ContMirror& cont) { return meta(cont)->uncompressed_size; } + int num_oops(ContMirror& cont) { return meta(cont)->num_oops; } + + void set_size(ContMirror& cont, size_t size) { assert(size < 0xffff, ""); meta(cont)->frame_size = size; } + void set_num_oops(ContMirror& cont, int num) { assert(num < 0xffff, ""); meta(cont)->num_oops = num; } + void set_uncompressed_size(ContMirror& cont, size_t size) { assert(size < 0xffff, ""); meta(cont)->uncompressed_size = size; } + + // the link is an offset from the real fp to the sender's fp IFF the sender is interpreted + // otherwise, it's the contents of the rbp register + inline long link(ContMirror& cont) { return *link_address(cont); } + inline address return_pc(ContMirror& cont) { return *return_pc_address(cont); } + + hframe sender(ContMirror& cont); + + inline void patch_link(ContMirror& cont, long value) { *link_address(cont) = value; } + inline void patch_link_relative(ContMirror& cont, intptr_t* fp); + inline void patch_callee(ContMirror& cont, hframe& sender); + + void patch_return_pc(ContMirror& cont, address value) { *return_pc_address(cont) = value; } + void patch_real_fp_offset(ContMirror& cont, int offset, intptr_t value) { *(link_address(cont) + offset) = value; } + inline void patch_real_fp_offset_relative(ContMirror& cont, int offset, intptr_t* value); + + bool is_bottom(ContMirror& cont); + + inline intptr_t* index_address(ContMirror& cont, int i); + + void print_on(ContMirror& cont, outputStream* st); + void print(ContMirror& cont) { print_on(cont, tty); } + void print_on(outputStream* st); + void print() { print_on(tty); } +}; + +// freeze result +enum res_freeze { + freeze_ok = 0, + freeze_pinned_native, + freeze_pinned_monitor = 2 +}; + +struct oopLoc { + bool narrow : 1; + unsigned long loc : 63; +}; + +// Mirrors the Java continuation object.
+// Contents are read from the Java object at the entry points of this module, and written back at exits or at intermediate calls into Java. +class ContMirror { +private: + JavaThread* const _thread; + /*const*/ oop _cont; + intptr_t* _entrySP; + intptr_t* _entryFP; + address _entryPC; + + int _sp; + long _fp; + address _pc; + + typeArrayOop _stack; + int _stack_length; + int* _hstack; + + int* _write_stack; + int _wstack_length; + int _wsp; // traditional indexing; increases as cells are written, equals the number of cells written + + int _ref_sp; + objArrayOop _ref_stack; + GrowableArray<oopLoc>* _oops; + + ContMirror(const ContMirror& cont); // no copy constructor + + int* stack() { return _hstack; } + + void allocate_stacks(int size, int oops, int frames); + inline intptr_t* write_stack_address(int i); + inline int write_stack_index(void* p); + inline bool in_writestack(void *p) { return (_write_stack != NULL && p >= _write_stack && p < (_write_stack + _wstack_length)); } + inline bool in_hstack(void *p) { return (_hstack != NULL && p >= _hstack && p < (_hstack + _stack_length)); } + inline int fix_write_index_after_write(int index); + +public: + ContMirror(JavaThread* thread, oop cont); + + void read(); + void write(); + + intptr_t* entrySP() { return _entrySP; } + intptr_t* entryFP() { return _entryFP; } + address entryPC() { return _entryPC; } + + void set_entrySP(intptr_t* sp) { _entrySP = sp; } + void set_entryFP(intptr_t* fp) { _entryFP = fp; } + void set_entryPC(address pc) { + log_trace(jvmcont)("set_entryPC %p", pc); + _entryPC = pc; + } + + int sp() { return _sp; } + long fp() { return _fp; } + address pc() { return _pc; } + + void set_sp(int sp) { _sp = sp; } + void set_fp(long fp) { _fp = fp; } + void set_pc(address pc) { _pc = pc; } + + int stack_length() { return _stack_length; } + + JavaThread* thread() { return _thread; } + + void copy_to_stack(void* from, void* to, int size); + void copy_from_stack(void* from, void* to, int size); + + objArrayOop refStack(int size); + objArrayOop refStack() { return _ref_stack; } + int refSP() { return _ref_sp; } + void set_refSP(int refSP) { log_trace(jvmcont)("set_refSP: %d", refSP); _ref_sp = refSP; } + + typeArrayOop stack(int size); + inline bool in_stack(void *p) { return in_hstack(p) || in_writestack(p); } + inline int stack_index(void* p); + inline intptr_t* stack_address(int i); + inline intptr_t* stack_address(int i, bool write); + + void call_pinned(res_freeze res, frame& f); + + void update_register_map(RegisterMap& map); + bool is_map_at_top(RegisterMap& map); + + bool is_empty(); + inline hframe new_hframe(intptr_t* hsp, intptr_t* hfp, address pc, CodeBlob* cb, bool is_interpreted); + hframe last_frame(); + inline void set_last_frame(hframe& f); + + void init_write_arrays(int size); + address freeze_target(); + inline hframe fix_hframe_after_write(hframe& hf); + void write_stacks(); + + inline void add_oop_location(oop* p); + inline void add_oop_location(narrowOop* p); + + inline oop obj_at(int i); + int num_oops(); +}; + +void hframe::print_on(outputStream* st) { + if (_is_interpreted) { + st->print_cr("\tInterpreted sp: %d fp: %ld pc: %p", _sp, _fp, _pc); + } else { + st->print_cr("\tCompiled sp: %d fp: 0x%lx pc: %p", _sp, _fp, _pc); + } +} + +void hframe::print_on(ContMirror& cont, outputStream* st) { + print_on(st); + + st->print_cr("\tMetadata size: %d num_oops: %d", meta(cont)->frame_size, meta(cont)->num_oops); + + if (_is_interpreted) { + intptr_t* fp = index_address(cont, _fp); + Method* method = *(Method**)(fp + frame::interpreter_frame_method_offset);
+ st->print_cr("\tmethod: %p", method); + st->print("\tmethod: "); method->print_short_name(st); st->cr(); + + st->print_cr("\treturn_pc: %p", *(void**)(fp + frame::return_addr_offset)); + st->print_cr("\tissp: %ld", *(long*) (fp + frame::interpreter_frame_sender_sp_offset)); + st->print_cr("\tlast_sp: %ld", *(long*) (fp + frame::interpreter_frame_last_sp_offset)); + st->print_cr("\tinitial_sp: %ld", *(long*) (fp + frame::interpreter_frame_initial_sp_offset)); + // st->print_cr("\tmon_block_top: %ld", *(long*) (fp + frame::interpreter_frame_monitor_block_top_offset)); + // st->print_cr("\tmon_block_bottom: %ld", *(long*) (fp + frame::interpreter_frame_monitor_block_bottom_offset)); + st->print_cr("\tlocals: %ld", *(long*) (fp + frame::interpreter_frame_locals_offset)); + st->print_cr("\tcache: %p", *(void**)(fp + frame::interpreter_frame_cache_offset)); + st->print_cr("\tbcp: %p", *(void**)(fp + frame::interpreter_frame_bcp_offset)); + st->print_cr("\tbci: %d", method->bci_from(*(address*)(fp + frame::interpreter_frame_bcp_offset))); + st->print_cr("\tmirror: %p", *(void**)(fp + frame::interpreter_frame_mirror_offset)); + // st->print("\tmirror: "); os::print_location(st, *(intptr_t*)(fp + frame::interpreter_frame_mirror_offset), true); + } else { + st->print_cr("\tcb: %p", cb()); + if (_cb != NULL) { + st->print("\tcb: "); _cb->print_value_on(st); st->cr(); + st->print_cr("\tcb.frame_size: %d", _cb->frame_size()); + } + } + st->print_cr("\tlink: 0x%lx %ld", link(cont), link(cont)); + st->print_cr("\treturn_pc: %p", return_pc(cont)); + + if (false) { + address sp = (address)index_address(cont, _sp); + st->print_cr("--data--"); + int fsize = meta(cont)->frame_size; + for(int i=0; i < fsize; i++) + st->print_cr("%p: %x", (sp + i), *(sp + i)); + st->print_cr("--end data--"); + } +} + +inline intptr_t* hframe::index_address(ContMirror& cont, int i) { + return (intptr_t*)cont.stack_address(i, _write); +} + +inline HFrameMetadata* hframe::meta(ContMirror& cont) { + return (HFrameMetadata*)index_address(cont, _sp - to_index(METADATA_SIZE)); +} + +bool hframe::is_bottom(ContMirror& cont) { + assert (!_write, ""); + return _sp + to_index(size(cont) + METADATA_SIZE) >= cont.stack_length(); +} + +inline CodeBlob* hframe::cb() { + if (_cb == NULL && !_is_interpreted) // compute lazily + _cb = CodeCache::find_blob(_pc); + return _cb; +} + +inline intptr_t* hframe::real_fp(ContMirror& cont) { + assert (!_is_interpreted, "interpreted"); + return index_address(cont, _sp) + cb()->frame_size(); +} + +inline int hframe::real_fp_index(ContMirror& cont) { + assert (!_is_interpreted, "interpreted"); + return _sp + to_index(cb()->frame_size() * sizeof(intptr_t)); +} + +inline long* hframe::link_address(ContMirror& cont) { + return _is_interpreted + ? (long*)&index_address(cont, _fp)[frame::link_offset] + : (long*)(real_fp(cont) - frame::sender_sp_offset); // x86-specific +} + +inline int hframe::link_index(ContMirror& cont) { + return _is_interpreted ? _fp : (real_fp_index(cont) - to_index(frame::sender_sp_offset * sizeof(intptr_t*))); // x86-specific +} + +inline address* hframe::return_pc_address(ContMirror& cont) { + return _is_interpreted + ? (address*)&index_address(cont, _fp)[frame::return_addr_offset] + : (address*)(real_fp(cont) - 1); // x86-specific +} + +inline void hframe::patch_real_fp_offset_relative(ContMirror& cont, int offset, intptr_t* value) { + long* la = (long*)((_is_interpreted ? 
index_address(cont, _fp) : real_fp(cont)) + offset); + *la = to_index((address)value - (address)la); + log_trace(jvmcont)("patched relative offset: %d value: %p", offset, value); +} + +inline void hframe::patch_link_relative(ContMirror& cont, intptr_t* fp) { + long* la = link_address(cont); + *la = to_index((address)fp - (address)la); + log_trace(jvmcont)("patched link: %ld", *la); +} + +inline void hframe::patch_callee(ContMirror& cont, hframe& sender) { + assert (_write == sender._write, ""); + if (sender.is_interpreted_frame()) { + patch_link_relative(cont, sender.link_address(cont)); + } else { + patch_link(cont, sender.fp()); + } + if (is_interpreted_frame()) { + patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, index_address(cont, sender.sp())); + } +} + +hframe hframe::sender(ContMirror& cont) { + address sender_pc = return_pc(cont); + bool is_sender_interpreted = Interpreter::contains(sender_pc); + int sender_sp = _sp + to_index(size(cont) + METADATA_SIZE); + long sender_fp = link(cont); + log_trace(jvmcont)("hframe::sender sender_fp0: %ld", sender_fp); + if (is_sender_interpreted) { + sender_fp += link_index(cont); + log_trace(jvmcont)("hframe::sender real_fp: %d sender_fp: %ld", link_index(cont), sender_fp); + } + return hframe(sender_sp, sender_fp, sender_pc, is_sender_interpreted, _write); +} + +ContMirror::ContMirror(JavaThread* thread, oop cont) + : _thread(thread) { + _cont = cont; + _stack = NULL; + _hstack = NULL; + _ref_stack = NULL; + _stack_length = 0; + _oops = NULL; + _write_stack = NULL; + _wstack_length = 0; + _wsp = 0; +} + +void ContMirror::read() { + log_trace(jvmcont)("Reading continuation object:"); + + _entrySP = (intptr_t*) java_lang_Continuation::entrySP(_cont); + _entryFP = NULL; + _entryPC = (address) java_lang_Continuation::entryPC(_cont); + log_trace(jvmcont)("set_entryPC Z %p", _entryPC); + log_trace(jvmcont)("\tentrySP: %p entryFP: %p entryPC: %p", _entrySP, _entryFP, _entryPC); + + _sp = java_lang_Continuation::sp(_cont); + _fp = java_lang_Continuation::fp(_cont); + _pc = (address)java_lang_Continuation::pc(_cont); + log_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: %p", _sp, _fp, _fp, _pc); + + _stack = java_lang_Continuation::stack(_cont); + if (_stack != NULL) { + _stack_length = _stack->length(); + _hstack = (int*)_stack->base(T_INT); + } else { + _stack_length = 0; + _hstack = NULL; + } + log_trace(jvmcont)("\tstack: %p hstack: %p, stack_length: %d", (oopDesc*)_stack, _hstack, _stack_length); + + _ref_stack = java_lang_Continuation::refStack(_cont); + _ref_sp = java_lang_Continuation::refSP(_cont); + log_trace(jvmcont)("\tref_stack: %p ref_sp: %d", (oopDesc*)_ref_stack, _ref_sp); +} + +void ContMirror::write() { + log_trace(jvmcont)("Writing continuation object:"); + + log_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: %p", _sp, _fp, _fp, _pc); + java_lang_Continuation::set_sp(_cont, _sp); + java_lang_Continuation::set_fp(_cont, _fp); + java_lang_Continuation::set_pc(_cont, _pc); + + log_trace(jvmcont)("WRITE set_entryPC: %p", _entryPC); + // java_lang_Continuation::set_entrySP(_cont, _entrySP); + // java_lang_Continuation::set_entryFP(_cont, _entryFP); + java_lang_Continuation::set_entryPC(_cont, _entryPC); + + write_stacks(); + + log_trace(jvmcont)("\tref_sp: %d", _ref_sp); + java_lang_Continuation::set_refSP(_cont, _ref_sp); + log_trace(jvmcont)("\tend write"); +} + +bool ContMirror::is_empty() { + return _sp < 0 || _sp >= _stack->length(); +} + +hframe ContMirror::last_frame() { + return is_empty() ? 
hframe() : hframe(_sp, _fp, _pc); +} + +inline void ContMirror::set_last_frame(hframe& f) { + set_sp(f.sp()); set_fp(f.fp()); set_pc(f.pc()); + log_trace(jvmcont)("set_last_frame cont sp: %d fp: 0x%lx pc: %p", sp(), fp(), pc()); + if (is_empty()) { + set_fp(0); + set_pc(NULL); + } +} + +inline int ContMirror::stack_index(void* p) { + int i = to_index(stack(), p); + assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length()); + return i; +} + +inline intptr_t* ContMirror::stack_address(int i) { + assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length()); + return (intptr_t*)&stack()[i]; +} + +inline int ContMirror::write_stack_index(void* p) { + assert (_write_stack != NULL, ""); + int i = to_index(_write_stack, p); + assert (i >= 0 && i < _wstack_length, "i: %d length: %d", i, _wstack_length); + return i; +} + +inline intptr_t* ContMirror::write_stack_address(int i) { + assert (_write_stack != NULL, ""); + assert (i >= 0 && i < _wstack_length, "i: %d length: %d", i, _wstack_length); + return (intptr_t*)&_write_stack[i]; +} + +inline intptr_t* ContMirror::stack_address(int i, bool write) { + return write ? write_stack_address(i) : stack_address(i); +} + +void ContMirror::copy_to_stack(void* from, void* to, int size) { + log_trace(jvmcont)("Copying from v: %p - %p (%d bytes)", from, (address)from + size, size); + log_trace(jvmcont)("Copying to h: %p - %p (%d - %d)", to, (address)to + size, to_index(_write_stack, to), to_index(_write_stack, (address)to + size)); + + assert (size > 0, "size: %d", size); + assert (write_stack_index(to) >= 0, ""); + assert (to_index(_write_stack, (address)to + size) <= _wstack_length, ""); + + // this assertion is just to check whether the copying happens as intended, but not otherwise required for this method. 
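+ // Worked example (assuming the 8-byte HFrameMetadata defined above with no padding, so to_index(METADATA_SIZE) == 2): if this frame's data is copied to write-stack index 10, the previous write must have left _wsp == 8, and slots 8-9 hold this frame's metadata.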
+ assert (write_stack_index(to) == _wsp + to_index(METADATA_SIZE), "to: %d wsp: %d", write_stack_index(to), _wsp); + + Copy::conjoint_memory_atomic(from, to, size); + _wsp = to_index(_write_stack, (address)to + size); +} + +void ContMirror::copy_from_stack(void* from, void* to, int size) { + log_trace(jvmcont)("Copying from h: %p - %p (%d - %d)", from, (address)from + size, to_index(stack(), from), to_index(stack(), (address)from + size)); + log_trace(jvmcont)("Copying to v: %p - %p (%d bytes)", to, (address)to + size, size); + + assert (size > 0, "size: %d", size); + assert (stack_index(from) >= 0, ""); + assert (to_index(stack(), (address)from + size) <= stack_length(), ""); + + Copy::conjoint_memory_atomic(from, to, size); +} + +void ContMirror::allocate_stacks(int size, int oops, int frames) { + bool need_allocation = false; + if (_stack == NULL || to_index(size) >= _sp - to_index(METADATA_SIZE)) { + log_trace(jvmcont)("stack(int): size: %d size(int): %d sp: %d", size, to_index(size), _sp); + need_allocation = true; + } + if (!need_allocation && _ref_stack == NULL && oops >= _ref_sp) { + need_allocation = true; + } + if (!need_allocation) + return; + + assert(_sp == java_lang_Continuation::sp(_cont), ""); + assert(_fp == java_lang_Continuation::fp(_cont), ""); + + int old_stack_length = _stack_length; + + HandleMark hm(_thread); + Handle conth(_thread, _cont); + JavaCallArguments args; + args.push_oop(conth); + args.push_int(size); + args.push_int(oops); + args.push_int(frames); + JavaValue result(T_VOID); + JavaCalls::call_virtual(&result, SystemDictionary::Continuation_klass(), vmSymbols::getStacks_name(), vmSymbols::continuationGetStacks_signature(), &args, _thread); + _cont = conth(); // reload oop after java call + + _stack = java_lang_Continuation::stack(_cont); + _stack_length = _stack->length(); + _hstack = (int*)_stack->base(T_INT); + + _sp = java_lang_Continuation::sp(_cont); + if (Interpreter::contains(_pc)) // only interpreter frames use relative (index) fp + _fp = _stack_length - (old_stack_length - _fp); + + assert (to_bytes(_stack_length) >= size, "sanity check: stack_size: %d size: %d", to_bytes(_stack_length), size); + assert (to_bytes(_sp) - (int)METADATA_SIZE >= size, "sanity check"); + + _ref_stack = java_lang_Continuation::refStack(_cont); + _ref_sp = java_lang_Continuation::refSP(_cont); +} + +void ContMirror::write_stacks() { + if (_write_stack == NULL) { + assert(_oops == NULL, ""); + return; + } + + log_trace(jvmcont)("Writing stacks"); + + int num_oops = _oops->length(); + int size = to_bytes(_wsp); + + allocate_stacks(size, num_oops, 0); + + address to = (address)stack_address(_sp - to_index(METADATA_SIZE) - _wsp); + log_trace(jvmcont)("Copying %d bytes", size); + log_trace(jvmcont)("Copying to h: %p - %p (%d - %d)", to, to + size, to_index(stack(), to), to_index(stack(), to + size)); + + Copy::conjoint_memory_atomic(_write_stack, to, size); + + // delete _write_stack; + _write_stack = NULL; + + log_trace(jvmcont)("Copying %d oops", num_oops); + for (int i = 0; i < _oops->length(); i++) { + oopLoc ol = _oops->at(i); + oop obj = ol.narrow ? 
(oop)RootAccess<>::oop_load((narrowOop*)ol.loc) : RootAccess<>::oop_load((oop*)ol.loc); + log_trace(jvmcont)("i: %d narrow: %d", i, ol.narrow); print_oop((void*)ol.loc, obj); + assert (oopDesc::is_oop_or_null(obj), "invalid oop"); + _ref_stack->obj_at_put(_ref_sp - num_oops + i, obj); // does a HeapAccess write barrier + } + + _ref_sp = _ref_sp - num_oops; + // delete _oops; + _oops = NULL; +} + +inline hframe ContMirror::new_hframe(intptr_t* hsp, intptr_t* hfp, address pc, CodeBlob* cb, bool is_interpreted) { + assert (!is_interpreted || in_writestack(hsp) == in_writestack(hfp), ""); + + bool write = in_writestack(hsp); + int sp; + long fp; + if (write) { + sp = write_stack_index(hsp); + fp = is_interpreted ? write_stack_index(hfp) : (long)hfp; + } else { + sp = stack_index(hsp); + fp = is_interpreted ? stack_index(hfp) : (long)hfp; + } + return hframe(sp, fp, pc, cb, is_interpreted, write); +} + +inline int ContMirror::fix_write_index_after_write(int index) { + return _sp - to_index(METADATA_SIZE) - _wsp + index; +} + +inline hframe ContMirror::fix_hframe_after_write(hframe& hf) { + assert (hf.write(), ""); + return hframe(fix_write_index_after_write(hf.sp()), + hf.is_interpreted_frame() ? fix_write_index_after_write(hf.fp()) : hf.fp(), + hf.pc(), + hf.cb(), + hf.is_interpreted_frame(), + false); +} + +void ContMirror::init_write_arrays(int size) { + _oops = new GrowableArray<oopLoc>(); + + _wstack_length = to_index(size + 8); // due to overlap of bottom interpreted frame with entry frame + _write_stack = NEW_RESOURCE_ARRAY(int, _wstack_length); + _wsp = 0; +} + +address ContMirror::freeze_target() { + assert (_write_stack != NULL, ""); + return (address) _write_stack; +} + +inline void ContMirror::add_oop_location(oop* p) { + log_trace(jvmcont)("i: %d (oop)", _oops->length()); + _oops->append((oopLoc){false, (unsigned long)p}); +} + +inline void ContMirror::add_oop_location(narrowOop* p) { + log_trace(jvmcont)("i: %d (narrow)", _oops->length()); + _oops->append((oopLoc){true, (unsigned long)p}); +} + +inline oop ContMirror::obj_at(int i) { + assert (_ref_stack != NULL, ""); + assert (_ref_sp <= i && i < _ref_stack->length(), "i: %d _ref_sp: %d, length: %d", i, _ref_sp, _ref_stack->length()); + return _ref_stack->obj_at(i); +} + +int ContMirror::num_oops() { + return _ref_stack == NULL ?
0 : _ref_stack->length() - _ref_sp; +} + +void ContMirror::update_register_map(RegisterMap& map) { + log_trace(jvmcont)("Setting RegisterMap saved link address to: %p", &_fp); + frame::update_map_with_saved_link(&map, (intptr_t **)&_fp); +} + +bool ContMirror::is_map_at_top(RegisterMap& map) { + return (map.location(rbp->as_VMReg()) == (address)&_fp); +} + +void ContMirror::call_pinned(res_freeze res, frame& f) { + write(); + + HandleMark hm(_thread); + Handle conth(_thread, _cont); + JavaCallArguments args; + args.push_oop(conth); + args.push_int(res); + JavaValue result(T_VOID); + JavaCalls::call_virtual(&result, SystemDictionary::Continuation_klass(), vmSymbols::onPinned_name(), vmSymbols::continuationOnPinned_signature(), &args, _thread); + _cont = conth(); // reload oop after java call + log_trace(jvmcont)("call_pinned: returned from Continuation.onPinned"); +} + +// static inline bool is_empty(frame& f) { +// return f.pc() == NULL; +// } + +// static inline intptr_t* get_callee_link_from_map(RegisterMap& map) { +// intptr_t** link_address = (intptr_t**)map.location(rbp->as_VMReg()); // TODO x86-specific +// assert (link_address != NULL, ""); +// return *link_address; +// } + +static inline intptr_t* frame_top(frame &f, bool is_interpreted) { // inclusive; this will be copied with the frame + // return f.unextended_sp(); + return is_interpreted + ? *(intptr_t**)f.addr_at(frame::interpreter_frame_initial_sp_offset) // ? interpreter_frame_last_sp_offset / interpreter_frame_initial_sp_offset + : f.unextended_sp(); +} + +static inline intptr_t* frame_top(frame &f) { // inclusive; this will be copied with the frame + return frame_top(f, f.is_interpreted_frame()); +} + +static inline intptr_t* frame_bottom(frame &f, bool is_interpreted) { + if (is_interpreted) { + return *(intptr_t**)f.addr_at(frame::interpreter_frame_locals_offset) + 1; // exclusive, so we add 1 word + // RegisterMap map(JavaThread::current(), false); // if thread is NULL we don't get a fix for the return barrier -> entry frame + // frame sender = f.sender(&map); + // return frame_top(sender); + } else { + return f.unextended_sp() + f.cb()->frame_size(); + } +} + +static inline intptr_t* frame_bottom(frame &f) { + return frame_bottom(f, f.is_interpreted_frame()); +} + +static inline intptr_t** real_link_address(frame& f, bool is_interpreted) { + return is_interpreted + ? (intptr_t**)(f.fp() + frame::link_offset) + : (intptr_t**)(f.real_fp() - frame::sender_sp_offset); // x86-specific +} + +// static inline intptr_t* real_link(frame& f, bool is_interpreted) { +// return *real_link_address(f, is_interpreted); +// } + +static void patch_link(frame& f, intptr_t* fp, bool is_interpreted) { + *real_link_address(f, is_interpreted) = fp; + log_trace(jvmcont)("patched link: %p", fp); +} + +static void patch_sender_sp(frame& f, intptr_t* sp) { + assert (f.is_interpreted_frame(), ""); + *(intptr_t**)(f.fp() + frame::interpreter_frame_sender_sp_offset) = sp; + log_trace(jvmcont)("patched sender_sp: %p", sp); +} + +static inline address* return_pc_address(const frame& f, bool is_interpreted) { + return is_interpreted ?
(address*)(f.fp() + frame::return_addr_offset) + : (address*)(f.real_fp() - 1); // x86-specific +} + +static inline address return_pc(const frame& f, bool is_interpreted) { + return *return_pc_address(f, is_interpreted); +} + +static void patch_return_pc(frame& f, address pc, bool is_interpreted) { + *return_pc_address(f, is_interpreted) = pc; + log_trace(jvmcont)("patched return_pc: %p", pc); +} + +// static void patch_interpreted_bci(frame& f, int bci) { +// f.interpreter_frame_set_bcp(f.interpreter_frame_method()->bcp_from(bci)); +// } + +static bool is_interpreted_frame_owning_locks(frame& f) { + return f.interpreter_frame_monitor_end() < f.interpreter_frame_monitor_begin(); +} + +static bool is_compiled_frame_owning_locks(JavaThread* thread, RegisterMap* map, frame& f) { + ResourceMark rm(thread); // vframes/scopes are allocated in the resource area + + nmethod* nm = f.cb()->as_nmethod(); + assert (!nm->is_compiled() || !nm->as_compiled_method()->is_native_method(), ""); // ??? See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp + + for (ScopeDesc* scope = nm->scope_desc_at(f.pc()); scope != NULL; scope = scope->sender()) { + // scope->print_on(tty); + GrowableArray<MonitorValue*>* mons = scope->monitors(); + if (mons == NULL || mons->is_empty()) + continue; + return true; // TODO: conservative for now: any monitor in any scope pins the frame; the precise owner check below is unreachable + for (int index = (mons->length()-1); index >= 0; index--) { // see compiledVFrame::monitors() + MonitorValue* mon = mons->at(index); + if (mon->eliminated()) + continue; + ScopeValue* ov = mon->owner(); + StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop + oop owner = owner_sv->get_obj()(); + if (owner != NULL) + return true; + } + } + return false; +} + +static inline void relativize(intptr_t* const fp, intptr_t* const hfp, int offset) { + *(long*)(hfp + offset) = to_index((address)*(hfp + offset) - (address)fp + METADATA_SIZE); +} + +static inline void derelativize(intptr_t* const fp, int offset) { + *(fp + offset) = (intptr_t)((address)fp + to_bytes(*(long*)(fp + offset)) - METADATA_SIZE); +} + +class ContOopClosure : public OopClosure { +protected: + ContMirror* const _cont; + void* const _vsp; + int _count; +#ifdef ASSERT + RegisterMap* _map; +#endif + +public: + int count() { return _count; } + +protected: + ContOopClosure(ContMirror* cont, RegisterMap* map, void* vsp) + : _cont(cont), _vsp(vsp) { + _count = 0; + #ifdef ASSERT + _map = map; + #endif + } + + inline bool process(void* p) { +#ifdef ASSERT + VMReg reg; +#endif + + int offset = (address)p - (address)_vsp; + assert(offset >= 0 || (void*)p == (void*)_map->location(rbp->as_VMReg()), + "offset: %d reg: %s", offset, (reg = find_register_spilled_here(p, _map), reg != NULL ?
reg->name() : "NONE")); // calle-saved register can only be rbp + log_trace(jvmcont)("p: %p offset: %d", p, offset); + +#ifdef ASSERT + reg = find_register_spilled_here(p, _map); + if (reg != NULL) log_trace(jvmcont)("reg: %s", reg->name()); +#endif + + _count++; + return true; + } +}; + +class FreezeOopClosure: public ContOopClosure { + private: + void* const _hsp; + + protected: + template inline void do_oop_work(T* p) { + if (!process(p)) return; + + #ifdef ASSERT + oop obj = RootAccess<>::oop_load(p); + print_oop(p, obj); + assert (oopDesc::is_oop_or_null(obj), "invalid oop"); + #endif + _cont->add_oop_location(p); + + #ifndef PRODUCT + // address hloc = (address)_hsp + ((address)p - (address)_vsp); + // if (hloc >= (address)_cont->stack()) // callee-saved registers of the yield frame may be stored in the doYield frame, which would result in an underflow here + // memset(hloc, 0xba, sizeof(T)); // mark oops + #endif + } + public: + FreezeOopClosure(ContMirror* cont, void* vsp, void* hsp, RegisterMap* map) + : ContOopClosure(cont, map, vsp), _hsp(hsp) { assert (cont->in_stack(hsp), ""); } + virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +class ThawOopClosure: public ContOopClosure { + private: + int _i; + + protected: + template inline void do_oop_work(T* p) { + if (!process(p)) return; + + oop obj = _cont->obj_at(_i); // does a HeapAccess load barrier + log_trace(jvmcont)("i: %d", _i); print_oop(p, obj); + RootAccess<>::oop_store(p, obj); + _i++; + } + public: + ThawOopClosure(ContMirror* cont, int index, int num_oops, void* vsp, RegisterMap* map) + : ContOopClosure(cont, map, vsp) { _i = index; } + virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +static void set_anchor(JavaThread* thread, FrameInfo* fi) { + JavaFrameAnchor* anchor = thread->frame_anchor(); + anchor->set_last_Java_sp((intptr_t*)fi->sp); + anchor->set_last_Java_fp((intptr_t*)fi->fp); + anchor->set_last_Java_pc(fi->pc); + + assert(thread->last_frame().cb() != NULL, ""); + + log_trace(jvmcont)("set_anchor:"); + print_vframe(thread->last_frame()); +} + +static void set_anchor(ContMirror& cont) { + FrameInfo fi = { cont.entryPC(), cont.entryFP(), cont.entrySP() }; + set_anchor(cont.thread(), &fi); +} + +static inline void clear_anchor(JavaThread* thread) { + thread->frame_anchor()->clear(); +} + +static int count_frames(frame f, intptr_t* bottom) { + RegisterMap map(NULL, false); + int i = 0; + log_trace(jvmcont)("count_frames bottom: %p", bottom); + while (f.unextended_sp() < bottom) { + // print_vframe(f); + i++; + f = f.sender(&map); + } + log_trace(jvmcont)("count_frames #frames: %d", i); + return i; +} + +static inline int freeze_oops(ContMirror& cont, frame &f, void* vsp, void* hsp, RegisterMap& map) { + log_trace(jvmcont)("Walking oops (freeze)"); + + assert (!map.include_argument_oops(), ""); + + FreezeOopClosure oopClosure(&cont, vsp, hsp, &map); + f.oops_do(&oopClosure, NULL, &map); + + log_trace(jvmcont)("Done walking oops"); + + return oopClosure.count(); +} + +static inline size_t freeze_interpreted_frame(ContMirror& cont, frame& f, hframe& hf, intptr_t* vsp, address target) { + intptr_t* bottom = frame_bottom(f, true); + // if (bottom > cont.entrySP()) bottom = cont.entrySP(); // due to a difference between initial_sp and unextended_sp; need to understand better + // assert (bottom <= cont.entrySP(), "bottom: %p entrySP: %p", bottom, cont.entrySP()); + assert (bottom > vsp, "bottom: %p 
vsp: %p", bottom, vsp); + const int fsize = (bottom - vsp) * sizeof(intptr_t); + + intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE); + intptr_t* vfp = f.fp(); + intptr_t* hfp = hsp + (vfp - vsp); + + assert (*(intptr_t**)(vfp + frame::interpreter_frame_locals_offset) < frame_bottom(f, true), "frame bottom: %p locals: %p", + frame_bottom(f, true), *(intptr_t**)(vfp + frame::interpreter_frame_locals_offset)); + + if (!hf.is_empty()) { + hf.patch_link_relative(cont, hfp); + if (hf.is_interpreted_frame()) { + hf.patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, hsp); + } + } + + cont.copy_to_stack(vsp, hsp, fsize); + + relativize(vfp, hfp, frame::interpreter_frame_last_sp_offset); + relativize(vfp, hfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom + relativize(vfp, hfp, frame::interpreter_frame_locals_offset); + + hf = cont.new_hframe(hsp, hfp, f.pc(), NULL, true); + hf.patch_link(cont, 0); + hf.patch_real_fp_offset(cont, frame::interpreter_frame_sender_sp_offset, 0); + + if (Continuation::is_cont_bottom_frame(f) && !cont.is_empty()) { + log_trace(jvmcont)("Fixing return address on bottom frame: %p", cont.pc()); + hf.patch_return_pc(cont, cont.pc()); + } + + hf.set_size(cont, fsize); + hf.set_uncompressed_size(cont, fsize); + hf.set_num_oops(cont, 0); + + return fsize + METADATA_SIZE; +} + +static inline size_t freeze_compiled_frame(ContMirror& cont, frame& f, hframe& hf, intptr_t* vsp, address target) { + intptr_t* bottom = frame_bottom(f, false); + assert (bottom > vsp, "bottom: %p vsp: %p", bottom, vsp); + assert (bottom <= cont.entrySP(), "bottom: %p entrySP: %p", bottom, cont.entrySP()); + + const int fsize = (bottom - vsp) * sizeof(intptr_t); + + intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE); + + if (!hf.is_empty()) { + hf.patch_link(cont, (long)f.fp()); + if (hf.is_interpreted_frame()) { + hf.patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, hsp); + } + } + + cont.copy_to_stack(vsp, hsp, fsize); + + hf = cont.new_hframe(hsp, f.fp(), f.pc(), f.cb(), false); + hf.patch_link(cont, 0); + + if (Continuation::is_cont_bottom_frame(f) && !cont.is_empty()) { + log_trace(jvmcont)("Fixing return address on bottom frame: %p", cont.pc()); + hf.patch_return_pc(cont, cont.pc()); + } + + hf.set_size(cont, fsize); + hf.set_uncompressed_size(cont, 0); + hf.set_num_oops(cont, 0); + + return fsize + METADATA_SIZE; +} + +// freezes a single frame +static res_freeze freeze_frame(ContMirror& cont, address &target, frame &f, RegisterMap &map, hframe &hf, bool is_top) { + log_trace(jvmcont)("============================="); + + RegisterMap dmap(NULL, false); + + print_vframe(f, &dmap); + + const bool is_interpreted = f.is_interpreted_frame(); + const bool is_compiled = f.is_compiled_frame(); + + assert (!is_interpreted || f.is_interpreted_frame_valid(cont.thread()), "invalid frame"); + + if ((is_interpreted && is_interpreted_frame_owning_locks(f)) + || (is_compiled && is_compiled_frame_owning_locks(cont.thread(), &map, f))) { + return freeze_pinned_monitor; + } + + intptr_t* vsp = is_top ? 
f.sp() : frame_top(f, is_interpreted); + + size_t nbytes = 0; + if (is_compiled) nbytes = freeze_compiled_frame(cont, f, hf, vsp, target); + else if (is_interpreted) nbytes = freeze_interpreted_frame(cont, f, hf, vsp, target); + else { + // TODO: support reflection, doPrivileged + log_trace(jvmcont)("not Java: %p", f.pc()); + if (log_is_enabled(Trace, jvmcont)) os::print_location(tty, *((intptr_t*)((void*)f.pc()))); + return freeze_pinned_native; + } + + if (nbytes > 0) { + intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE); + int num_oops = freeze_oops(cont, f, vsp, hsp, map); + hf.set_num_oops(cont, num_oops); + } + + log_trace(jvmcont)("hframe:"); + if (log_is_enabled(Trace, jvmcont)) hf.print(cont); + + target += nbytes; + f = f.sender(&map); + + return freeze_ok; +} + +// freezes all frames of a single continuation +static bool freeze_continuation(JavaThread* thread, ContMirror& cont, frame& f, RegisterMap& map) { + HandleMark hm(thread); // TODO: necessary? + + LogStreamHandle(Trace, jvmcont) st; + + log_trace(jvmcont)("freeze: frame sp: %p fp: %p pc: %p", f.sp(), f.fp(), f.pc()); + log_trace(jvmcont)("freeze: cont sp: %d fp: 0x%lx pc: %p", cont.sp(), cont.fp(), cont.pc()); + + intptr_t* bottom = cont.entrySP(); // (bottom is highest address; stacks grow down) + intptr_t* top = f.sp(); + + log_trace(jvmcont)("freeze range: bottom: %p top: %p size: %ld", bottom, top, (address)bottom - (address)top); + + int size = (bottom - top) * sizeof(intptr_t); // in bytes + int num_frames = count_frames(f, bottom); + size += num_frames * METADATA_SIZE; + + log_trace(jvmcont)("bottom: %p size: %d, count %d", bottom, size, num_frames); + assert (num_frames < 1000 && num_frames > 0 && size > 0, "num_frames: %d size: %d", num_frames, size); // just sanity; sometimes get garbage + + ResourceMark rm(thread); // required for the arrays created in ContMirror::init_write_arrays(int) + + hframe orig_top_frame = cont.last_frame(); + log_trace(jvmcont)("top_frame:"); + if (log_is_enabled(Trace, jvmcont)) orig_top_frame.print(); + + cont.init_write_arrays(size); + + const bool empty = cont.is_empty(); + log_trace(jvmcont)("empty: %d", empty); + assert (!CONT_FULL_STACK || empty, ""); + + assert (orig_top_frame.is_empty() == empty, "empty: %d f.sp: %d f.fp: 0x%lx f.pc: %p", empty, orig_top_frame.sp(), orig_top_frame.fp(), orig_top_frame.pc()); + + address target = cont.freeze_target(); + + hframe hf; + hframe new_top; + int nframes = 0; + while (frame_top(f) < bottom) { + res_freeze res = freeze_frame(cont, target, f, map, hf, nframes == 0); // changes f, target, hf + if (res != freeze_ok) { // f hasn't changed + log_trace(jvmcont)("FREEZE FAILED %d", res); + cont.call_pinned(res, f); + return false; + } + if (nframes == 0) + new_top = hf; + nframes++; + } + if (log_is_enabled(Trace, jvmcont)) { log_trace(jvmcont)("Found entry frame: "); print_vframe(f); } + + cont.write_stacks(); + + hf = cont.fix_hframe_after_write(hf); + new_top = cont.fix_hframe_after_write(new_top); + + cont.set_last_frame(new_top); // must be done after loop, because we rely on the old top when patching last-copied frame + + // f now points at the entry frame + + if (empty) { + if (f.is_interpreted_frame()) { + hf.patch_link(cont, 0); + } + } else { + hf.patch_callee(cont, orig_top_frame); + +#ifdef ASSERT + if (!(hf.sender(cont) == orig_top_frame)) { + log_trace(jvmcont)("orig_top_frame:"); + orig_top_frame.print_on(tty); + log_trace(jvmcont)("sender:"); + hf.sender(cont).print_on(tty); + assert (hf.sender(cont) == orig_top_frame, ""); + } +#endif + }
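+ + // At this point hf is the bottom-most (oldest) newly frozen frame; when the continuation already had frames, patch_callee above has linked hf to orig_top_frame, so the h-stack again forms a single contiguous chain.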
+ + log_trace(jvmcont)("last h-frame:"); + if (log_is_enabled(Trace, jvmcont)) hf.print(cont); + + RegisterMap dmap(NULL, false); + f = f.sender(&dmap); // go one frame further, to the entry frame's caller + + cont.write(); + + log_trace(jvmcont)("--- end of freeze_continuation"); + + return true; +} + +// recursively call freeze for all continuations up the chain until appropriate scope +static oop freeze_continuations(JavaThread* thread, oop contOop, oop scope, frame& f, RegisterMap &map) { + // save old values to restore in case of freeze failure + assert (contOop != NULL, ""); + + log_trace(jvmcont)("Freeze ___ cont: %p scope: %p", (oopDesc*)contOop, (oopDesc*)scope); + + ContMirror cont(thread, contOop); + cont.read(); + + const int orig_sp = cont.sp(); + const long orig_fp = cont.fp(); + const address orig_pc = cont.pc(); + const int orig_ref_sp = cont.refSP(); + + if (freeze_continuation(thread, cont, f, map)) { + if (true /*Access<>::equals(cont.scope(), scope)*/) { // end recursion cont.scope() == scope TODO: don't use scope but a counter passed from Java + return contOop; + } else { + oop parent = java_lang_Continuation::parent(contOop); + if (parent != NULL) { + oop ret = freeze_continuations(thread, parent, scope, f, map); + if (ret != NULL) + return ret; + } else { + assert (false, ""); // TODO: throw exception + } + } + } + + // fail; reset cont + log_trace(jvmcont)("FREEZE FAILED resetting"); + cont.set_sp(orig_sp); + cont.set_fp(orig_fp); + cont.set_pc(orig_pc); + cont.set_refSP(orig_ref_sp); + cont.write(); + + return NULL; // propagates failure up the recursive call-chain. +} + +// returns the continuation yielding (based on context), or NULL for failure (due to pinning) +// it freezes multiple continuations, depending on contex +// it must set Continuation.stackSize +// sets Continuation.fp/sp to relative indices +// +// In: fi->pc, fi->sp, fi->fp all point to the current (topmost) frame to freeze (the yield frame) +// Out: fi->pc, fi->sp, fi->fp all point to the entry frame +// unless freezing has failed, in which case fi->pc = 0 +// +// oop arguments to entry functions, called from assembly must be oopDesc* because +// when CHECK_UNHANDLED_OOPS is defined (in fasdebug), oop is not oopDesc but some POD class +JRT_ENTRY(void, Continuation::freeze(JavaThread* thread, FrameInfo* fi, oopDesc* scope)) + log_trace(jvmcont)("~~~~~~~~~ freeze scope: %p", scope); + log_trace(jvmcont)("fi->sp: %p fi->fp: %p fi->pc: %p", fi->sp, fi->fp, fi->pc); + set_anchor(thread, fi); // DEBUG + print_frames(thread); + + thread->_continuation = NULL; + + HandleMark hm(thread); + + oop cont = get_continuation(thread); + assert(cont != NULL && oopDesc::is_oop_or_null(cont), "Invalid cont: %p", (void*)cont); + + RegisterMap map(thread, true); + map.set_include_argument_oops(false); + frame::update_map_with_saved_link(&map, (intptr_t **)&fi->fp); + frame f(fi->sp, fi->fp, fi->pc); // the yield frame + + cont = freeze_continuations(thread, cont, scope, f, map); // changes f + if (cont == NULL) { + fi->fp = NULL; + fi->sp = NULL; + fi->pc = NULL; + log_trace(jvmcont)("=== end of freeze (fail)"); + return; + } + + if (false) // TODO BUG: check what happens in Continuation.run. Does it overwrite the current cont? 
consider logic of nested conts + set_continuation(thread, cont); + + log_trace(jvmcont)("Jumping to frame:"); + print_vframe(f); + + fi->sp = f.sp(); // java_lang_Continuation::entrySP(cont); + fi->fp = f.fp(); + fi->pc = f.pc(); + set_anchor(thread, fi); + + log_trace(jvmcont)("ENTRY: sp: %p fp: %p pc: %p", fi->sp, fi->fp, fi->pc); + log_trace(jvmcont)("=== end of freeze"); +JRT_END + +#ifdef ASSERT +static bool is_entry_frame(ContMirror& cont, frame& f) { + return f.sp() == cont.entrySP(); +} +#endif + +static frame thaw_interpreted_frame(ContMirror& cont, hframe& hf, intptr_t* vsp, frame& sender) { + intptr_t* hsp = cont.stack_address(hf.sp()); + cont.copy_from_stack(hsp, vsp, hf.size(cont)); + + intptr_t* hfp = cont.stack_address(hf.fp()); + intptr_t* vfp = vsp + (hfp - hsp); + + frame f(vsp, vfp, hf.pc()); + + derelativize(vfp, frame::interpreter_frame_last_sp_offset); + derelativize(vfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom + derelativize(vfp, frame::interpreter_frame_locals_offset); + + patch_sender_sp(f, sender.sp()); + + assert (*(intptr_t**)(vfp + frame::interpreter_frame_locals_offset) < frame_top(sender), "sender top: %p locals: %p", + frame_top(sender), *(intptr_t**)(vfp + frame::interpreter_frame_locals_offset)); + + assert(f.is_interpreted_frame_valid(cont.thread()), "invalid thawed frame"); + + return f; +} + +static frame thaw_compiled_frame(ContMirror& cont, hframe& hf, intptr_t* vsp, frame& sender) { +#ifdef _LP64 + if ((long)vsp % 16 != 0) { + log_trace(jvmcont)("Aligning compiled frame: %p -> %p", vsp, vsp - 1); + vsp--; + } + assert((long)vsp % 16 == 0, ""); +#endif + + intptr_t* hsp = cont.stack_address(hf.sp()); + cont.copy_from_stack(hsp, vsp, hf.size(cont)); + + frame f(vsp, (intptr_t*)hf.fp(), hf.pc()); + + // TODO get nmethod. Call popNmethod if necessary + // when copying nmethod frames, we need to check for them being made non-reentrant, in which case we need to deopt them + // and turn them into interpreter frames. + + return f; +} + +static void thaw_oops(ContMirror& cont, frame& f, int oop_index, int num_oops, void* target, RegisterMap& map) { + log_trace(jvmcont)("Walking oops (thaw)"); + + // log_trace(jvmcont)("is_top: %d", is_top); + // assert (!is_top || cont.is_map_at_top(map), ""); + // assert (!is_top || f.is_interpreted_frame() || f.fp() == (intptr_t*)cont.fp(), "f.fp: %p cont.fp: 0x%lx", f.fp(), cont.fp()); + + assert (!map.include_argument_oops(), ""); + + intptr_t* fp = f.fp(); + frame::update_map_with_saved_link(&map, &fp); + + ResourceMark rm(cont.thread()); // apparently, oop-mapping may require resource allocation + ThawOopClosure oopClosure(&cont, oop_index, num_oops, target, &map); + f.oops_do(&oopClosure, NULL, &map); // can overwrite cont.fp() (because of update_register_map) + log_trace(jvmcont)("count: %d num_oops: %d", oopClosure.count(), num_oops); + assert(oopClosure.count() == num_oops, "closure oop count different."); + + // Thawing oops may have overwritten the link in the callee if rbp contained an oop (only possible if we're compiled). + // This only matters when we're the top frame, as that's the value that will be restored into rbp when we jump to continue. 
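+ // (The local fp above was registered with update_map_with_saved_link, so if the oop closure relocated + // an oop that was spilled in rbp, it wrote the new value into fp; the check below writes that value + // back into the frame.)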
+ if (fp != f.fp()) { + log_trace(jvmcont)("WHOA link has changed f.fp: %p link: %p", f.fp(), fp); + f.set_fp(fp); + } + + log_trace(jvmcont)("Done walking oops"); +} + +static frame thaw_frame(ContMirror& cont, hframe& hf, int oop_index, frame& sender) { + log_trace(jvmcont)("============================="); + + if (log_is_enabled(Trace, jvmcont)) hf.print(cont); + + const int fsize = hf.uncompressed_size(cont) != 0 ? hf.uncompressed_size(cont) : hf.size(cont); + const address bottom = (address) frame_top(sender); + intptr_t* vsp = (intptr_t*)(bottom - fsize); + + log_trace(jvmcont)("hsp: %d hfp: 0x%lx is_bottom: %d", hf.sp(), hf.fp(), hf.is_bottom(cont)); + log_trace(jvmcont)("stack_length: %d", cont.stack_length()); + log_trace(jvmcont)("bottom: %p vsp: %p fsize: %d", bottom, vsp, fsize); + + frame f = hf.is_interpreted_frame() ? thaw_interpreted_frame(cont, hf, vsp, sender) + : thaw_compiled_frame(cont, hf, vsp, sender); + + patch_link(f, sender.fp(), hf.is_interpreted_frame()); + + RegisterMap map(cont.thread(), true, false); + map.set_include_argument_oops(false); + thaw_oops(cont, f, oop_index, hf.num_oops(cont), f.sp(), map); + +#ifndef PRODUCT + RegisterMap dmap(NULL, false); + print_vframe(f, &dmap); +#endif + + return f; +} + +static frame thaw_frames(ContMirror& cont, hframe hf, int oop_index, int num_frames, int& count, int &last_oop_index, hframe& last_frame) { + if (num_frames == 0 || hf.is_empty() || hf.sp() >= cont.stack_length()) { + frame entry(cont.entrySP(), cont.entryFP(), cont.entryPC()); + log_trace(jvmcont)("Found entry:"); + print_vframe(entry); + + last_oop_index = oop_index; + last_frame = hf; + // cont.set_refSP(oop_index); + // cont.set_last_frame(hf); + return entry; + } + + hframe hsender = hf.sender(cont); + frame sender = thaw_frames(cont, hsender, oop_index + hf.num_oops(cont), num_frames - 1, count, last_oop_index, last_frame); + frame f = thaw_frame(cont, hf, oop_index, sender); + + if (count == 0) { + assert (is_entry_frame(cont, sender), ""); + if (hf.is_bottom(cont)) { + if (sender.is_interpreted_frame()) { + // We enter the continuation through an interface call (target.run()), but exit through a virtual call (doContinue()) + // Alternatively, wrap the call to target.run() inside a private method. 
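+ // (vtos because run() and doContinue() both return void, so no value is expected at the patched + // return entry; the chosen entry must match the invoke bytecode used at the caller's call site.)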
+ patch_return_pc(f, Interpreter::return_entry(vtos, 0, Bytecodes::_invokevirtual), f.is_interpreted_frame()); + } + } else { + log_trace(jvmcont)("Setting return address to return barrier: %p", StubRoutines::cont_returnBarrier()); + patch_return_pc(f, StubRoutines::cont_returnBarrier(), f.is_interpreted_frame()); + } + } + + count++; + return f; +} + +// fi->pc is the return address -- the entry +// fi->sp is the top of the stack after thaw +// fi->fp is the current rbp +// called after preparations (stack overflow check and making room) +static void thaw1(JavaThread* thread, FrameInfo* fi, const int num_frames) { + log_trace(jvmcont)("~~~~~~~~~ thaw %d", num_frames); + log_trace(jvmcont)("pc: %p", fi->pc); + log_trace(jvmcont)("rbp: %p", fi->fp); + + address target = (address)fi->sp; // we leave fi->sp as-is + + oop contOop = get_continuation(thread); + assert(contOop != NULL && oopDesc::is_oop_or_null(contOop), "Invalid cont: %p", (void*)contOop); + + ContMirror cont(thread, contOop); + cont.read(); + cont.set_entryFP(fi->fp); + if (num_frames > 1) { // not return barrier + cont.set_entryPC(fi->pc); + // if (Interpreter::contains(cont.entryPC())) { + // log_trace(jvmcont)("Bumping entrySP from %p to %p", cont.entrySP(), cont.entrySP() + 1); + // cont.set_entrySP(cont.entrySP() + 1); + // } + } + + if (num_frames == 1) + log_trace(jvmcont)("== RETURN BARRIER"); + + if (true) { + set_anchor(cont); + print_frames(thread); + } + + log_trace(jvmcont)("thaw: TARGET: %p", target); + log_trace(jvmcont)("QQQ CCCCC bottom: %p top: %p size: %ld", cont.entrySP(), target, (address)cont.entrySP() - target); + assert(num_frames > 0, "num_frames <= 0: %d", num_frames); + + assert(!cont.is_empty(), "no more frames"); + + hframe hf = cont.last_frame(); + + if (log_is_enabled(Trace, jvmcont)) hf.print(cont); + + RegisterMap map(thread, true, false); + map.set_include_argument_oops(false); + assert (map.update_map(), "RegisterMap not set to update"); + + int frame_count = 0; + int last_oop_index = 0; + hframe last_frame; + frame top = thaw_frames(cont, cont.last_frame(), cont.refSP(), num_frames, frame_count, last_oop_index, last_frame); + cont.set_last_frame(last_frame); + cont.set_refSP(last_oop_index); + + assert (!CONT_FULL_STACK || cont.is_empty(), ""); + + fi->sp = top.sp(); + fi->fp = top.fp(); + fi->pc = top.pc(); // we'll jump to the current continuation pc (alternative: Interpreter::return_entry(vtos, 0, Bytecodes::_invokestatic, true)) + + log_trace(jvmcont)("thawed %d frames", frame_count); + + cont.write(); + + set_anchor(thread, fi); + print_frames(thread); // must be done after write(), as frame walking reads fields off the Java objects.
+ + log_trace(jvmcont)("cont sp: %d fp: %lx", cont.sp(), cont.fp()); + log_trace(jvmcont)("fi->sp: %p fi->fp: %p fi->pc: %p", fi->sp, fi->fp, fi->pc); + print_vframe(frame(fi->sp, fi->fp, fi->pc), NULL); + + clear_anchor(thread); + thread->_continuation = contOop; + + log_trace(jvmcont)("=== End of thaw"); +} + +static size_t frames_size(oop cont, int frames) { + size_t size = 0; + int length = java_lang_Continuation::stack(cont)->length(); + int* hstack = (int*)java_lang_Continuation::stack_base(cont); + int sp = java_lang_Continuation::sp(cont); + // int fp = java_lang_Continuation::fp(cont); + + size = 8; + bool last_interpreted = false; + + for (int i=0; i < frames && sp >= 0 && sp < length; i++) { + HFrameMetadata* md = metadata(to_haddress(hstack, sp)); + size_t uncompressed_size = md->uncompressed_size; + size_t fsize = md->frame_size; // (indices are to 32-bit words) + + size += uncompressed_size != 0 ? uncompressed_size : fsize; + + bool is_interpreted = uncompressed_size != 0; + if (is_interpreted != last_interpreted) { + size += 8; + last_interpreted = is_interpreted; + } + + sp += to_index(fsize + METADATA_SIZE); + // fp += hstack[fp]; // contains offset to previous fp + } + log_trace(jvmcont)("frames_size: %lu", size); + return size; +} + +static bool stack_overflow_check(JavaThread* thread, int size, address sp) { + const int page_size = os::vm_page_size(); + if (size > page_size) { + if (sp - size < thread->stack_overflow_limit()) { + return false; + } + } + return true; +} + +// In: fi->sp = the sp of the entry frame +// Out: returns the size of frames to thaw or 0 for no more frames or a stack overflow +// On failure: fi->sp - cont's entry SP +// fi->fp - cont's entry FP +// fi->pc - overflow? throw StackOverflowError : cont's entry PC +JRT_LEAF(int, Continuation::prepare_thaw(FrameInfo* fi, int num_frames)) + log_trace(jvmcont)("~~~~~~~~~ prepare_thaw"); + if (CONT_FULL_STACK) + num_frames = 10000; + + log_trace(jvmcont)("prepare_thaw %d", num_frames); + log_trace(jvmcont)("pc: %p", fi->pc); + log_trace(jvmcont)("rbp: %p", fi->fp); + + const address bottom = (address)fi->sp; // os::current_stack_pointer(); points to the entry frame + log_trace(jvmcont)("bottom: %p", bottom); + + JavaThread* thread = JavaThread::current(); + oop cont = get_continuation(thread); + + assert (bottom == (address)java_lang_Continuation::entrySP(cont), "bottom: %p, entrySP: %p", bottom, java_lang_Continuation::entrySP(cont)); + // assert ((!Interpreter::contains(fi->pc) && (bottom == (address)java_lang_Continuation::entrySP(cont))) + // || (Interpreter::contains(fi->pc) && (bottom == (address)(java_lang_Continuation::entrySP(cont) + 1))), "bottom: %p, entrySP: %p", bottom, java_lang_Continuation::entrySP(cont)); + int size = frames_size(cont, num_frames); + if (size == 0) { // no more frames + return 0; + } + if (!stack_overflow_check(thread, size + 300, bottom)) { + fi->pc = StubRoutines::throw_StackOverflowError_entry(); + return 0; + } + + address target = bottom - size; + log_trace(jvmcont)("target: %p", target); + log_trace(jvmcont)("QQQ BBBBB bottom: %p top: %p size: %d", bottom, target, size); + + return size; +JRT_END + +// IN: fi->sp = the future SP of the topmost thawed frame (where we'll copy the thawed frames) +// Out: fi->sp = the SP of the topmost thawed frame -- the one we will resume at +// fi->fp = the FP " ... +// fi->pc = the PC " ... 
+// JRT_ENTRY(void, Continuation::thaw(JavaThread* thread, FrameInfo* fi, int num_frames)) +JRT_LEAF(void, Continuation::thaw(FrameInfo* fi, int num_frames)) + if (CONT_FULL_STACK) + num_frames = 10000; + + thaw1(JavaThread::current(), fi, num_frames); +JRT_END + +// When walking the virtual stack, this method returns true +// iff the frame is a thawed continuation frame whose +// caller is still frozen on the h-stack. +// The continuation object can be extracted from the thread. +bool Continuation::is_cont_bottom_frame(const frame& f) { + return return_pc(f, f.is_interpreted_frame()) == StubRoutines::cont_returnBarrier(); // TODO: account for multiple return barriers (based on return type) +} + +static oop find_continuation_for_frame(JavaThread* thread, intptr_t* const sp) { + oop cont = get_continuation(thread); + while (cont != NULL && java_lang_Continuation::entrySP(cont) < sp) + cont = java_lang_Continuation::parent(cont); + return cont; +} + +frame Continuation::fix_continuation_bottom_sender(const frame& callee, frame f, RegisterMap* map) { + if (map->thread() != NULL && is_cont_bottom_frame(callee)) { + log_trace(jvmcont)("YEYEYEYEYEYEYEEYEY"); + oop cont = find_continuation_for_frame(map->thread(), f.sp()); + assert (cont != NULL, ""); + // bool is_deopt = f.is_deoptimized_frame(); + address pc = java_lang_Continuation::entryPC(cont); + f.set_pc_preserve_deopt(pc); + } + return f; +} + +///// DEBUGGING + +static void print_oop(void *p, oop obj, outputStream* st) { + if (!log_is_enabled(Trace, jvmcont)) return; + + st->print_cr(INTPTR_FORMAT ": ", p2i(p)); + if (obj == NULL) { + st->print_cr("*NULL*"); + } else { + if (oopDesc::is_oop_or_null(obj)) { + if (obj->is_objArray()) { + st->print_cr("valid objArray: " INTPTR_FORMAT, p2i(obj)); + } else { + obj->print_value_on(st); + // obj->print(); + } + } else { + st->print_cr("invalid oop: " INTPTR_FORMAT, p2i(obj)); + } + st->cr(); + } +} + +static void print_vframe(frame f, RegisterMap* map, outputStream* st) { + if (!log_is_enabled(Trace, jvmcont)) return; + + st->print_cr("\tfp: %p real_fp: %p, sp: %p pc: %p usp: %p top: %p", f.fp(), f.real_fp(), f.sp(), f.pc(), f.unextended_sp(), frame_top(f)); + + f.print_value_on(st, NULL); + + // st->print("\tpc: "); os::print_location(st, *(intptr_t*)f.pc()); + intptr_t* fp = f.fp(); + intptr_t* usp = frame_top(f); + st->print("cb: "); + if (f.cb() == NULL) { + st->print_cr("NULL"); + return; + } + f.cb()->print_value_on(st); st->cr(); + if (f.is_interpreted_frame()) { + Method* method = f.interpreter_frame_method(); + st->print_cr("\tinterpreted"); + st->print("\tMethod: "); method->print_short_name(st); st->cr(); + st->print_cr("\tcode_size: %d", method->code_size()); + // st->print_cr("base: %p end: %p", method->constMethod()->code_base(), method->constMethod()->code_end()); + st->print_cr("\tlink: %p", *(void**)(fp + frame::link_offset)); + st->print_cr("\treturn_pc: %p", *(void**)(fp + frame::return_addr_offset)); + st->print_cr("\tssp: %p", (void*) (fp + frame::sender_sp_offset)); + st->print_cr("\tissp: %p", *(void**)(fp + frame::interpreter_frame_sender_sp_offset)); + st->print_cr("\tlast_sp: %p", *(void**)(fp + frame::interpreter_frame_last_sp_offset)); + st->print_cr("\tinitial_sp: %p", *(void**)(fp + frame::interpreter_frame_initial_sp_offset)); + // st->print_cr("\tmon_block_top: %p", *(void**)(fp + frame::interpreter_frame_monitor_block_top_offset)); + // st->print_cr("\tmon_block_bottom: %p", *(void**)(fp + frame::interpreter_frame_monitor_block_bottom_offset)); + 
st->print_cr("\tlocals: %p", *(void**)(fp + frame::interpreter_frame_locals_offset)); + st->print_cr("\tcache: %p", *(void**)(fp + frame::interpreter_frame_cache_offset)); + st->print_cr("\tbcp: %p", *(void**)(fp + frame::interpreter_frame_bcp_offset)); + st->print_cr("\tbci: %d", method->bci_from(*(address*)(fp + frame::interpreter_frame_bcp_offset))); + st->print_cr("\tmirror: %p", *(void**)(fp + frame::interpreter_frame_mirror_offset)); + // st->print("\tmirror: "); os::print_location(st, *(intptr_t*)(fp + frame::interpreter_frame_mirror_offset), true); + st->print("\treturn_pc: "); os::print_location(st, *(intptr_t*)(fp + frame::return_addr_offset)); + } else { + st->print_cr("\tcompiled/C"); + // st->print_cr("\tlink: %p", (void*)f.at(frame::link_offset)); + // st->print_cr("\treturn_pc: %p", *(void**)(fp + frame::return_addr_offset)); + // st->print_cr("\tssp: %p", *(void**)(fp + frame::sender_sp_offset)); + st->print_cr("\tcb.size: %d", f.cb()->frame_size()); + st->print_cr("\tlink: %p", *(intptr_t**) (f.real_fp() - frame::sender_sp_offset)); + st->print_cr("\t'real' return_pc: %p", *(void**)(f.real_fp() - 1)); + st->print("\t`real` return_pc: "); os::print_location(st, *(intptr_t*)(f.real_fp() - 1)); + // st->print("\treturn_pc: "); os::print_location(st, *(intptr_t*)(fp + frame::return_addr_offset)); + } + if (map != NULL) { + intptr_t* bottom = frame_bottom(f); + long fsize = (address)bottom - (address)usp; + st->print_cr("\tsize: %ld", fsize); + st->print_cr("\tbounds: %p - %p", usp, bottom); + + if (false) { + st->print_cr("--data--"); + for(int i=0; iprint_cr("%p: %x", ((address)usp + i), *((address)usp + i)); + st->print_cr("--end data--"); + } + } +} + +static void print_frames(JavaThread* thread, outputStream* st) { + if (!log_is_enabled(Trace, jvmcont)) return; + + st->print_cr("------- frames ---------"); + RegisterMap map(thread, false); + ResourceMark rm; +#ifndef PRODUCT + FrameValues values; +#endif + int i = 0; + for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) { + print_vframe(f, &map, st); +#ifndef PRODUCT + f.describe(values, i); +#endif + i++; + } +#ifndef PRODUCT + values.print(thread); +#endif + st->print_cr("======= end frames ========="); +} + +#ifdef ASSERT +// Does a reverse lookup of a RegisterMap. Returns the register, if any, spilled at the given address. +static VMReg find_register_spilled_here(void* p, RegisterMap* map) { + for(int i = 0; i < RegisterMap::reg_count; i++) { + VMReg r = VMRegImpl::as_VMReg(i); + if (p == map->location(r)) return r; + } + return NULL; +} +#endif diff -r 656931ff4345 src/hotspot/share/runtime/continuation.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/continuation.hpp Sun May 20 17:57:55 2018 +0100 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_CONTINUATION_HPP +#define SHARE_VM_RUNTIME_CONTINUATION_HPP + +#include "runtime/globals.hpp" + +#define CONT_FULL_STACK (!UseNewCode) + +// The order of this struct matters as it's directly manipulated by assembly code (push/pop) +struct FrameInfo { + address pc; + intptr_t* fp; + intptr_t* sp; +}; + +class Continuation : AllStatic { +public: + static void freeze(JavaThread* thread, FrameInfo* fi, oopDesc* scope); + static int prepare_thaw(FrameInfo* fi, int num_frames); + static void thaw(FrameInfo* fi, int num_frames); + + static bool is_cont_bottom_frame(const frame& f); + static frame fix_continuation_bottom_sender(const frame& callee, frame f, RegisterMap* map); +}; + +void CONT_RegisterNativeMethods(JNIEnv *env, jclass cls); + +#endif // SHARE_VM_RUNTIME_CONTINUATION_HPP diff -r 656931ff4345 src/hotspot/share/runtime/frame.cpp --- a/src/hotspot/share/runtime/frame.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/frame.cpp Sun May 20 17:57:55 2018 +0100 @@ -52,11 +52,12 @@ #include "utilities/decoder.hpp" #include "utilities/formatBuffer.hpp" -RegisterMap::RegisterMap(JavaThread *thread, bool update_map) { +RegisterMap::RegisterMap(JavaThread *thread, bool update_map, bool validate_oops) { _thread = thread; _update_map = update_map; clear(); debug_only(_update_for_id = NULL;) + DEBUG_ONLY(_validate_oops = validate_oops;) #ifndef PRODUCT for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL; #endif /* PRODUCT */ @@ -69,6 +70,7 @@ _update_map = map->update_map(); _include_argument_oops = map->include_argument_oops(); debug_only(_update_for_id = map->_update_for_id;) + DEBUG_ONLY(_validate_oops = map->_validate_oops;) pd_initialize_from(map); if (update_map()) { for(int i = 0; i < location_valid_size; i++) { @@ -161,6 +163,17 @@ } +void frame::set_pc_preserve_deopt(address newpc) { +#ifdef ASSERT + if (_cb != NULL && _cb->is_nmethod()) { + assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation"); + } +#endif // ASSERT + + _pc = newpc; + _cb = CodeCache::find_blob_unsafe(_pc); +} + // type testers bool frame::is_ignored_frame() const { return false; // FIXME: some LambdaForm frames should be ignored @@ -953,6 +966,7 @@ } else { OopMapCache::compute_one_oop_map(m, bci, &mask); } + // mask.print(); mask.iterate_oop(&blk); } diff -r 656931ff4345 src/hotspot/share/runtime/frame.hpp --- a/src/hotspot/share/runtime/frame.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/frame.hpp Sun May 20 17:57:55 2018 +0100 @@ -89,6 +89,7 @@ address raw_pc() const; void set_pc( address newpc ); + void set_pc_preserve_deopt( address newpc ); intptr_t* sp() const { return _sp; } void set_sp( intptr_t* newsp ) { _sp = newsp; } diff -r 656931ff4345 src/hotspot/share/runtime/init.cpp --- a/src/hotspot/share/runtime/init.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/init.cpp Sun May 20 17:57:55 2018 +0100 @@ -57,6 +57,7 @@ void VM_Version_init(); void os_init_globals(); // depends on VM_Version_init, before universe_init void stubRoutines_init1(); +void stubRoutines_initContinuationStubs(); jint universe_init(); // depends 
on codeCache_init and stubRoutines_init // depends on universe_init, must be before interpreter_init (currently only on SPARC) void gc_barrier_stubs_init(); @@ -114,6 +115,7 @@ return status; gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init + stubRoutines_initContinuationStubs(); interpreter_init(); // before any methods loaded invocationCounter_init(); // before any methods loaded accessFlags_init(); diff -r 656931ff4345 src/hotspot/share/runtime/os.cpp --- a/src/hotspot/share/runtime/os.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/os.cpp Sun May 20 17:57:55 2018 +0100 @@ -1076,7 +1076,7 @@ } if (Universe::heap()->is_in(addr)) { - HeapWord* p = Universe::heap()->block_start(addr); + HeapWord* p = NULL; // Universe::heap()->block_start(addr); bool print = false; // If we couldn't find it it just may mean that heap wasn't parsable // See if we were just given an oop directly @@ -1091,8 +1091,14 @@ st->print_cr(INTPTR_FORMAT " is an oop", p2i(addr)); } else { st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, p2i(addr), p2i(p)); + } + if (*((juint*)p) == badHeapWordVal) { + st->print_cr(" Bad word"); + } else if (*((juint*)p) == badMetaWordVal) { + st->print_cr(" Bad meta word"); + } else { + oop(p)->print_on(st); } - oop(p)->print_on(st); return; } } else { diff -r 656931ff4345 src/hotspot/share/runtime/registerMap.hpp --- a/src/hotspot/share/runtime/registerMap.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/registerMap.hpp Sun May 20 17:57:55 2018 +0100 @@ -75,6 +75,7 @@ JavaThread* _thread; // Reference to current thread bool _update_map; // Tells if the register map need to be // updated when traversing the stack + DEBUG_ONLY(bool _validate_oops;) // whether to perform valid oop checks in asserts #ifdef ASSERT void check_location_valid(); @@ -83,8 +84,8 @@ #endif public: - debug_only(intptr_t* _update_for_id;) // Assert that RegisterMap is not updated twice for same frame - RegisterMap(JavaThread *thread, bool update_map = true); + DEBUG_ONLY(intptr_t* _update_for_id;) // Assert that RegisterMap is not updated twice for same frame + RegisterMap(JavaThread *thread, bool update_map = true, bool validate_oops = true); RegisterMap(const RegisterMap* map); address location(VMReg reg) const { @@ -116,6 +117,9 @@ JavaThread *thread() const { return _thread; } bool update_map() const { return _update_map; } +#ifdef ASSERT + bool validate_oops() const { return _validate_oops; } +#endif void print_on(outputStream* st) const; void print() const; diff -r 656931ff4345 src/hotspot/share/runtime/sharedRuntime.cpp --- a/src/hotspot/share/runtime/sharedRuntime.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/sharedRuntime.cpp Sun May 20 17:57:55 2018 +0100 @@ -529,6 +529,7 @@ #ifndef PRODUCT { ResourceMark rm; tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address)); + os::print_location(tty, (intptr_t)return_address); tty->print_cr("a) exception happened in (new?) 
code stubs/buffers that is not handled here"); tty->print_cr("b) other problem"); } @@ -2958,6 +2959,15 @@ return regs; } +JRT_LEAF(jlong, SharedRuntime::continuation_getFP(JavaThread* thread) ) + RegisterMap reg_map2(thread); + assert(false, ""); + frame stubFrame = thread->last_frame(); + // Caller-frame is a compiled frame + frame callerFrame = stubFrame.sender(&reg_map2); + return (jlong) callerFrame.real_fp(); +JRT_END + // OSR Migration Code // // This code is used convert interpreter frames into compiled frames. It is diff -r 656931ff4345 src/hotspot/share/runtime/sharedRuntime.hpp --- a/src/hotspot/share/runtime/sharedRuntime.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/sharedRuntime.hpp Sun May 20 17:57:55 2018 +0100 @@ -437,6 +437,9 @@ const BasicType *sig_bt, const VMRegPair *regs); + + static jlong continuation_getFP(JavaThread *thread); + // OSR support // OSR_migration_begin will extract the jvm state from an interpreter diff -r 656931ff4345 src/hotspot/share/runtime/stubRoutines.cpp --- a/src/hotspot/share/runtime/stubRoutines.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/stubRoutines.cpp Sun May 20 17:57:55 2018 +0100 @@ -27,6 +27,7 @@ #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/continuation.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/timerTrace.hpp" #include "runtime/sharedRuntime.hpp" @@ -46,6 +47,7 @@ BufferBlob* StubRoutines::_code1 = NULL; BufferBlob* StubRoutines::_code2 = NULL; +BufferBlob* StubRoutines::_code3 = NULL; address StubRoutines::_call_stub_return_address = NULL; address StubRoutines::_call_stub_entry = NULL; @@ -174,13 +176,20 @@ address StubRoutines::_safefetchN_fault_pc = NULL; address StubRoutines::_safefetchN_continuation_pc = NULL; +address StubRoutines::_cont_doYield = NULL; +address StubRoutines::_cont_thaw2 = NULL; +address StubRoutines::_cont_thaw1 = NULL; +address StubRoutines::_cont_returnBarrier = NULL; +address StubRoutines::_cont_getSP = NULL; +address StubRoutines::_cont_getPC = NULL; + // Initialization // // Note: to break cycle with universe initialization, stubs are generated in two phases. // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry). // The second phase includes all other stubs (which may depend on universe being initialized.) -extern void StubGenerator_generate(CodeBuffer* code, bool all); // only interface to generators +extern void StubGenerator_generate(CodeBuffer* code, int phase); // only interface to generators void StubRoutines::initialize1() { if (_code1 == NULL) { @@ -191,14 +200,13 @@ vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)"); } CodeBuffer buffer(_code1); - StubGenerator_generate(&buffer, false); + StubGenerator_generate(&buffer, 0); // When new stubs added we need to make sure there is some space left // to catch situation when we should increase size again.
assert(code_size1 == 0 || buffer.insts_remaining() > 200, "increase code_size1"); } } - #ifdef ASSERT typedef void (*arraycopy_fn)(address src, address dst, int count); @@ -269,6 +277,22 @@ } #endif +void StubRoutines::initializeContinuationStubs() { + if (_code3 == NULL) { + ResourceMark rm; + TraceTime timer("StubRoutines generation 3", TRACETIME_LOG(Info, startuptime)); + _code3 = BufferBlob::create("StubRoutines (3)", code_size2); + if (_code3 == NULL) { + vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (3)"); + } + CodeBuffer buffer(_code3); + StubGenerator_generate(&buffer, 1); + // When new stubs added we need to make sure there is some space left + // to catch situation when we should increase size again. + assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size2"); + } +} + void StubRoutines::initialize2() { if (_code2 == NULL) { ResourceMark rm; @@ -278,7 +302,7 @@ vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)"); } CodeBuffer buffer(_code2); - StubGenerator_generate(&buffer, true); + StubGenerator_generate(&buffer, 2); // When new stubs added we need to make sure there is some space left // to catch situation when we should increase size again. assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size2"); @@ -372,6 +396,7 @@ void stubRoutines_init1() { StubRoutines::initialize1(); } void stubRoutines_init2() { StubRoutines::initialize2(); } +void stubRoutines_initContinuationStubs() { StubRoutines::initializeContinuationStubs(); } // // Default versions of arraycopy functions diff -r 656931ff4345 src/hotspot/share/runtime/stubRoutines.hpp --- a/src/hotspot/share/runtime/stubRoutines.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/stubRoutines.hpp Sun May 20 17:57:55 2018 +0100 @@ -121,7 +121,8 @@ static jint _fpu_subnormal_bias2[3]; static BufferBlob* _code1; // code buffer for initial routines - static BufferBlob* _code2; // code buffer for all other routines + static BufferBlob* _code2; // code buffer for all other routines + static BufferBlob* _code3; // code buffer for continuation stubs // Leaf routines which implement arraycopy and their addresses // arraycopy operands aligned on element type boundary @@ -206,6 +207,13 @@ static address _dlibm_tan_cot_huge; static address _dtan; + static address _cont_doYield; + static address _cont_thaw2; + static address _cont_thaw1; + static address _cont_returnBarrier; + static address _cont_getSP; + static address _cont_getPC; + // These are versions of the java.lang.Math methods which perform // the same operations as the intrinsic version. They are used for // constant folding in the compiler to ensure equivalence.
If the @@ -230,6 +238,7 @@ // Initialization/Testing static void initialize1(); // must happen before universe::genesis static void initialize2(); // must happen after universe::genesis + static void initializeContinuationStubs(); // must happen after universe::genesis static bool is_stub_code(address addr) { return contains(addr); } @@ -241,6 +250,7 @@ static RuntimeBlob* code1() { return _code1; } static RuntimeBlob* code2() { return _code2; } + static RuntimeBlob* code3() { return _code3; } // Debugging static jint verify_oop_count() { return _verify_oop_count; } @@ -381,6 +391,15 @@ static address dlibm_tan_cot_huge() { return _dlibm_tan_cot_huge; } static address dtan() { return _dtan; } + static address cont_doYield() { return _cont_doYield; } + static address cont_thaw(int frames) { if (frames == 2) return _cont_thaw2; + if (frames == 1) return _cont_thaw1; + return NULL; } + static address cont_returnBarrier() { return _cont_returnBarrier; } + static address cont_getSP() { return _cont_getSP; } + static address cont_getPC() { return _cont_getPC; } + + static address select_fill_function(BasicType t, bool aligned, const char* &name); static address zero_aligned_words() { return _zero_aligned_words; } diff -r 656931ff4345 src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/thread.cpp Sun May 20 17:57:55 2018 +0100 @@ -1514,6 +1514,7 @@ _in_deopt_handler = 0; _doing_unsafe_access = false; _stack_guard_state = stack_guard_unused; + _continuation = NULL; // DEBUG #if INCLUDE_JVMCI _pending_monitorenter = false; _pending_deoptimization = -1; diff -r 656931ff4345 src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp Fri May 18 11:52:53 2018 +0100 +++ b/src/hotspot/share/runtime/thread.hpp Sun May 20 17:57:55 2018 +0100 @@ -976,6 +976,10 @@ volatile JNIAttachStates _jni_attach_state; public: + + // Continuation support DEBUG + oopDesc* _continuation; + // State of the stack guard pages for this thread. enum StackGuardState { stack_guard_unused, // not needed @@ -1649,6 +1653,7 @@ static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); } static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); } static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); } + static ByteSize continuation_offset() { return byte_offset_of(JavaThread, _continuation); } #if INCLUDE_JVMCI static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); } static ByteSize pending_monitorenter_offset() { return byte_offset_of(JavaThread, _pending_monitorenter); } diff -r 656931ff4345 src/java.base/share/classes/java/lang/Continuation.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/java/lang/Continuation.java Sun May 20 17:57:55 2018 +0100 @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import jdk.internal.HotSpotIntrinsicCandidate; +import jdk.internal.vm.annotation.DontInline; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + + +/** + * TBD + */ +public class Continuation { + private static final boolean DEBUG = false; + + private static Thread currentKernelThread() { + return Thread.currentKernelThread(); + } + + private static final int METADATA_SIZE = 2; + static { + // registerNatives(); + } + + private final ContinuationScope scope; + private Runnable target; + private Continuation parent; // null for native stack + // addresses into the vstack; only valid when mounted + private long entrySP = 0; + private long entryFP = 0; + private long entryPC = 0; + + private int[] stack = null; // grows down + private Object[] refStack = null; + + private long fp = 0; // an index into the h-stack if the top frame is interpreted, otherwise the value of rbp + private int sp = -1; // index into the h-stack + private long pc = 0; + private int refSP; + + private final AtomicBoolean mounted = new AtomicBoolean(); + + + private Continuation captor; + + private long[] nmethods = null; // grows up + private int numNmethods = 0; + + private boolean done; + + /** + * TBD + * @param scope TBD + * @param target TBD + */ + public Continuation(ContinuationScope scope, Runnable target) { + this.scope = scope; + this.target = target; + } + + /** + * TBD + * @param scope TBD + * @return TBD + */ + public static Continuation getCurrentContinuation(ContinuationScope scope) { + Continuation cont = currentKernelThread().getContinuation(); + while (cont != null && cont.scope != scope) + cont = cont.parent; + return cont; + } + + /** + * TBD + */ + @DontInline + public final void run() { + if (DEBUG) { + System.out.println(); + System.out.println("++++++++++++++++++++++++++++++"); + } + + if (!mounted.compareAndSet(false, true)) + throw new IllegalStateException("Mounted!!!!"); + + if (done) + throw new IllegalStateException("Continuation terminated"); + + Thread t = currentKernelThread(); + this.parent = t.getContinuation(); + t.setContinuation(this); + + if (DEBUG) + walkFrames(); + + if (captor != null && captor != parent) + throw new IllegalStateException(); + + int origRefSP = refSP; + try { + enter(); + } finally { + if (DEBUG) + System.out.println("run (after) sp: " + sp + " refSP: " + refSP); + + entrySP = 0; + entryFP = 0; + entryPC = 0; + for (int i = refSP; i < origRefSP; i++) + refStack[i] = null; + + t.setContinuation(this.parent); + mounted.set(false); + + if (DEBUG) + System.out.println(">>>>>>>> DONE <<<<<<<<<<<<<"); + } + } + + private void enter0() { + target.run(); + } + + /** + * TBD + * @param context TBD + */ + @DontInline + public static void yield(ContinuationScope context) { + Continuation cont = getCurrentContinuation(context); +//
System.out.println("JJJ!!!! " + context); + doYield(context); // intrinsic + cont.onContinue(); + } + + private void onPinned0(int reason) { + onPinned(reason); + } + + /** + * TBD + * @param reason TBD + */ + protected void onPinned(int reason) { + if (DEBUG) + System.out.println("PINNED! " + reason); + throw new IllegalStateException("Pinned: " + reason); + } + + /** + * TBD + */ + protected void onContinue() { + if (DEBUG) + System.out.println("On continue"); + } + + /** + * TBD + * @return TBD + */ + public boolean isDone() { + return done; + } + + + private long readLong(int[] array, int index) { + return (long)array[index] << 32 + array[index+1]; + } + + + private int metadataSize(int sp) { + return stack[sp - 1]; + } + + private int metadataOops(int sp) { + return stack[sp - 2]; + } + + private void getStacks(int size, int oops, int frames) { + try { + getStack(size); + getRefStack(oops); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + // size is the size in bytes needed for newly frozen frames PLUS their metadata + private void getStack(int size) { + if (DEBUG) + System.out.println("-- getStack size: " + size); + size = size >> 2; + + if (this.stack == null) { + this.stack = new int[size]; + this.sp = stack.length + METADATA_SIZE; + } else { + int oldLength = stack.length; + int offset = sp - METADATA_SIZE; + int newLength = oldLength - offset + size; + if (newLength <= oldLength) { + if (DEBUG) + System.out.println("-- size ok"); + return; + } + + int[] newStack = new int[newLength]; + int n = oldLength - offset; + System.arraycopy(stack, offset, newStack, newLength - n, n); + + // we need to preserve the same offset from the array's _end_ + this.sp = newLength - (oldLength - sp); + + this.stack = newStack; + } + if (DEBUG) { + walkFrames(); + System.out.println("--- end of getStack"); + } + } + + private void getRefStack(int size) { + if (DEBUG) + System.out.println("-- getRefStack: " + size); + if (refStack == null) { + this.refStack = new Object[size]; // TODO: nearest power of 2 + this.refSP = refStack.length; + } else if (refSP < size) { + int oldLength = refStack.length; + int newLength = refStack.length * 2; + Object[] newRefStack = new Object[newLength]; + int n = oldLength - refSP; + System.arraycopy(refStack, refSP, newRefStack, newLength - n, n); + this.refStack = newRefStack; + this.refSP = newLength - (oldLength - refSP); + } + if (DEBUG) { + walkFrames(); + System.out.println("--- end of getRefStack: " + refStack.length); + } + } + + private void pushNmethod(long nmethod) { + if (nmethods == null) { + nmethods = new long[8]; + } else { + if (numNmethods == nmethods.length) { + long[] newNmethods = new long[nmethods.length * 2]; + System.arraycopy(nmethods, 0, newNmethods, 0, numNmethods); + this.nmethods = newNmethods; + } + } + nmethods[numNmethods++] = nmethod; + } + + private void popNmethod() { + numNmethods--; + } + + private static Map liveNmethods = new ConcurrentHashMap<>(); + + private void processNmethods(int before, int after) { + + } + + + @HotSpotIntrinsicCandidate + private static long getSP() { throw new Error("Intrinsic not installed"); }; + + @HotSpotIntrinsicCandidate + private static long getFP() { throw new Error("Intrinsic not installed"); }; + + @HotSpotIntrinsicCandidate + private static long getPC() { throw new Error("Intrinsic not installed"); }; + + @HotSpotIntrinsicCandidate + private void doContinue() { throw new Error("Intrinsic not installed"); }; + + @HotSpotIntrinsicCandidate + private static void 
doYield(ContinuationScope context) { throw new Error("Intrinsic not installed"); }; + + /** + * TBD + * @return value + */ + @HotSpotIntrinsicCandidate + public static int runLevel() { return 0; } + + /** + * TBD + */ + public native void foo(); + + // native methods + private static native void registerNatives(); + + private void walkFrames() { + System.out.println("--------------"); + System.out.println("walkFrames:"); + if (stack == null) { + System.out.println("Empty stack."); + return; + } +// int fp = this.fp; + int sp = this.sp; + System.out.println("stack.length = " + stack.length + " sp: " + sp); + // while(sp >=0 && sp < stack.length) { + // int size = metadataSize(sp); + // int numOops = metadataOops(sp); + + // System.out.println("sp: " + sp + " size: " + size + "(" + size/4 + ") numOops: " + numOops); + + // sp += size/4 + METADATA_SIZE; + // // fp += offset; + // } + System.out.println("++++++++++++"); + if (refStack != null) { + System.out.println("refStack.length : " + refStack.length); + for (int i = refStack.length - 1; i >= refSP; i--) { + Object obj = refStack[i]; + System.out.println(i + ": " + (obj == this ? "this" : obj)); + } + } + System.out.println("##############"); + } + + private void dump() { + System.out.println("Continuation@" + Long.toHexString(System.identityHashCode(this))); + System.out.println("\tparent: " + parent); + System.out.println("\tstack.length: " + stack.length); + for (int i = 1; i <= 10; i++) { + int j = stack.length - i; + System.out.println("\tarray[ " + j + "] = " + stack[j]); + } + } +} diff -r 656931ff4345 src/java.base/share/classes/java/lang/ContinuationScope.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/java/lang/ContinuationScope.java Sun May 20 17:57:55 2018 +0100 @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package java.lang; + +/** + * TBD + */ +public abstract class ContinuationScope { +} diff -r 656931ff4345 src/java.base/share/classes/java/lang/Thread.java --- a/src/java.base/share/classes/java/lang/Thread.java Fri May 18 11:52:53 2018 +0100 +++ b/src/java.base/share/classes/java/lang/Thread.java Sun May 20 17:57:55 2018 +0100 @@ -201,6 +201,11 @@ */ private final long tid; + /* + * Current innermost continuation + */ + private Continuation cont; + /* For generating thread ID */ private static long threadSeqNumber; @@ -1625,6 +1630,20 @@ } /** + * TBD + */ + Continuation getContinuation() { + return cont; + } + + /** + * TBD + */ + void setContinuation(Continuation cont) { + this.cont = cont; + } + + /** * Sets the context ClassLoader for this Thread. The context * ClassLoader can be set when a thread is created, and allows * the creator of the thread to provide the appropriate class loader, diff -r 656931ff4345 src/java.base/share/native/libjava/Continuation.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/native/libjava/Continuation.c Sun May 20 17:57:55 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "jni.h" +#include "jvm.h" + +#include "java_lang_Continuation.h" + +/* + * Class: java_lang_Continuation + * Method: registerNatives + * Signature: ()V + */ +JNIEXPORT void JNICALL +Java_java_lang_Continuation_registerNatives(JNIEnv *env, jclass cls) +{ + JVM_RegisterContinuationMethods(env, cls); +} diff -r 656931ff4345 src/java.naming/share/classes/com/sun/jndi/ldap/LdapCtx.java --- a/src/java.naming/share/classes/com/sun/jndi/ldap/LdapCtx.java Fri May 18 11:52:53 2018 +0100 +++ b/src/java.naming/share/classes/com/sun/jndi/ldap/LdapCtx.java Sun May 20 17:57:55 2018 +0100 @@ -44,6 +44,7 @@ import java.io.OutputStream; import com.sun.jndi.toolkit.ctx.*; +import com.sun.jndi.toolkit.ctx.Continuation; import com.sun.jndi.toolkit.dir.HierMemDirCtx; import com.sun.jndi.toolkit.dir.SearchFilter; import com.sun.jndi.ldap.ext.StartTlsResponseImpl; diff -r 656931ff4345 src/java.naming/share/classes/javax/naming/spi/NamingManager.java --- a/src/java.naming/share/classes/javax/naming/spi/NamingManager.java Fri May 18 11:52:53 2018 +0100 +++ b/src/java.naming/share/classes/javax/naming/spi/NamingManager.java Sun May 20 17:57:55 2018 +0100 @@ -412,7 +412,7 @@ /** * Retrieves a context identified by {@code obj}, using the specified * environment. - * Used by ContinuationContext. + * Used by ContinuationScope.
* * @param obj The object identifying the context. * @param name The name of the context being returned, relative to * @@ -454,7 +454,7 @@ : null; } - // Used by ContinuationContext + // Used by ContinuationScope static Resolver getResolver(Object obj, Name name, Context nameCtx, Hashtable<?,?> environment) throws NamingException { Object answer; diff -r 656931ff4345 src/jdk.naming.dns/share/classes/com/sun/jndi/dns/DnsContext.java --- a/src/jdk.naming.dns/share/classes/com/sun/jndi/dns/DnsContext.java Fri May 18 11:52:53 2018 +0100 +++ b/src/jdk.naming.dns/share/classes/com/sun/jndi/dns/DnsContext.java Sun May 20 17:57:55 2018 +0100 @@ -34,6 +34,7 @@ import javax.naming.spi.DirectoryManager; import com.sun.jndi.toolkit.ctx.*; +import com.sun.jndi.toolkit.ctx.Continuation; /**