--- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp	2019-07-04 11:52:46.000000000 +0200
+++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp	2019-07-04 11:52:40.000000000 +0200
@@ -1754,6 +1754,10 @@
   // Attempt stack-locking ...
   orptr (tmpReg, markOopDesc::unlocked_value);
+  if (EnableValhalla && !UseBiasedLocking) {
+    // Mask always_locked bit such that we go to the slow path if object is a value type
+    andptr(tmpReg, ~markOopDesc::biased_lock_bit_in_place);
+  }
   movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
   lock();
   cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes()));      // Updates tmpReg
--- old/src/hotspot/share/opto/graphKit.cpp	2019-07-04 11:52:51.000000000 +0200
+++ new/src/hotspot/share/opto/graphKit.cpp	2019-07-04 11:52:46.000000000 +0200
@@ -3591,12 +3591,6 @@
   if( !GenerateSynchronizationCode )
     return NULL;                // Not locking things?
 
-  // We cannot lock on a value type
-  const TypeOopPtr* objptr = _gvn.type(obj)->make_oopptr();
-  if (objptr->can_be_value_type()) {
-    gen_value_type_guard(obj, 1);
-  }
-
   if (stopped())                // Dead monitor?
     return NULL;
 
--- old/src/hotspot/share/opto/macro.cpp	2019-07-04 11:52:57.000000000 +0200
+++ new/src/hotspot/share/opto/macro.cpp	2019-07-04 11:52:52.000000000 +0200
@@ -2418,6 +2418,48 @@
     mem_phi->init_req(2, mem);
   }
 
+  const TypeOopPtr* objptr = _igvn.type(obj)->make_oopptr();
+  if (objptr->can_be_value_type()) {
+    // Deoptimize and re-execute if a value
+    assert(EnableValhalla, "should only be used if value types are enabled");
+    Node* mark = make_load(slow_path, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
+    Node* value_mask = _igvn.MakeConX(markOopDesc::always_locked_pattern);
+    Node* is_value = _igvn.transform(new AndXNode(mark, value_mask));
+    Node* cmp = _igvn.transform(new CmpXNode(is_value, value_mask));
+    Node* bol = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
+    Node* unc_ctrl = generate_slow_guard(&slow_path, bol, NULL);
+
+    int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_class_check, Deoptimization::Action_none);
+    address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
+    const TypePtr* no_memory_effects = NULL;
+    JVMState* jvms = lock->jvms();
+    CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
+                                           jvms->bci(), no_memory_effects);
+
+    unc->init_req(TypeFunc::Control, unc_ctrl);
+    unc->init_req(TypeFunc::I_O, lock->i_o());
+    unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
+    unc->init_req(TypeFunc::FramePtr, lock->in(TypeFunc::FramePtr));
+    unc->init_req(TypeFunc::ReturnAdr, lock->in(TypeFunc::ReturnAdr));
+    unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
+    unc->set_cnt(PROB_UNLIKELY_MAG(4));
+    copy_call_debug_info(lock, unc);
+
+    assert(unc->peek_monitor_box() == box, "wrong monitor");
+    assert(unc->peek_monitor_obj() == obj, "wrong monitor");
+
+    // pop monitor and push obj back on stack: we trap before the monitorenter
+    unc->pop_monitor();
+    unc->grow_stack(unc->jvms(), 1);
+    unc->set_stack(unc->jvms(), unc->jvms()->stk_size()-1, obj);
+
+    _igvn.register_new_node_with_optimizer(unc);
+
+    Node* ctrl = _igvn.transform(new ProjNode(unc, TypeFunc::Control));
+    Node* halt = _igvn.transform(new HaltNode(ctrl, lock->in(TypeFunc::FramePtr)));
+    C->root()->add_req(halt);
+  }
+
   // Make slow path call
   CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
                                   OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
--- old/src/hotspot/share/opto/macroArrayCopy.cpp	2019-07-04 11:53:03.000000000 +0200
+++ new/src/hotspot/share/opto/macroArrayCopy.cpp	2019-07-04 11:52:57.000000000 +0200
@@ -135,7 +135,7 @@
   return if_slow;
 }
 
-inline Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
+Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
   return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
 }
 
--- old/src/hotspot/share/runtime/init.cpp	2019-07-04 11:53:09.000000000 +0200
+++ new/src/hotspot/share/runtime/init.cpp	2019-07-04 11:53:03.000000000 +0200
@@ -113,6 +113,7 @@
   codeCache_init();
   VM_Version_init();
   os_init_globals();
+  VMRegImpl::set_regName(); // need this before generate_stubs (for printing oop maps).
   stubRoutines_init1();
   jint status = universe_init();  // dependent on codeCache_init and
                                   // stubRoutines_init1 and metaspace_init.
@@ -125,7 +126,6 @@
   accessFlags_init();
   templateTable_init();
   InterfaceSupport_init();
-  VMRegImpl::set_regName(); // need this before generate_stubs (for printing oop maps).
   SharedRuntime::generate_stubs();
   universe2_init();  // dependent on codeCache_init and stubRoutines_init1
   javaClasses_init();// must happen after vtable initialization, before referenceProcessor_init