--- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-09-03 10:46:18.878452880 +0200
+++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-09-03 10:46:18.466452886 +0200
@@ -3712,26 +3712,6 @@
   }
 }
 
-void MacroAssembler::test_oop_is_value(Register oop, Register temp, Label* is_value, Label* is_not_value) {
-  const int mask = Universe::oop_metadata_valuetype_mask();
-  assert((is_value != NULL) || (is_not_value != NULL), "Need a label to jump to");
-  assert((is_value == NULL) ^ (is_not_value == NULL), "Need one label");
-#ifdef _LP64
-  if (UseCompressedClassPointers) {
-    movl(temp, Address(oop, oopDesc::klass_offset_in_bytes()));
-  } else
-#endif
-    movptr(temp, Address(oop, oopDesc::klass_offset_in_bytes()));
-
-  andl(temp, mask);
-  testl(temp, temp);
-  if (is_not_value != NULL) {
-    jcc(Assembler::zero, *is_not_value);
-  } else {
-    jcc(Assembler::notZero, *is_value);
-  }
-}
-
 void MacroAssembler::os_breakpoint() {
   // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
   // (e.g., MSVC can't call ps() otherwise)
--- old/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2018-09-03 10:46:19.942452865 +0200
+++ new/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2018-09-03 10:46:19.618452870 +0200
@@ -108,8 +108,6 @@
   void test_flat_array_klass(Register klass, Register temp_reg, Label& is_flat_array);
   void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
 
-  void test_oop_is_value(Register oop, Register temp, Label* is_value, Label* is_not_value);
-
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   void pd_patch_instruction(address branch, address target) {
--- old/src/hotspot/cpu/x86/templateTable_x86.cpp 2018-09-03 10:46:20.706452855 +0200
+++ new/src/hotspot/cpu/x86/templateTable_x86.cpp 2018-09-03 10:46:20.406452859 +0200
@@ -2511,29 +2511,37 @@
 void TemplateTable::if_acmp(Condition cc) {
   transition(atos, vtos);
   // assume branch is more often taken than not (loops use backward branches)
-  Label not_taken, is_null;
+  Label taken, not_taken;
   __ pop_ptr(rdx);
 
-  if (EnableValhalla) {
-    // Handle value types
-    const int mask = Universe::oop_metadata_valuetype_mask();
+  const int is_value_mask = markOopDesc::always_locked_pattern;
+  if (EnableValhalla && UsePointerPerturbation) {
+    Label is_null;
     __ testptr(rdx, rdx);
     __ jcc(Assembler::zero, is_null);
-    __ movl(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
-    __ andptr(rbx, mask);
-    // Check if a shift is required for perturbation to affect aligned bits
-    if (mask == KlassPtrValueTypeMask && ObjectAlignmentInBytes <= KlassAlignmentInBytes) {
-      assert((mask >> LogKlassAlignmentInBytes) == 1, "invalid shift");
-      __ shrptr(rbx, LogKlassAlignmentInBytes);
-    } else {
-      assert(mask < ObjectAlignmentInBytes, "invalid mask");
-    }
+    __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
+    __ andptr(rbx, is_value_mask);
+    __ cmpl(rbx, is_value_mask);
+    __ setb(Assembler::equal, rbx);
+    __ movzbl(rbx, rbx);
     __ orptr(rdx, rbx);
     __ bind(is_null);
   }
 
   __ cmpoop(rdx, rax);
+
+  if (EnableValhalla && !UsePointerPerturbation) {
+    __ jcc(Assembler::notEqual, (cc == not_equal) ? taken : not_taken);
+    __ testptr(rdx, rdx);
+    __ jcc(Assembler::zero, (cc == equal) ? taken : not_taken);
+    __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
+    __ andptr(rbx, is_value_mask);
+    __ cmpl(rbx, is_value_mask);
+    cc = (cc == equal) ? not_equal : equal;
+  }
+
+  __ jcc(j_not(cc), not_taken);
+  __ bind(taken);
   branch(false, false);
   __ bind(not_taken);
   __ profile_not_taken_branch(rax);
--- old/src/hotspot/share/memory/universe.cpp 2018-09-03 10:46:21.382452846 +0200
+++ new/src/hotspot/share/memory/universe.cpp 2018-09-03 10:46:21.010452851 +0200
@@ -165,8 +165,6 @@
 address Universe::_narrow_ptrs_base;
 uint64_t Universe::_narrow_klass_range = (uint64_t(max_juint)+1);
 
-int Universe::_oop_metadata_valuetype_mask = KlassPtrValueTypeMask;
-
 void Universe::basic_type_classes_do(void f(Klass*)) {
   f(boolArrayKlassObj());
   f(byteArrayKlassObj());
--- old/src/hotspot/share/memory/universe.hpp 2018-09-03 10:46:21.930452838 +0200
+++ new/src/hotspot/share/memory/universe.hpp 2018-09-03 10:46:21.610452842 +0200
@@ -198,9 +198,6 @@
   // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
   static uint64_t _narrow_klass_range;
 
-  // value type using klass alignment encoded as oop metadata
-  static int _oop_metadata_valuetype_mask;
-
   // array of dummy objects used with +FullGCAlot
   debug_only(static objArrayOop _fullgc_alot_dummy_array;)
   // index of next entry to clear
@@ -458,13 +455,8 @@
   static void set_narrow_klass_shift(int shift) {
     assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
     _narrow_klass._shift = shift;
-    if (shift == LogKlassAlignmentInBytes) {
-      _oop_metadata_valuetype_mask = 1;
-    }
   }
 
-  static int oop_metadata_valuetype_mask() { return _oop_metadata_valuetype_mask; }
-
   // Reserve Java heap and determine CompressedOops mode
   static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
--- old/src/hotspot/share/oops/arrayKlass.cpp 2018-09-03 10:46:22.574452829 +0200
+++ new/src/hotspot/share/oops/arrayKlass.cpp 2018-09-03 10:46:22.194452834 +0200
@@ -96,7 +96,6 @@
   set_layout_helper(Klass::_lh_neutral_value);
   set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
   JFR_ONLY(INIT_ID(this);)
-  assert(!ptr_is_value_type(this), "ArrayKlass encoded as value type");
 }
 
 Symbol* ArrayKlass::create_element_klass_array_name(Klass* element_klass, TRAPS) {
--- old/src/hotspot/share/oops/instanceKlass.cpp 2018-09-03 10:46:23.290452819 +0200
+++ new/src/hotspot/share/oops/instanceKlass.cpp 2018-09-03 10:46:22.950452824 +0200
@@ -364,7 +364,7 @@
     ik = new (loader_data, size, THREAD) InstanceClassLoaderKlass(parser);
   } else if (parser.is_value_type()) {
     // value type
-    ik = new (loader_data, size, true, THREAD) ValueKlass(parser);
+    ik = new (loader_data, size, THREAD) ValueKlass(parser);
   } else {
     // normal
     ik = new (loader_data, size, THREAD) InstanceKlass(parser, InstanceKlass::_misc_kind_other);
--- old/src/hotspot/share/oops/klass.cpp 2018-09-03 10:46:23.842452812 +0200
+++ new/src/hotspot/share/oops/klass.cpp 2018-09-03 10:46:23.538452816 +0200
@@ -180,17 +180,8 @@
   return NULL;
 }
 
-void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, bool is_value, TRAPS) throw () {
-  // Pad size in case need adjust to even/odd klass ptr
-  uintptr_t addr = (uintptr_t) Metaspace::allocate(loader_data, word_size + (1 << LogKlassAlignment), MetaspaceObj::ClassType, THREAD);
-  // values are odd, otherwise make even (and vice versa)
-  if (is_value ^ (((addr & KlassPtrValueTypeMask) >> LogKlassAlignmentInBytes) != 0)) {
-    addr += (1 << LogKlassAlignmentInBytes);
-  }
-  assert(is_aligned(addr, (1 << LogKlassAlignmentInBytes)), "Klass base alignment incorrect");
-  assert( is_value || ((addr & KlassPtrValueTypeMask) == 0), "Klass even alignment incorrect");
"Klass even alignment incorrect"); - assert(!is_value || ((addr & KlassPtrValueTypeMask) != 0), "Klass odd alignment incorrect"); - return (void*) addr; +void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() { + return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD); } // "Normal" instantiation is preceeded by a MetaspaceObj allocation --- old/src/hotspot/share/oops/klass.hpp 2018-09-03 10:46:24.358452804 +0200 +++ new/src/hotspot/share/oops/klass.hpp 2018-09-03 10:46:24.062452809 +0200 @@ -192,10 +192,7 @@ Klass(KlassID id); Klass() : _id(KlassID(-1)) { assert(DumpSharedSpaces || UseSharedSpaces, "only for cds"); } - void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, bool is_value, TRAPS) throw(); - void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() { - return operator new (size, loader_data, word_size, false, THREAD); - } + void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw(); public: int id() { return _id; } @@ -741,9 +738,6 @@ static Klass* decode_klass_not_null(narrowKlass v); static Klass* decode_klass(narrowKlass v); - - static bool decode_ptr_is_value_type(narrowKlass v); - static bool ptr_is_value_type(Klass* v); }; #endif // SHARE_VM_OOPS_KLASS_HPP --- old/src/hotspot/share/oops/klass.inline.hpp 2018-09-03 10:46:24.886452797 +0200 +++ new/src/hotspot/share/oops/klass.inline.hpp 2018-09-03 10:46:24.578452801 +0200 @@ -71,12 +71,4 @@ return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v); } -inline bool Klass::decode_ptr_is_value_type(narrowKlass v) { - return (v & Universe::oop_metadata_valuetype_mask()) != 0; -} - -inline bool Klass::ptr_is_value_type(Klass* v) { - return ((uintptr_t)v & KlassPtrValueTypeMask) != 0; -} - #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP --- old/src/hotspot/share/oops/oop.hpp 2018-09-03 10:46:25.410452790 +0200 +++ new/src/hotspot/share/oops/oop.hpp 2018-09-03 10:46:25.106452794 +0200 @@ -88,9 +88,6 @@ inline Klass** klass_addr(); inline narrowKlass* compressed_klass_addr(); - // oop only test (does not load klass) - inline bool klass_is_value_type(); - inline void set_klass(Klass* k); static inline void release_set_klass(HeapWord* mem, Klass* klass); --- old/src/hotspot/share/oops/oop.inline.hpp 2018-09-03 10:46:25.950452782 +0200 +++ new/src/hotspot/share/oops/oop.inline.hpp 2018-09-03 10:46:25.634452787 +0200 @@ -137,23 +137,10 @@ return compressed_klass_addr((HeapWord*)this); } -// oop only test (does not load klass) -bool oopDesc::klass_is_value_type() { - if (UseCompressedClassPointers) { - return Klass::decode_ptr_is_value_type(_metadata._compressed_klass); - } else { - return Klass::ptr_is_value_type(_metadata._klass); - } -} - - #define CHECK_SET_KLASS(k) \ do { \ assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass"); \ assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \ - assert(!EnableValhalla || (k->is_value() && Klass::ptr_is_value_type(k)) \ - || (!k->is_value() && !Klass::ptr_is_value_type(k)), \ - "Klass value encoding"); \ } while (0) void oopDesc::set_klass(Klass* k) { --- old/src/hotspot/share/oops/valueKlass.hpp 2018-09-03 10:46:26.618452773 +0200 +++ new/src/hotspot/share/oops/valueKlass.hpp 2018-09-03 10:46:26.282452778 +0200 @@ -49,7 +49,6 @@ *((address*)adr_unpack_handler()) = NULL; assert(pack_handler() == NULL, "pack handler not null"); *((int*)adr_default_value_offset()) = 0; - 
-    assert(Klass::ptr_is_value_type(this), "Value type klass ptr encoding");
     set_prototype_header(markOopDesc::always_locked_prototype());
   }
 
--- old/src/hotspot/share/opto/compile.cpp 2018-09-03 10:46:27.566452760 +0200
+++ new/src/hotspot/share/opto/compile.cpp 2018-09-03 10:46:27.166452766 +0200
@@ -4666,29 +4666,6 @@
   }
 }
 
-Node* Compile::load_is_value_bit(PhaseGVN* phase, Node* oop) {
-  // Load the klass pointer and check if it's odd, i.e., if it defines a value type
-  // is_value = (klass & oop_metadata_valuetype_mask) >> LogKlassAlignmentInBytes
-  Node* k_adr = phase->transform(new AddPNode(oop, oop, phase->MakeConX(oopDesc::klass_offset_in_bytes())));
-  Node* klass = NULL;
-  if (UseCompressedClassPointers) {
-    klass = phase->transform(new LoadNKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT->make_narrowklass(), MemNode::unordered));
-  } else {
-    klass = phase->transform(new LoadKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT, MemNode::unordered));
-  }
-  const int mask = Universe::oop_metadata_valuetype_mask();
-  Node* is_value = phase->transform(new CastP2XNode(NULL, klass));
-  is_value = phase->transform(new AndXNode(is_value, phase->MakeConX(mask)));
-  // Check if a shift is required for perturbation to affect aligned bits of oop
-  if (mask == KlassPtrValueTypeMask && ObjectAlignmentInBytes <= KlassAlignmentInBytes) {
-    assert((mask >> LogKlassAlignmentInBytes) == 1, "invalid shift");
-    is_value = phase->transform(new URShiftXNode(is_value, phase->intcon(LogKlassAlignmentInBytes)));
-  } else {
-    assert(mask < ObjectAlignmentInBytes, "invalid mask");
-  }
-  return is_value;
-}
-
 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
--- old/src/hotspot/share/opto/compile.hpp 2018-09-03 10:46:28.446452748 +0200
+++ new/src/hotspot/share/opto/compile.hpp 2018-09-03 10:46:28.134452752 +0200
@@ -1356,7 +1356,6 @@
   // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
   static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);
 
-  Node* load_is_value_bit(PhaseGVN* phase, Node* oop);
   Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);
 
   // Auxiliary method for randomized fuzzing/stressing
--- old/src/hotspot/share/opto/graphKit.cpp 2018-09-03 10:46:28.970452741 +0200
+++ new/src/hotspot/share/opto/graphKit.cpp 2018-09-03 10:46:28.666452745 +0200
@@ -3297,6 +3297,13 @@
   return res;
 }
 
+Node* GraphKit::is_always_locked(Node* obj) {
+  Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+  Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+  Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+  return _gvn.transform(new AndXNode(mark, value_mask));
+}
+
 // Deoptimize if 'obj' is a value type
 void GraphKit::gen_value_type_guard(Node* obj, int nargs) {
   assert(EnableValhalla, "should only be used if value types are enabled");
@@ -3304,12 +3311,10 @@
   if (obj->is_ValueTypeBase()) {
     bol = intcon(0);
   } else {
-    Node* kls = load_object_klass(obj);
-    Node* flags_addr = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-    Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
-    Node* is_value = _gvn.transform(new AndINode(flags, intcon(JVM_ACC_VALUE)));
-    Node* cmp = _gvn.transform(new CmpINode(is_value, intcon(0)));
-    bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+    Node* is_value = is_always_locked(obj);
+    Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+    Node* cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+    bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
   }
   { BuildCutout unless(this, bol, PROB_MAX);
     inc_sp(nargs);
--- old/src/hotspot/share/opto/graphKit.hpp 2018-09-03 10:46:29.502452733 +0200
+++ new/src/hotspot/share/opto/graphKit.hpp 2018-09-03 10:46:29.202452737 +0200
@@ -823,6 +823,7 @@
   Node* gen_checkcast( Node *subobj, Node* superkls, Node* *failure_control = NULL );
 
+  Node* is_always_locked(Node* obj);
   void gen_value_type_guard(Node* obj, int nargs = 0);
   void gen_value_type_array_guard(Node* ary, Node* obj, Node* elem_klass = NULL);
   void gen_flattened_array_guard(Node* ary, int nargs = 0);
--- old/src/hotspot/share/opto/macro.cpp 2018-09-03 10:46:30.146452724 +0200
+++ new/src/hotspot/share/opto/macro.cpp 2018-09-03 10:46:29.746452730 +0200
@@ -2734,7 +2734,7 @@
   transform_later(slowpath_false);
   Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
   transform_later(rawmem);
-  Node* mark_node = mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype()));
+  Node* mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype()));
   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
   if (UseCompressedClassPointers) {
--- old/src/hotspot/share/opto/mulnode.cpp 2018-09-03 10:46:30.698452717 +0200
+++ new/src/hotspot/share/opto/mulnode.cpp 2018-09-03 10:46:30.378452721 +0200
@@ -596,6 +596,13 @@
       return usr;
     }
   }
+
+  if (con == markOopDesc::always_locked_pattern) {
+    assert(EnableValhalla, "should only be used for value types");
+    if (in(1)->is_Load() && phase->type(in(1)->in(MemNode::Address))->is_valuetypeptr()) {
+      return in(2); // Obj is known to be a value type
+    }
+  }
   return MulNode::Identity(phase);
 }
--- old/src/hotspot/share/opto/parse.hpp 2018-09-03 10:46:31.666452703 +0200
+++ new/src/hotspot/share/opto/parse.hpp 2018-09-03 10:46:31.246452709 +0200
@@ -551,8 +551,8 @@
   bool seems_stable_comparison() const;
 
   void do_ifnull(BoolTest::mask btest, Node* c);
-  void do_if(BoolTest::mask btest, Node* c);
-  void do_acmp(BoolTest::mask& btest, Node* a, Node* b);
+  void do_if(BoolTest::mask btest, Node* c, bool new_path = false, Node** ctrl_taken = NULL);
+  void do_acmp(BoolTest::mask btest, Node* a, Node* b);
   int repush_if_args();
   void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
   void sharpen_type_after_if(BoolTest::mask btest,
--- old/src/hotspot/share/opto/parse2.cpp 2018-09-03 10:46:32.722452689 +0200
+++ new/src/hotspot/share/opto/parse2.cpp 2018-09-03 10:46:32.426452693 +0200
@@ -1573,7 +1573,7 @@
 }
 
 //------------------------------------do_if------------------------------------
-void Parse::do_if(BoolTest::mask btest, Node* c) {
+void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
   int target_bci = iter().get_dest();
 
   Block* branch_block = successor_for_bci(target_bci);
@@ -1671,7 +1671,14 @@
     profile_taken_branch(target_bci);
     adjust_map_after_if(taken_btest, c, prob, branch_block);
     if (!stopped()) {
-      merge(target_bci);
+      if (ctrl_taken != NULL) {
+        // Don't merge but save taken branch to be wired by caller
+        *ctrl_taken = control();
+      } else if (new_path) {
+        merge_new_path(target_bci);
+      } else {
+        merge(target_bci);
+      }
     }
   }
 }
@@ -1692,7 +1699,7 @@
   }
 }
 
-void Parse::do_acmp(BoolTest::mask& btest, Node* a, Node* b) {
+void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) {
   // In the case were both operands might be value types, we need to
   // use the new acmp implementation. Otherwise, i.e. if one operand
   // is not a value type, we can use the old acmp implementation.
@@ -1704,6 +1711,41 @@
     return;
   }
 
+  Node* ctrl = NULL;
+  bool safe_for_replace = true;
+  if (!UsePointerPerturbation) {
+    // Emit old acmp before new acmp for quick a != b check
+    cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(_gvn.transform(cmp));
+    if (btest == BoolTest::ne) {
+      do_if(btest, cmp, true);
+      if (stopped()) {
+        return; // Never equal
+      }
+    } else if (btest == BoolTest::eq) {
+      Node* is_equal = NULL;
+      {
+        PreserveJVMState pjvms(this);
+        do_if(btest, cmp, true, &is_equal);
+        if (!stopped()) {
+          // Not equal, skip valuetype check
+          ctrl = new RegionNode(3);
+          ctrl->init_req(1, control());
+          _gvn.set_type(ctrl, Type::CONTROL);
+          record_for_igvn(ctrl);
+          safe_for_replace = false;
+        }
+      }
+      if (is_equal == NULL) {
+        assert(ctrl != NULL, "no control left");
+        set_control(_gvn.transform(ctrl));
+        return; // Never equal
+      }
+      set_control(is_equal);
+    }
+  }
+
+  // Null check operand before loading the is_value bit
   bool speculate = false;
   if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) {
     // Operand 'b' is never null, swap operands to avoid null check
@@ -1717,33 +1759,51 @@
       swap(a, b);
     }
   }
-
-  // Null check operand before loading the is_value bit
-  Node* region = new RegionNode(2);
-  Node* is_value = new PhiNode(region, TypeX_X);
-  Node* null_ctl = top();
   inc_sp(2);
-  Node* not_null_a = null_check_oop(a, &null_ctl, speculate, true, speculate);
+  Node* null_ctl = top();
+  Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate);
   assert(!stopped(), "operand is always null");
   dec_sp(2);
+  Node* region = new RegionNode(2);
+  Node* is_value = new PhiNode(region, TypeX_X);
   if (null_ctl != top()) {
     assert(!speculate, "should never be null");
     region->add_req(null_ctl);
     is_value->add_req(_gvn.MakeConX(0));
   }
-  Node* value_bit = C->load_is_value_bit(&_gvn, not_null_a);
+  Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+  if (UsePointerPerturbation) {
+    Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes());
+    Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+    Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1)));
+    Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask));
+    Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1)));
+    is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63))));
+  } else {
+    is_value->init_req(1, is_always_locked(not_null_a));
+  }
   region->init_req(1, control());
-  is_value->set_req(1, value_bit);
 
   set_control(_gvn.transform(region));
   is_value = _gvn.transform(is_value);
 
-  // Perturbe oop if operand is a value type to make comparison fail
-  Node* pert = _gvn.transform(new AddPNode(a, a, is_value));
-  cmp = _gvn.transform(new CmpPNode(pert, b));
+  if (UsePointerPerturbation) {
+    // Perturb oop if operand is a value type to make comparison fail
+    Node* pert = _gvn.transform(new AddPNode(a, a, is_value));
+    cmp = _gvn.transform(new CmpPNode(pert, b));
+  } else {
+    // Check for a value type because we already know that operands are equal
+    cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+    btest = (btest == BoolTest::eq) ? BoolTest::ne : BoolTest::eq;
+  }
   cmp = optimize_cmp_with_klass(cmp);
   do_if(btest, cmp);
+
+  if (ctrl != NULL) {
+    ctrl->init_req(2, control());
+    set_control(_gvn.transform(ctrl));
+  }
 }
 
 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
--- old/src/hotspot/share/opto/subnode.cpp 2018-09-03 10:46:33.610452676 +0200
+++ new/src/hotspot/share/opto/subnode.cpp 2018-09-03 10:46:33.198452682 +0200
@@ -983,14 +983,6 @@
     if (cmp != NULL) {
       return cmp;
     }
-    if ( TypePtr::NULL_PTR->higher_equal(phase->type(a)) &&
-        !TypePtr::NULL_PTR->higher_equal(phase->type(b))) {
-      // Operand 'b' is never null, swap operands to avoid null check
-      Node* is_value = phase->C->load_is_value_bit(phase, b);
-      set_req(1, phase->transform(new AddPNode(b, b, is_value)));
-      set_req(2, a);
-      return this;
-    }
   }
 
   // Normalize comparisons between Java mirrors into comparisons of the low-
@@ -1103,7 +1095,7 @@
       // RawPtr comparison
       return NULL;
     }
-    assert(EnableValhalla, "unexpected perturbed oop");
+    assert(EnableValhalla && UsePointerPerturbation, "unexpected perturbed oop");
     return in(1);
   }
   return NULL;
--- old/src/hotspot/share/runtime/globals.hpp 2018-09-03 10:46:34.370452666 +0200
+++ new/src/hotspot/share/runtime/globals.hpp 2018-09-03 10:46:34.070452670 +0200
@@ -2679,6 +2679,9 @@
                                                                             \
   develop(bool, StressValueTypeReturnedAsFields, false,                     \
           "stress return of fields instead of a value type reference")      \
+                                                                            \
+  experimental(bool, UsePointerPerturbation, false,                         \
+               "With value types, use the perturbation scheme for acmp")    \
 
 
 #define VM_FLAGS(develop,                                                   \
--- old/src/hotspot/share/utilities/globalDefinitions.hpp 2018-09-03 10:46:35.142452655 +0200
+++ new/src/hotspot/share/utilities/globalDefinitions.hpp 2018-09-03 10:46:34.834452660 +0200
@@ -456,7 +456,6 @@
 const int LogKlassAlignment     = LogKlassAlignmentInBytes - LogHeapWordSize;
 const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes;
 const int KlassAlignment        = KlassAlignmentInBytes / HeapWordSize;
-const int KlassPtrValueTypeMask = (1 << (LogKlassAlignmentInBytes + 1)) - 1;
 
 // Maximal size of heap where unscaled compression can be used. Also upper bound
 // for heap placement: 4GB.
--- old/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestNewAcmp.java 2018-09-03 10:46:35.654452648 +0200
+++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestNewAcmp.java 2018-09-03 10:46:35.358452652 +0200
@@ -30,29 +30,35 @@
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch
  *                   -XX:+EnableValhalla -XX:TypeProfileLevel=222
  *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 0
 * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222
 *                   -XX:+AlwaysIncrementalInline
 *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 0
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch
 *                   -XX:+EnableValhalla -XX:TypeProfileLevel=222
 *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 1
 * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222
 *                   -XX:+AlwaysIncrementalInline
 *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 1
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch
 *                   -XX:+EnableValhalla -XX:TypeProfileLevel=222
 *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 2
 * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222
 *                   -XX:+AlwaysIncrementalInline
 *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test*
+ *                   -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmpAlways*
 *                   compiler.valhalla.valuetypes.TestNewAcmp 2
 */
@@ -1336,6 +1342,23 @@
         return m.getName().startsWith("testNot");
     }
 
+    // Tests with profiling
+    public boolean cmpAlwaysEqual1(Object a, Object b) {
+        return a == b;
+    }
+
+    public boolean cmpAlwaysEqual2(Object a, Object b) {
+        return a != b;
+    }
+
+    public boolean cmpAlwaysUnEqual1(Object a, Object b) {
+        return a == b;
+    }
+
+    public boolean cmpAlwaysUnEqual2(Object a, Object b) {
+        return a != b;
+    }
+
     protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
     protected static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
@@ -1400,7 +1423,7 @@
         // Run tests
         for (Method m : getClass().getMethods()) {
            if (m.getName().startsWith("test")) {
-                // Do same warmup runs
+                // Do some warmup runs
                runTest(m, args, 1000, nullMode);
                // Make sure method is compiled
                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION);
@@ -1409,6 +1432,13 @@
                runTest(m, args, 1, nullMode);
            }
        }
+
+        for (int i = 0; i < 10_000; ++i) {
+            Asserts.assertTrue(cmpAlwaysEqual1(args[1], args[1]));
+            Asserts.assertFalse(cmpAlwaysEqual2(args[1], args[1]));
+            Asserts.assertFalse(cmpAlwaysUnEqual1(args[1], args[2]));
+            Asserts.assertTrue(cmpAlwaysUnEqual2(args[1], args[2]));
+        }
     }
 
     public static void main(String[] args) throws Exception {
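
Note on the new encoding (a sketch, not part of the patch): the old scheme tagged value-type Klass pointers by placing them at odd Klass-alignment addresses in Metaspace (see the removed Klass::operator new and KlassPtrValueTypeMask above); the new scheme recognizes a value type purely from its object header, since ValueKlass installs markOopDesc::always_locked_prototype() as the prototype header, so the mark word of every value-type instance carries markOopDesc::always_locked_pattern. The stand-alone C++ model below illustrates the test emitted by GraphKit::is_always_locked() and TemplateTable::if_acmp(); the PATTERN constant is an assumed placeholder, not the real value from markOop.hpp, and acmp_eq only sketches the intended bytecode semantics under these assumptions.

// Sketch only: mark-word based value-type test. PATTERN is a placeholder
// for markOopDesc::always_locked_pattern, whose real value is not shown here.
#include <cassert>
#include <cstdint>

static const uintptr_t PATTERN = 0x405; // assumed placeholder bit pattern

// An oop denotes a value type iff all pattern bits are set in its mark word,
// mirroring AndX(mark, value_mask) == value_mask in the compiled code.
static bool is_value_type(uintptr_t mark) {
  return (mark & PATTERN) == PATTERN;
}

// Intended acmp semantics (cf. the !UsePointerPerturbation block of if_acmp):
// operands are equal iff they are the same reference, and either null or not
// a value type.
static bool acmp_eq(uintptr_t a, uintptr_t b, uintptr_t mark_of_a) {
  if (a != b) return false;          // quick pointer check comes first
  if (a == 0) return true;           // null == null
  return !is_value_type(mark_of_a);  // value types never compare equal here
}

int main() {
  assert(is_value_type(PATTERN));
  assert(is_value_type(PATTERN | 0x1000));    // other header bits are ignored
  assert(!is_value_type(0x1));                // ordinary object header
  assert(acmp_eq(0, 0, 0));                   // null == null
  assert(!acmp_eq(0x1000, 0x1000, PATTERN));  // same value-type oop: unequal
  return 0;
}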
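
Note on -XX:+UsePointerPerturbation (again a sketch under the same placeholder pattern): instead of branching, Parse::do_acmp() derives a perturbation offset from the mark word and adds it to one operand via AddP, so the pointer compare can never succeed for a value type; the interpreter variant in if_acmp() achieves the same effect by OR-ing a computed 0/1 into the oop (setb/movzbl/orptr). The arithmetic of the XorX/AndX/SubX/RShiftX node chain, assuming a 64-bit intptr_t and an arithmetic right shift:

// Sketch of the branch-free perturbation offset built in Parse::do_acmp():
//   not_mark     = mark ^ -1          (XorXNode)
//   andn         = not_mark & mask    (AndXNode)
//   neg_if_value = andn - 1           (SubXNode, -1 iff andn == 0)
//   offset       = neg_if_value >> 63 (RShiftXNode, sign bit smeared)
#include <cassert>
#include <cstdint>

static const intptr_t PATTERN = 0x405; // assumed placeholder bit pattern

static intptr_t perturbation_offset(intptr_t mark) {
  intptr_t not_mark = mark ^ -1;
  intptr_t andn = not_mark & PATTERN;  // 0 iff all pattern bits set in mark
  intptr_t neg_if_value = andn - 1;    // negative only for value types
  return neg_if_value >> 63;           // arithmetic shift: -1 or 0
}

int main() {
  assert(perturbation_offset(PATTERN) == -1);          // value type: perturb
  assert(perturbation_offset(PATTERN | 0x1000) == -1); // value type: perturb
  assert(perturbation_offset(0x1) == 0);               // plain object: keep
  assert(perturbation_offset(0x0) == 0);
  return 0;
}

Since the flag is declared experimental, product builds also need -XX:+UnlockExperimentalVMOptions to turn it on.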