--- old/src/hotspot/cpu/aarch64/globals_aarch64.hpp 2019-03-11 14:24:46.306356259 +0100 +++ new/src/hotspot/cpu/aarch64/globals_aarch64.hpp 2019-03-11 14:24:46.090356262 +0100 @@ -67,6 +67,9 @@ define_pd_global(bool, PreserveFramePointer, false); +define_pd_global(bool, ValueTypePassFieldsAsArgs, false); +define_pd_global(bool, ValueTypeReturnedAsFields, false); + // GC Ergo Flags define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread --- old/src/hotspot/cpu/ppc/globals_ppc.hpp 2019-03-11 14:24:46.714356254 +0100 +++ new/src/hotspot/cpu/ppc/globals_ppc.hpp 2019-03-11 14:24:46.514356257 +0100 @@ -71,6 +71,9 @@ define_pd_global(bool, PreserveFramePointer, false); +define_pd_global(bool, ValueTypePassFieldsAsArgs, false); +define_pd_global(bool, ValueTypeReturnedAsFields, false); + // GC Ergo Flags define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread. --- old/src/hotspot/cpu/sparc/globals_sparc.hpp 2019-03-11 14:24:47.138356248 +0100 +++ new/src/hotspot/cpu/sparc/globals_sparc.hpp 2019-03-11 14:24:46.918356251 +0100 @@ -76,6 +76,9 @@ define_pd_global(bool, PreserveFramePointer, false); +define_pd_global(bool, ValueTypePassFieldsAsArgs, false); +define_pd_global(bool, ValueTypeReturnedAsFields, false); + // GC Ergo Flags define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread --- old/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp 2019-03-11 14:24:47.578356242 +0100 +++ new/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp 2019-03-11 14:24:47.362356245 +0100 @@ -132,6 +132,7 @@ case T_DOUBLE : i = 6; break; case T_OBJECT : // fall through case T_ARRAY : i = 7; break; + case T_VALUETYPE : i = 8; break; default : ShouldNotReachHere(); } assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds"); @@ -152,6 +153,7 @@ case T_DOUBLE : i = 8; break; case T_OBJECT : i = 9; break; case T_ARRAY : i = 9; break; + case T_VALUETYPE : i = 
10; break; default : ShouldNotReachHere(); } assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, --- old/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp 2019-03-11 14:24:48.014356236 +0100 +++ new/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp 2019-03-11 14:24:47.798356239 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" #include "nativeInst_x86.hpp" +#include "oops/objArrayKlass.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" @@ -154,6 +155,57 @@ } +// Implementation of LoadFlattenedArrayStub + +LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) { + _array = array; + _index = index; + _result = result; + // Tell the register allocator that the runtime call will scratch rax. 
+ _scratch_reg = FrameMap::rax_oop_opr; + _info = new CodeEmitInfo(info); +} + +void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) { + assert(__ rsp_offset() == 0, "frame size should be fixed"); + __ bind(_entry); + ce->store_parameter(_array->as_register(), 1); + ce->store_parameter(_index->as_register(), 0); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id))); + ce->add_call_info_here(_info); + ce->verify_oop_map(_info); + if (_result->as_register() != rax) { + __ movptr(_result->as_register(), rax); + } + __ jmp(_continuation); +} + + +// Implementation of StoreFlattenedArrayStub + +StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) { + _array = array; + _index = index; + _value = value; + // Tell the register allocator that the runtime call will scratch rax. + _scratch_reg = FrameMap::rax_oop_opr; + _info = new CodeEmitInfo(info); +} + + +void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) { + assert(__ rsp_offset() == 0, "frame size should be fixed"); + __ bind(_entry); + ce->store_parameter(_array->as_register(), 2); + ce->store_parameter(_index->as_register(), 1); + ce->store_parameter(_value->as_register(), 0); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id))); + ce->add_call_info_here(_info); + ce->verify_oop_map(_info); + __ jmp(_continuation); +} + + // Implementation of NewInstanceStub NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { @@ -206,11 +258,13 @@ // Implementation of NewObjectArrayStub -NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { +NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, + CodeEmitInfo* info, bool is_value_type) { _klass_reg = klass_reg; _result = result; _length = length; _info = new 
CodeEmitInfo(info); + _is_value_type = is_value_type; } @@ -219,7 +273,11 @@ __ bind(_entry); assert(_length->as_register() == rbx, "length must in rbx,"); assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); + if (_is_value_type) { + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id))); + } else { + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); + } ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == rax, "result must in rax,"); @@ -229,16 +287,28 @@ // Implementation of MonitorAccessStubs -MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info) +MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg) : MonitorAccessStub(obj_reg, lock_reg) { _info = new CodeEmitInfo(info); + _throw_imse_stub = throw_imse_stub; + _scratch_reg = scratch_reg; + if (_throw_imse_stub != NULL) { + assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be"); + } } void MonitorEnterStub::emit_code(LIR_Assembler* ce) { assert(__ rsp_offset() == 0, "frame size should be fixed"); __ bind(_entry); + if (_throw_imse_stub != NULL) { + // When we come here, _obj_reg has already been checked to be non-null. 
+ Register mark = _scratch_reg->as_register(); + __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes())); + __ testl(mark, markOopDesc::always_locked_pattern); + __ jcc(Assembler::notZero, *_throw_imse_stub->entry()); + } ce->store_parameter(_obj_reg->as_register(), 1); ce->store_parameter(_lock_reg->as_register(), 0); Runtime1::StubID enter_id; --- old/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp 2019-03-11 14:24:48.414356230 +0100 +++ new/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp 2019-03-11 14:24:48.218356233 +0100 @@ -50,7 +50,7 @@ #else opr = as_long_opr(reg2, reg); #endif // _LP64 - } else if (type == T_OBJECT || type == T_ARRAY) { + } else if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) { opr = as_oop_opr(reg); } else if (type == T_METADATA) { opr = as_metadata_opr(reg); --- old/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp 2019-03-11 14:24:48.822356225 +0100 +++ new/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp 2019-03-11 14:24:48.618356227 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -199,7 +199,7 @@ __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); } else if (opr->is_constant()) { LIR_Const* const_opr = opr->as_constant_ptr(); - if (const_opr->type() == T_OBJECT) { + if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) { __ push_oop(const_opr->as_jobject()); } else if (const_opr->type() == T_INT) { __ push_jint(const_opr->as_jint()); @@ -629,6 +629,7 @@ break; } + case T_VALUETYPE: // Fall through case T_OBJECT: { if (patch_code != lir_patch_none) { jobject2reg_with_patching(dest->as_register(), info); @@ -711,6 +712,7 @@ __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); break; + case T_VALUETYPE: // Fall through case T_OBJECT: __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject()); break; @@ -750,6 +752,7 @@ __ movptr(as_Address(addr), c->as_jint_bits()); break; + case T_VALUETYPE: // fall through case T_OBJECT: // fall through case T_ARRAY: if (c->as_jobject() == NULL) { @@ -838,14 +841,14 @@ } #endif assert(src->is_single_cpu(), "must match"); - if (src->type() == T_OBJECT) { + if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) { __ verify_oop(src->as_register()); } move_regs(src->as_register(), dest->as_register()); } else if (dest->is_double_cpu()) { #ifdef _LP64 - if (src->type() == T_OBJECT || src->type() == T_ARRAY) { + if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) { // Surprising to me but we can see move of a long to t_object __ verify_oop(src->as_register()); move_regs(src->as_register(), dest->as_register_lo()); @@ -916,7 +919,7 @@ if (src->is_single_cpu()) { Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); - if (type == T_OBJECT || type == T_ARRAY) { + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) { __ verify_oop(src->as_register()); __ movptr (dst, src->as_register()); } 
else if (type == T_METADATA) { @@ -962,7 +965,7 @@ PatchingStub* patch = NULL; Register compressed_src = rscratch1; - if (type == T_ARRAY || type == T_OBJECT) { + if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) { __ verify_oop(src->as_register()); #ifdef _LP64 if (UseCompressedOops && !wide) { @@ -1007,6 +1010,7 @@ break; } + case T_VALUETYPE: // fall through case T_ARRAY: // fall through case T_OBJECT: // fall through if (UseCompressedOops && !wide) { @@ -1097,7 +1101,7 @@ assert(dest->is_register(), "should not call otherwise"); if (dest->is_single_cpu()) { - if (type == T_ARRAY || type == T_OBJECT) { + if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) { __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); __ verify_oop(dest->as_register()); } else if (type == T_METADATA) { @@ -1138,7 +1142,7 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { if (src->is_single_stack()) { - if (type == T_OBJECT || type == T_ARRAY) { + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) { __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); } else { @@ -1177,7 +1181,7 @@ LIR_Address* addr = src->as_address_ptr(); Address from_addr = as_Address(addr); - if (addr->base()->type() == T_OBJECT) { + if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) { __ verify_oop(addr->base()->as_pointer_register()); } @@ -1230,6 +1234,7 @@ break; } + case T_VALUETYPE: // fall through case T_OBJECT: // fall through case T_ARRAY: // fall through if (UseCompressedOops && !wide) { @@ -1339,7 +1344,7 @@ patching_epilog(patch, patch_code, addr->base()->as_register(), info); } - if (type == T_ARRAY || type == T_OBJECT) { + if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) { #ifdef _LP64 if (UseCompressedOops && !wide) { __ decode_heap_oop(dest->as_register()); @@ -1576,7 +1581,7 @@ 
Register len = op->len()->as_register(); LP64_ONLY( __ movslq(len, len); ) - if (UseSlowPath || + if (UseSlowPath || op->type() == T_VALUETYPE || (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { __ jmp(*op->stub()->entry()); @@ -1674,20 +1679,22 @@ assert_different_registers(obj, k_RInfo, klass_RInfo); - __ cmpptr(obj, (int32_t)NULL_WORD); - if (op->should_profile()) { - Label not_null; - __ jccb(Assembler::notEqual, not_null); - // Object is null; update MDO and exit - Register mdo = klass_RInfo; - __ mov_metadata(mdo, md->constant_encoding()); - Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); - int header_bits = BitData::null_seen_byte_constant(); - __ orb(data_addr, header_bits); - __ jmp(*obj_is_null); - __ bind(not_null); - } else { - __ jcc(Assembler::equal, *obj_is_null); + if (op->need_null_check()) { + __ cmpptr(obj, (int32_t)NULL_WORD); + if (op->should_profile()) { + Label not_null; + __ jccb(Assembler::notEqual, not_null); + // Object is null; update MDO and exit + Register mdo = klass_RInfo; + __ mov_metadata(mdo, md->constant_encoding()); + Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); + int header_bits = BitData::null_seen_byte_constant(); + __ orb(data_addr, header_bits); + __ jmp(*obj_is_null); + __ bind(not_null); + } else { + __ jcc(Assembler::equal, *obj_is_null); + } } if (!k->is_loaded()) { @@ -1898,6 +1905,26 @@ } +void LIR_Assembler::emit_opFlattenedStoreCheck(LIR_OpFlattenedStoreCheck* op) { + Klass* k = (Klass*)(op->element_klass()->constant_encoding()); + assert(k->is_klass(), "must be a loaded klass"); + add_debug_info_for_null_check_here(op->info_for_exception()); + +#ifdef _LP64 + if (UseCompressedClassPointers) { + __ movl(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes())); + __ 
cmp_narrow_klass(op->tmp1()->as_register(), k); + } else { + __ movq(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes())); + __ cmpq(op->tmp1()->as_register(), op->tmp2()->as_register()); + } +#else + Unimplemented(); // FIXME +#endif + + __ jcc(Assembler::notEqual, *op->stub()->entry()); + __ bind(*op->stub()->continuation()); +} void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { @@ -2494,7 +2521,7 @@ } else { #ifdef _LP64 Register r_lo; - if (right->type() == T_OBJECT || right->type() == T_ARRAY) { + if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) { r_lo = right->as_register(); } else { r_lo = right->as_register_lo(); @@ -2607,15 +2634,15 @@ Register reg1 = opr1->as_register(); if (opr2->is_single_cpu()) { // cpu register - cpu register - if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { + if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) { __ cmpoop(reg1, opr2->as_register()); } else { - assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); + assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?"); __ cmpl(reg1, opr2->as_register()); } } else if (opr2->is_stack()) { // cpu register - stack - if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { + if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) { __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); } else { __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); @@ -2625,7 +2652,7 @@ LIR_Const* c = opr2->as_constant_ptr(); if (c->type() == T_INT) { __ cmpl(reg1, c->as_jint()); - } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { + } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) { 
// In 64bit oops are single register jobject o = c->as_jobject(); if (o == NULL) { @@ -2725,7 +2752,7 @@ } else if (opr1->is_address() && opr2->is_constant()) { LIR_Const* c = opr2->as_constant_ptr(); #ifdef _LP64 - if (c->type() == T_OBJECT || c->type() == T_ARRAY) { + if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) { assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); __ movoop(rscratch1, c->as_jobject()); } @@ -2737,7 +2764,7 @@ LIR_Address* addr = opr1->as_address_ptr(); if (c->type() == T_INT) { __ cmpl(as_Address(addr), c->as_jint()); - } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { + } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) { #ifdef _LP64 // %%% Make this explode if addr isn't reachable until we figure out a // better strategy by giving noreg as the temp for as_Address @@ -3018,6 +3045,21 @@ } +void LIR_Assembler::arraycopy_flat_check(Register obj, Register tmp, CodeStub* slow_path) { + Address klass_addr = Address(obj, oopDesc::klass_offset_in_bytes()); + if (UseCompressedClassPointers) { + __ movl(tmp, klass_addr); + LP64_ONLY(__ decode_klass_not_null(tmp)); + } else { + __ movptr(tmp, klass_addr); + } + __ movl(tmp, Address(tmp, Klass::layout_helper_offset())); + __ sarl(tmp, Klass::_lh_array_tag_shift); + __ cmpl(tmp, Klass::_lh_array_tag_vt_value); + __ jcc(Assembler::equal, *slow_path->entry()); +} + + // This code replaces a call to arraycopy; no exception may // be thrown in this code, they must be thrown in the System.arraycopy // activation frame; we could save some checks if this would not be the case @@ -3038,6 +3080,30 @@ BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; + if (flags & LIR_OpArrayCopy::always_slow_path) { + __ jmp(*stub->entry()); + __ bind(*stub->continuation()); + return; + } + + if (flags & LIR_OpArrayCopy::src_flat_check) { + arraycopy_flat_check(src, tmp, stub); + } + + if (flags & LIR_OpArrayCopy::dst_flat_check) { + arraycopy_flat_check(dst, tmp, stub); + } + + if (basic_type == T_VALUETYPE) { + assert(flags & (LIR_OpArrayCopy::always_slow_path | + LIR_OpArrayCopy::src_flat_check | + LIR_OpArrayCopy::dst_flat_check), "must have checked"); + // If either src or dst is (or maybe) a flattened array, one of the 3 checks + // above would have caught it, and taken the slow path. So when we come here, + // the array must be a (non-flat) object array. + basic_type = T_OBJECT; + } + // if we don't know anything, just go through the generic arraycopy if (default_type == NULL) { // save outgoing arguments on stack in case call to System.arraycopy is needed --- old/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp 2019-03-11 14:24:49.254356219 +0100 +++ new/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp 2019-03-11 14:24:49.058356221 +0100 @@ -55,6 +55,8 @@ _deopt_handler_size = NOT_LP64(10) LP64_ONLY(17) }; + void arraycopy_flat_check(Register obj, Register tmp, CodeStub* slow_path); + public: void store_parameter(Register r, int offset_from_esp_in_words); --- old/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp 2019-03-11 14:24:49.654356213 +0100 +++ new/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp 2019-03-11 14:24:49.454356216 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "ci/ciArray.hpp" #include "ci/ciObjArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp" +#include "ci/ciValueKlass.hpp" #include "gc/shared/c1/barrierSetC1.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" @@ -270,6 +271,20 @@ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci); } +void LIRGenerator::flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info) { + LIR_Opr tmp1 = new_register(T_METADATA); + LIR_Opr tmp2 = LIR_OprFact::illegalOpr; + +#ifdef _LP64 + if (!UseCompressedClassPointers) { + tmp2 = new_register(T_METADATA); + __ metadata2reg(element_klass->constant_encoding(), tmp2); + } +#endif + + __ flattened_store_check(value, element_klass, tmp1, tmp2, store_check_info); +} + //---------------------------------------------------------------------- // visitor functions //---------------------------------------------------------------------- @@ -285,7 +300,7 @@ LIR_Opr lock = new_register(T_INT); // Need a scratch register for biased locking on x86 LIR_Opr scratch = LIR_OprFact::illegalOpr; - if (UseBiasedLocking) { + if (UseBiasedLocking || x->maybe_valuetype()) { scratch = new_register(T_INT); } @@ -293,11 +308,17 @@ if (x->needs_null_check()) { info_for_exception = state_for(x); } + + CodeStub* throw_imse_stub = x->maybe_valuetype() ? 
+ new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, + LIR_OprFact::illegalOpr, state_for(x)) + : NULL; + // this CodeEmitInfo must not have the xhandlers because here the // object is already locked (xhandlers expect object to be unlocked) CodeEmitInfo* info = state_for(x, x->state(), true); monitor_enter(obj.result(), lock, syncTempOpr(), scratch, - x->monitor_no(), info_for_exception, info); + x->monitor_no(), info_for_exception, info, throw_imse_stub); } @@ -1221,6 +1242,21 @@ __ move(reg, result); } +void LIRGenerator::do_NewValueTypeInstance (NewValueTypeInstance* x) { + // Mapping to do_NewInstance (same code) + CodeEmitInfo* info = state_for(x, x->state()); + x->set_to_object_type(); + LIR_Opr reg = result_register_for(x->type()); + new_instance(reg, x->klass(), x->is_unresolved(), + FrameMap::rcx_oop_opr, + FrameMap::rdi_oop_opr, + FrameMap::rsi_oop_opr, + LIR_OprFact::illegalOpr, + FrameMap::rdx_metadata_opr, info); + LIR_Opr result = rlock_result(x); + __ move(reg, result); + +} void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { CodeEmitInfo* info = state_for(x, x->state()); @@ -1268,13 +1304,17 @@ length.load_item_force(FrameMap::rbx_opr); LIR_Opr len = length.result(); - CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); - ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); + ciKlass* obj = (ciKlass*) x->exact_type(); + CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_never_null()); if (obj == ciEnv::unloaded_ciobjarrayklass()) { BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); } klass2reg_with_patching(klass_reg, obj, patching_info); - __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); + if (x->is_never_null()) { + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path); + } else { + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); + } 
LIR_Opr result = rlock_result(x); __ move(reg, result); @@ -1353,6 +1393,10 @@ (x->needs_exception_state() ? state_for(x) : state_for(x, x->state_before(), true /*ignore_xhandler*/)); + if (x->is_never_null()) { + __ null_check(obj.result(), new CodeEmitInfo(info_for_exception)); + } + CodeStub* stub; if (x->is_incompatible_class_change_check()) { assert(patching_info == NULL, "can't patch this"); @@ -1371,7 +1415,7 @@ __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, - x->profiled_method(), x->profiled_bci()); + x->profiled_method(), x->profiled_bci(), x->is_never_null()); } --- old/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp 2019-03-11 14:24:50.066356207 +0100 +++ new/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp 2019-03-11 14:24:49.866356210 +0100 @@ -62,6 +62,10 @@ movptr(hdr, Address(obj, hdr_offset)); // and mark it as unlocked orptr(hdr, markOopDesc::unlocked_value); + if (EnableValhalla && !UseBiasedLocking) { + // Mask always_locked bit such that we go to the slow path if object is a value type + andptr(hdr, ~markOopDesc::biased_lock_bit_in_place); + } // save unlocked object header into the displaced header location on the stack movptr(Address(disp_hdr, 0), hdr); // test if object header is still the same (i.e. unlocked), and if so, store the @@ -150,7 +154,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); - if (UseBiasedLocking && !len->is_valid()) { + if ((UseBiasedLocking || EnableValhalla) && !len->is_valid()) { + // Need to copy markOopDesc::always_locked_pattern for values. 
assert_different_registers(obj, klass, len, t1, t2); movptr(t1, Address(klass, Klass::prototype_header_offset())); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); --- old/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp 2019-03-11 14:24:50.470356202 +0100 +++ new/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp 2019-03-11 14:24:50.274356205 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1103,6 +1103,7 @@ case new_type_array_id: case new_object_array_id: + case new_value_array_id: { Register length = rbx; // Incoming Register klass = rdx; // Incoming @@ -1110,8 +1111,10 @@ if (id == new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); - } else { + } else if (id == new_object_array_id) { __ set_info("new_object_array", dont_gc_arguments); + } else { + __ set_info("new_value_array", dont_gc_arguments); } #ifdef ASSERT @@ -1121,12 +1124,28 @@ Register t0 = obj; __ movl(t0, Address(klass, Klass::layout_helper_offset())); __ sarl(t0, Klass::_lh_array_tag_shift); - int tag = ((id == new_type_array_id) - ? Klass::_lh_array_tag_type_value - : Klass::_lh_array_tag_obj_value); - __ cmpl(t0, tag); - __ jcc(Assembler::equal, ok); - __ stop("assert(is an array klass)"); + switch (id) { + case new_type_array_id: + __ cmpl(t0, Klass::_lh_array_tag_type_value); + __ jcc(Assembler::equal, ok); + __ stop("assert(is a type array klass)"); + break; + case new_object_array_id: + case new_value_array_id: // <-- needs to be renamed to new_non_null_array_id! + // FIXME: + // The VM currently does not distinguish between anewarray of + // "[QV;" (elements are non-nullable) vs "[LV;" (elements may be null). + // Instead, both are treated essentially as "[QV;". 
This code needs + // to be reimplemented after proper support of "[LV;" is implemented in the VM. + // + __ cmpl(t0, Klass::_lh_array_tag_obj_value); + __ jcc(Assembler::equal, ok); + __ cmpl(t0, Klass::_lh_array_tag_vt_value); + __ jcc(Assembler::equal, ok); + __ stop("assert(is an object or value array klass)"); + break; + default: ShouldNotReachHere(); + } __ should_not_reach_here(); __ bind(ok); } @@ -1179,6 +1198,8 @@ if (id == new_type_array_id) { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { + // Runtime1::new_object_array handles both object and value arrays. + // See comments in the ASSERT block above. call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); } @@ -1211,6 +1232,44 @@ } break; + case load_flattened_array_id: + { + StubFrame f(sasm, "load_flattened_array", dont_gc_arguments); + OopMap* map = save_live_registers(sasm, 3); + + // Called with store_parameter and not C abi + + f.load_argument(1, rax); // rax,: array + f.load_argument(0, rbx); // rbx,: index + int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), rax, rbx); + + oop_maps = new OopMapSet(); + oop_maps->add_gc_map(call_offset, map); + restore_live_registers_except_rax(sasm); + + // rax,: loaded element at array[index] + __ verify_oop(rax); + } + break; + + case store_flattened_array_id: + { + StubFrame f(sasm, "store_flattened_array", dont_gc_arguments); + OopMap* map = save_live_registers(sasm, 4); + + // Called with store_parameter and not C abi + + f.load_argument(2, rax); // rax,: array + f.load_argument(1, rbx); // rbx,: index + f.load_argument(0, rcx); // rcx,: value + int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), rax, rbx, rcx); + + oop_maps = new OopMapSet(); + oop_maps->add_gc_map(call_offset, map); + restore_live_registers_except_rax(sasm); + } + break; + case register_finalizer_id: { __ 
set_info("register_finalizer", dont_gc_arguments); @@ -1312,11 +1371,17 @@ break; case throw_incompatible_class_change_error_id: - { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); + { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; + case throw_illegal_monitor_state_exception_id: + { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments); + oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false); + } + break; + case slow_subtype_check_id: { // Typical calling sequence: --- old/src/hotspot/cpu/x86/c1_globals_x86.hpp 2019-03-11 14:24:50.894356196 +0100 +++ new/src/hotspot/cpu/x86/c1_globals_x86.hpp 2019-03-11 14:24:50.682356199 +0100 @@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false); define_pd_global(bool, ProfileTraps, false); define_pd_global(bool, UseOnStackReplacement, true ); -define_pd_global(bool, TieredCompilation, false); +define_pd_global(bool, TieredCompilation, true); define_pd_global(intx, CompileThreshold, 1500 ); define_pd_global(intx, OnStackReplacePercentage, 933 ); --- old/src/hotspot/cpu/x86/frame_x86.cpp 2019-03-11 14:24:51.298356190 +0100 +++ new/src/hotspot/cpu/x86/frame_x86.cpp 2019-03-11 14:24:51.098356193 +0100 @@ -144,13 +144,16 @@ if ((address)sender_sp >= thread->stack_base()) { return false; } - sender_unextended_sp = sender_sp; // On Intel the return_address is always the word on the stack sender_pc = (address) *(sender_sp-1); // Note: frame::sender_sp_offset is only valid for compiled frame - saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset); - } + intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); + saved_fp = *saved_fp_addr; + // Repair the sender sp if this is a method with scalarized value type args 
+ sender_sp = repair_sender_sp(sender_sp, saved_fp_addr); + sender_unextended_sp = sender_sp; + } // If the potential sender is the interpreter then we can do some more checking if (Interpreter::contains(sender_pc)) { @@ -454,7 +457,6 @@ // frame owned by optimizing compiler assert(_cb->frame_size() >= 0, "must have non-zero frame size"); intptr_t* sender_sp = unextended_sp() + _cb->frame_size(); - intptr_t* unextended_sp = sender_sp; // On Intel the return_address is always the word on the stack address sender_pc = (address) *(sender_sp-1); @@ -463,6 +465,9 @@ // It is only an FP if the sender is an interpreter frame (or C1?). intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); + // Repair the sender sp if this is a method with scalarized value type args + sender_sp = repair_sender_sp(sender_sp, saved_fp_addr); + if (map->update_map()) { // Tell GC to use argument oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag @@ -479,7 +484,7 @@ } assert(sender_sp != sp(), "must have changed"); - return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc); + return frame(sender_sp, sender_sp, *saved_fp_addr, sender_pc); } @@ -585,6 +590,7 @@ switch (type) { case T_OBJECT : + case T_VALUETYPE: case T_ARRAY : { oop obj; if (method->is_native()) { @@ -686,6 +692,22 @@ void frame::pd_ps() {} #endif +// Check for a method with scalarized value type arguments that needs +// a stack repair and return the repaired sender stack pointer. +intptr_t* frame::repair_sender_sp(intptr_t* sender_sp, intptr_t** saved_fp_addr) const { + CompiledMethod* cm = _cb->as_compiled_method_or_null(); + if (cm != NULL && cm->method()->needs_stack_repair()) { + // The stack increment resides just below the saved rbp on the stack + // and does not account for the return address. 
+ intptr_t* sp_inc_addr = (intptr_t*) (saved_fp_addr - 1); + int sp_inc = (*sp_inc_addr) / wordSize; + int real_frame_size = sp_inc + 1; // Add size of return address + assert(real_frame_size >= _cb->frame_size(), "invalid frame size"); + sender_sp = unextended_sp() + real_frame_size; + } + return sender_sp; +} + void JavaFrameAnchor::make_walkable(JavaThread* thread) { // last frame set? if (last_Java_sp() == NULL) return; --- old/src/hotspot/cpu/x86/frame_x86.hpp 2019-03-11 14:24:51.706356185 +0100 +++ new/src/hotspot/cpu/x86/frame_x86.hpp 2019-03-11 14:24:51.506356188 +0100 @@ -123,6 +123,9 @@ return (intptr_t*) addr_at(offset); } + // Support for scalarized value type calling convention + intptr_t* repair_sender_sp(intptr_t* sender_sp, intptr_t** saved_fp_addr) const; + #ifdef ASSERT // Used in frame::sender_for_{interpreter,compiled}_frame static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp); --- old/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp 2019-03-11 14:24:52.522356173 +0100 +++ new/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp 2019-03-11 14:24:52.322356176 +0100 @@ -40,6 +40,7 @@ bool is_not_null = (decorators & IS_NOT_NULL) != 0; bool atomic = (decorators & MO_RELAXED) != 0; + assert(type != T_VALUETYPE, "Not supported yet"); switch (type) { case T_OBJECT: case T_ARRAY: { @@ -105,6 +106,7 @@ bool is_not_null = (decorators & IS_NOT_NULL) != 0; bool atomic = (decorators & MO_RELAXED) != 0; + assert(type != T_VALUETYPE, "Not supported yet"); switch (type) { case T_OBJECT: case T_ARRAY: { --- old/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp 2019-03-11 14:24:52.926356168 +0100 +++ new/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp 2019-03-11 14:24:52.726356171 +0100 @@ -197,6 +197,7 @@ Register tmp2) { BLOCK_COMMENT("ZBarrierSetAssembler::store_at {"); + assert(type != T_VALUETYPE, "Not supported yet"); // Verify oop store if (type == T_OBJECT || type == T_ARRAY) { // Note that src could 
be noreg, which means we --- old/src/hotspot/cpu/x86/globals_x86.hpp 2019-03-11 14:24:53.338356162 +0100 +++ new/src/hotspot/cpu/x86/globals_x86.hpp 2019-03-11 14:24:53.138356165 +0100 @@ -102,6 +102,9 @@ define_pd_global(bool, ThreadLocalHandshakes, false); #endif +define_pd_global(bool, ValueTypePassFieldsAsArgs, LP64_ONLY(true) NOT_LP64(false)); +define_pd_global(bool, ValueTypeReturnedAsFields, LP64_ONLY(true) NOT_LP64(false)); + #define ARCH_FLAGS(develop, \ product, \ diagnostic, \ --- old/src/hotspot/cpu/x86/interp_masm_x86.cpp 2019-03-11 14:24:53.750356156 +0100 +++ new/src/hotspot/cpu/x86/interp_masm_x86.cpp 2019-03-11 14:24:53.550356159 +0100 @@ -31,6 +31,7 @@ #include "oops/markOop.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" @@ -973,7 +974,7 @@ movbool(rbx, do_not_unlock_if_synchronized); movbool(do_not_unlock_if_synchronized, false); // reset the flag - // get method access flags + // get method access flags movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); movl(rcx, Address(rcx, Method::access_flags_offset())); testl(rcx, JVM_ACC_SYNCHRONIZED); @@ -1097,11 +1098,9 @@ notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA } - // remove activation - // get sender sp - movptr(rbx, - Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); if (StackReservedPages > 0) { + movptr(rbx, + Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // testing if reserved zone needs to be re-enabled Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx); Label no_reserved_zone_enabling; @@ -1122,6 +1121,39 @@ bind(no_reserved_zone_enabling); } + + // remove activation + // get sender sp + movptr(rbx, + Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); + + if (state == atos && ValueTypeReturnedAsFields) { + Label skip; + // Test if the 
return type is a value type + movptr(rdi, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); + movptr(rdi, Address(rdi, Method::const_offset())); + load_unsigned_byte(rdi, Address(rdi, ConstMethod::result_type_offset())); + cmpl(rdi, T_VALUETYPE); + jcc(Assembler::notEqual, skip); + + // We are returning a value type, load its fields into registers +#ifndef _LP64 + super_call_VM_leaf(StubRoutines::load_value_type_fields_in_regs()); +#else + // Load fields from a buffered value with a value class specific handler + load_klass(rdi, rax); + movptr(rdi, Address(rdi, InstanceKlass::adr_valueklass_fixed_block_offset())); + movptr(rdi, Address(rdi, ValueKlass::unpack_handler_offset())); + + testptr(rdi, rdi); + jcc(Assembler::equal, skip); + + call(rdi); +#endif + // call above kills the value in rbx. Reload it. + movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); + bind(skip); + } leave(); // remove frame anchor pop(ret_addr); // get return address mov(rsp, rbx); // set sp to sender sp @@ -1184,6 +1216,10 @@ // Load (object->mark() | 1) into swap_reg %rax orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); + if (EnableValhalla && !UseBiasedLocking) { + // For slow path is_always_locked, using biased, which is never natural for !UseBiasedLocking + andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place); + } + // Save (object->mark() | 1) into BasicLock's displaced header movptr(Address(lock_reg, mark_offset), swap_reg); --- old/src/hotspot/cpu/x86/interpreterRT_x86.hpp 2019-03-11 14:24:54.570356145 +0100 +++ new/src/hotspot/cpu/x86/interpreterRT_x86.hpp 2019-03-11 14:24:54.370356148 +0100 @@ -53,6 +53,7 @@ void pass_double(); #endif // AMD64 void pass_object(); + void pass_valuetype(); public: // Creation --- old/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp 2019-03-11 14:24:54.978356139 +0100 +++ new/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp 2019-03-11 14:24:54.774356142 +0100 @@ -72,6 +72,10 @@ box (offset(),
jni_offset() + 1); } +void InterpreterRuntime::SignatureHandlerGenerator::pass_valuetype() { + box (offset(), jni_offset() + 1); +} + void InterpreterRuntime::SignatureHandlerGenerator::move(int from_offset, int to_offset) { __ movl(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset))); __ movl(Address(to(), to_offset * wordSize), temp()); @@ -136,6 +140,13 @@ // pass address of from intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0)); *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr; + _from -= Interpreter::stackElementSize; + } + + virtual void pass_valuetype() { + // pass address of from + intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0)); + *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr; _from -= Interpreter::stackElementSize; } --- old/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp 2019-03-11 14:24:55.382356134 +0100 +++ new/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp 2019-03-11 14:24:55.182356137 +0100 @@ -291,6 +291,10 @@ #endif } +void InterpreterRuntime::SignatureHandlerGenerator::pass_valuetype() { + pass_object(); +} + void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) { // generate code to handle arguments iterate(fingerprint); @@ -356,6 +360,11 @@ } } + virtual void pass_valuetype() { + // values are handled with oops, like objects + pass_object(); + } + virtual void pass_float() { jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); @@ -451,6 +460,11 @@ } } + virtual void pass_valuetype() { + // values are handled with oops, like objects + pass_object(); + } + virtual void pass_float() { jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0)); --- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2019-03-11 14:24:55.798356128 +0100 +++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2019-03-11 14:24:55.594356131 +0100 @@ -47,6 +47,7 @@ #include "runtime/stubRoutines.hpp" #include 
"runtime/thread.hpp" #include "utilities/macros.hpp" +#include "vmreg_x86.inline.hpp" #include "crc32c.h" #ifdef COMPILER2 #include "opto/intrinsicnode.hpp" @@ -2425,6 +2426,10 @@ call_VM_leaf(entry_point, 3); } +void MacroAssembler::super_call_VM_leaf(address entry_point) { + MacroAssembler::call_VM_leaf_base(entry_point, 1); +} + void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { pass_arg0(this, arg_0); MacroAssembler::call_VM_leaf_base(entry_point, 1); @@ -3419,6 +3424,51 @@ } } +void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) { + movl(temp_reg, Address(klass, Klass::access_flags_offset())); + testl(temp_reg, JVM_ACC_VALUE); + jcc(Assembler::notZero, is_value); +} + +void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) { + movl(temp_reg, flags); + shrl(temp_reg, ConstantPoolCacheEntry::is_flattenable_field_shift); + andl(temp_reg, 0x1); + testl(temp_reg, temp_reg); + jcc(Assembler::notZero, is_flattenable); +} + +void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& notFlattenable) { + movl(temp_reg, flags); + shrl(temp_reg, ConstantPoolCacheEntry::is_flattenable_field_shift); + andl(temp_reg, 0x1); + testl(temp_reg, temp_reg); + jcc(Assembler::zero, notFlattenable); +} + +void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) { + movl(temp_reg, flags); + shrl(temp_reg, ConstantPoolCacheEntry::is_flattened_field_shift); + andl(temp_reg, 0x1); + testl(temp_reg, temp_reg); + jcc(Assembler::notZero, is_flattened); +} + +void MacroAssembler::test_flat_array_klass(Register klass, Register temp_reg, + Label& is_flat_array) { + movl(temp_reg, Address(klass, Klass::layout_helper_offset())); + sarl(temp_reg, Klass::_lh_array_tag_shift); + cmpl(temp_reg, Klass::_lh_array_tag_vt_value); + jcc(Assembler::equal, is_flat_array); +} + + +void 
MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, + Label& is_flat_array) { + load_klass(temp_reg, oop); + test_flat_array_klass(temp_reg, temp_reg, is_flat_array); +} + void MacroAssembler::os_breakpoint() { // instead of directly emitting a breakpoint, call os:breakpoint for better debugability // (e.g., MSVC can't call ps() otherwise) @@ -4471,7 +4521,11 @@ } void MacroAssembler::verify_oop(Register reg, const char* s) { - if (!VerifyOops) return; + if (!VerifyOops || VerifyAdapterSharing) { + // Below address of the code string confuses VerifyAdapterSharing + // because it may differ between otherwise equivalent adapters. + return; + } // Pass register number to verify_oop_subroutine const char* b = NULL; @@ -4561,7 +4615,11 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) { - if (!VerifyOops) return; + if (!VerifyOops || VerifyAdapterSharing) { + // Below address of the code string confuses VerifyAdapterSharing + // because it may differ between otherwise equivalent adapters. + return; + } // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord); // Pass register number to verify_oop_subroutine @@ -5437,7 +5495,12 @@ #endif // _LP64 // C2 compiled method's prolog code. -void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub) { +void MacroAssembler::verified_entry(Compile* C, int sp_inc) { + int framesize = C->frame_size_in_bytes(); + int bangsize = C->bang_size_in_bytes(); + bool fp_mode_24b = C->in_24_bit_fp_mode(); + int stack_bang_size = C->need_stack_bang(bangsize) ? 
bangsize : 0; + bool is_stub = C->stub_function() != NULL; // WARNING: Initial instruction MUST be 5 bytes or longer so that // NativeJump::patch_verified_entry will be able to patch out the entry @@ -5490,6 +5553,12 @@ } } + if (C->needs_stack_repair()) { + // Save stack increment (also account for fixed framesize and rbp) + assert((sp_inc & (StackAlignmentInBytes-1)) == 0, "stack increment not aligned"); + movptr(Address(rsp, C->sp_inc_offset()), sp_inc + framesize + wordSize); + } + if (VerifyStackAtCalls) { // Majik cookie to verify stack depth framesize -= wordSize; movptr(Address(rsp, framesize), (int32_t)0xbadb100d); @@ -5526,14 +5595,16 @@ } // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers -void MacroAssembler::xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp) { +void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp) { // cnt - number of qwords (8-byte words). // base - start address, qword aligned. 
Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; + movdq(xtmp, val); if (UseAVX >= 2) { - vpxor(xtmp, xtmp, xtmp, AVX_256bit); + punpcklqdq(xtmp, xtmp); + vinserti128_high(xtmp, xtmp); } else { - pxor(xtmp, xtmp); + punpcklqdq(xtmp, xtmp); } jmp(L_zero_64_bytes); @@ -5577,22 +5648,303 @@ BIND(L_end); } -void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, bool is_large) { +// Move a value between registers/stack slots and update the reg_state +bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off) { + if (reg_state[to->value()] == reg_written) { + return true; // Already written + } + if (from != to && bt != T_VOID) { + if (reg_state[to->value()] == reg_readonly) { + return false; // Not yet writable + } + if (from->is_reg()) { + if (to->is_reg()) { + if (from->is_XMMRegister()) { + if (bt == T_DOUBLE) { + movdbl(to->as_XMMRegister(), from->as_XMMRegister()); + } else { + assert(bt == T_FLOAT, "must be float"); + movflt(to->as_XMMRegister(), from->as_XMMRegister()); + } + } else { + movq(to->as_Register(), from->as_Register()); + } + } else { + int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; + assert(st_off != ret_off, "overwriting return address at %d", st_off); + Address to_addr = Address(rsp, st_off); + if (from->is_XMMRegister()) { + if (bt == T_DOUBLE) { + movdbl(to_addr, from->as_XMMRegister()); + } else { + assert(bt == T_FLOAT, "must be float"); + movflt(to_addr, from->as_XMMRegister()); + } + } else { + movq(to_addr, from->as_Register()); + } + } + } else { + Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize); + if (to->is_reg()) { + if (to->is_XMMRegister()) { + if (bt == T_DOUBLE) { + movdbl(to->as_XMMRegister(), from_addr); + } else { + assert(bt == T_FLOAT, "must be float"); + movflt(to->as_XMMRegister(), from_addr); + } + } else { + movq(to->as_Register(), from_addr); + } + } else { + int st_off = 
to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; + assert(st_off != ret_off, "overwriting return address at %d", st_off); + movq(r13, from_addr); + movq(Address(rsp, st_off), r13); + } + } + } + // Update register states + reg_state[from->value()] = reg_writable; + reg_state[to->value()] = reg_written; + return true; +} + +// Read all fields from a value type oop and store the values in registers/stack slots +bool MacroAssembler::unpack_value_helper(const GrowableArray* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index, RegState reg_state[], int ret_off) { + Register fromReg = from->is_reg() ? from->as_Register() : noreg; + assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter"); + + int vt = 1; + bool done = true; + bool mark_done = true; + do { + sig_index--; + BasicType bt = sig->at(sig_index)._bt; + if (bt == T_VALUETYPE) { + vt--; + } else if (bt == T_VOID && + sig->at(sig_index-1)._bt != T_LONG && + sig->at(sig_index-1)._bt != T_DOUBLE) { + vt++; + } else if (SigEntry::is_reserved_entry(sig, sig_index)) { + to_index--; // Ignore this + } else { + assert(to_index >= 0, "invalid to_index"); + VMRegPair pair_to = regs_to[to_index--]; + VMReg to = pair_to.first(); + + if (bt == T_VOID) continue; + + int idx = (int)to->value(); + if (reg_state[idx] == reg_readonly) { + if (idx != from->value()) { + mark_done = false; + } + done = false; + continue; + } else if (reg_state[idx] == reg_written) { + continue; + } else { + assert(reg_state[idx] == reg_writable, "must be writable"); + reg_state[idx] = reg_written; + } + + if (fromReg == noreg) { + int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize; + movq(r10, Address(rsp, st_off)); + fromReg = r10; + } + + int off = sig->at(sig_index)._offset; + assert(off > 0, "offset in object should be positive"); + bool is_oop = (bt == T_OBJECT || bt == T_ARRAY); + + Address fromAddr = Address(fromReg, off); + bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN); + if 
(!to->is_XMMRegister()) { + Register dst = to->is_stack() ? r13 : to->as_Register(); + if (is_oop) { + load_heap_oop(dst, fromAddr); + } else { + load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed); + } + if (to->is_stack()) { + int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; + assert(st_off != ret_off, "overwriting return address at %d", st_off); + movq(Address(rsp, st_off), dst); + } + } else { + if (bt == T_DOUBLE) { + movdbl(to->as_XMMRegister(), fromAddr); + } else { + assert(bt == T_FLOAT, "must be float"); + movflt(to->as_XMMRegister(), fromAddr); + } + } + } + } while (vt != 0); + if (mark_done && reg_state[from->value()] != reg_written) { + // This is okay because no one else will write to that slot + reg_state[from->value()] = reg_writable; + } + return done; +} + +// Unpack all value type arguments passed as oops +void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) { + assert(C->has_scalarized_args(), "value type argument scalarization is disabled"); + Method* method = C->method()->get_Method(); + const GrowableArray* sig_cc = method->adapter()->get_sig_cc(); + assert(sig_cc != NULL, "must have scalarized signature"); + + // Get unscalarized calling convention + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig_cc->length()); + int args_passed = 0; + if (!method->is_static()) { + sig_bt[args_passed++] = T_OBJECT; + } + if (!receiver_only) { + for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + sig_bt[args_passed++] = bt; + if (type2size[bt] == 2) { + sig_bt[args_passed++] = T_VOID; + } + } + } else { + // Only unpack the receiver, all other arguments are already scalarized + InstanceKlass* holder = method->method_holder(); + int rec_len = holder->is_value() ? 
ValueKlass::cast(holder)->extended_sig()->length() : 1; + // Copy scalarized signature but skip receiver, value type delimiters and reserved entries + for (int i = 0; i < sig_cc->length(); i++) { + if (!SigEntry::is_reserved_entry(sig_cc, i)) { + if (SigEntry::skip_value_delimiters(sig_cc, i) && rec_len <= 0) { + sig_bt[args_passed++] = sig_cc->at(i)._bt; + } + rec_len--; + } + } + } + VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed); + int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed, false); + + // Get scalarized calling convention + int args_passed_cc = SigEntry::fill_sig_bt(sig_cc, sig_bt); + VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc->length()); + int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc, false); + + // Check if we need to extend the stack for unpacking + int sp_inc = (args_on_stack_cc - args_on_stack) * VMRegImpl::stack_slot_size; + if (sp_inc > 0) { + // Save the return address, adjust the stack (make sure it is properly + // 16-byte aligned) and copy the return address to the new top of the stack. + pop(r13); + sp_inc = align_up(sp_inc, StackAlignmentInBytes); + subptr(rsp, sp_inc); + push(r13); + } else { + // The scalarized calling convention needs less stack space than the unscalarized one. + // No need to extend the stack, the caller will take care of these adjustments. 
+ sp_inc = 0; + } + + // Initialize register/stack slot states (make all writable) + int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_cc); + int max_reg = VMRegImpl::stack2reg(max_stack)->value(); + RegState* reg_state = NEW_RESOURCE_ARRAY(RegState, max_reg); + for (int i = 0; i < max_reg; ++i) { + reg_state[i] = reg_writable; + } + // Set all source registers/stack slots to readonly to prevent accidental overwriting + for (int i = 0; i < args_passed; ++i) { + VMReg reg = regs[i].first(); + if (!reg->is_valid()) continue; + if (reg->is_stack()) { + // Update source stack location by adding stack increment + reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size); + regs[i] = reg; + } + assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds"); + reg_state[reg->value()] = reg_readonly; + } + + // Emit code for unpacking value type arguments + // We try multiple times and eventually start spilling to resolve (circular) dependencies + bool done = false; + for (int i = 0; i < 2*args_passed_cc && !done; ++i) { + done = true; + bool spill = (i > args_passed_cc); // Start spilling? + // Iterate over all arguments (in reverse) + for (int from_index = args_passed-1, to_index = args_passed_cc-1, sig_index = sig_cc->length()-1; sig_index >= 0; sig_index--) { + if (SigEntry::is_reserved_entry(sig_cc, sig_index)) { + to_index--; // Skip reserved entry + } else { + assert(from_index >= 0, "index out of bounds"); + VMReg reg = regs[from_index].first(); + if (spill && reg->is_valid() && reg_state[reg->value()] == reg_readonly) { + // Spill argument to be able to write the source and resolve circular dependencies + VMReg spill_reg = reg->is_XMMRegister() ? 
xmm8->as_VMReg() : r14->as_VMReg(); + bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state, sp_inc); + assert(res, "Spilling should not fail"); + // Set spill_reg as new source and update state + reg = spill_reg; + regs[from_index].set1(reg); + reg_state[reg->value()] = reg_readonly; + spill = false; // Do not spill again in this round + } + BasicType bt = sig_cc->at(sig_index)._bt; + if (SigEntry::skip_value_delimiters(sig_cc, sig_index)) { + assert(to_index >= 0, "index out of bounds"); + done &= move_helper(reg, regs_cc[to_index].first(), bt, reg_state, sp_inc); + to_index--; + } else if (!receiver_only || (from_index == 0 && bt == T_VOID)) { + done &= unpack_value_helper(sig_cc, sig_index, reg, regs_cc, to_index, reg_state, sp_inc); + } else { + continue; + } + from_index--; + } + } + } + guarantee(done, "Could not resolve circular dependency when unpacking value type arguments"); + + // Emit code for verified entry and save increment for stack repair on return + verified_entry(C, sp_inc); +} + +// Restores the stack on return +void MacroAssembler::restore_stack(Compile* C) { + int framesize = C->frame_size_in_bytes(); + assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); + // Remove word for return addr already pushed and RBP + framesize -= 2*wordSize; + + if (C->needs_stack_repair()) { + // Restore rbp and repair rsp by adding the stack increment + movq(rbp, Address(rsp, framesize)); + addq(rsp, Address(rsp, C->sp_inc_offset())); + } else { + if (framesize > 0) { + addq(rsp, framesize); + } + pop(rbp); + } +} + +void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only) { // cnt - number of qwords (8-byte words). // base - start address, qword aligned. 
// is_large - if optimizers know cnt is larger than InitArrayShortSize assert(base==rdi, "base register must be edi for rep stos"); - assert(tmp==rax, "tmp register must be eax for rep stos"); + assert(val==rax, "tmp register must be eax for rep stos"); assert(cnt==rcx, "cnt register must be ecx for rep stos"); assert(InitArrayShortSize % BytesPerLong == 0, "InitArrayShortSize should be the multiple of BytesPerLong"); Label DONE; - if (!is_large || !UseXMMForObjInit) { - xorptr(tmp, tmp); - } - if (!is_large) { Label LOOP, LONG; cmpptr(cnt, InitArrayShortSize/BytesPerLong); @@ -5605,7 +5957,7 @@ // Use individual pointer-sized stores for small counts: BIND(LOOP); - movptr(Address(base, cnt, Address::times_ptr), tmp); + movptr(Address(base, cnt, Address::times_ptr), val); decrement(cnt); jccb(Assembler::greaterEqual, LOOP); jmpb(DONE); @@ -5614,12 +5966,11 @@ } // Use longer rep-prefixed ops for non-small counts: - if (UseFastStosb) { + if (UseFastStosb && !word_copy_only) { shlptr(cnt, 3); // convert to number of bytes rep_stosb(); } else if (UseXMMForObjInit) { - movptr(tmp, base); - xmm_clear_mem(tmp, cnt, xtmp); + xmm_clear_mem(base, cnt, val, xtmp); } else { NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM rep_stos(); --- old/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2019-03-11 14:24:56.254356122 +0100 +++ new/src/hotspot/cpu/x86/macroAssembler_x86.hpp 2019-03-11 14:24:56.054356125 +0100 @@ -28,6 +28,7 @@ #include "asm/assembler.hpp" #include "utilities/macros.hpp" #include "runtime/rtmLocking.hpp" +#include "runtime/signature.hpp" // MacroAssembler extends Assembler by frequently used macros. 
// @@ -98,6 +99,16 @@ static bool needs_explicit_null_check(intptr_t offset); static bool uses_implicit_null_check(void* address); + void test_klass_is_value(Register klass, Register temp_reg, Label& is_value); + + void test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable); + void test_field_is_not_flattenable(Register flags, Register temp_reg, Label& notFlattenable); + void test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened); + + // Check klass/oops is flat value type array (oop->_klass->_layout_helper & vt_bit) + void test_flat_array_klass(Register klass, Register temp_reg, Label& is_flat_array); + void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array); + // Required platform-specific helpers for Label::patch_instructions. // They _shadow_ the declarations in AbstractAssembler, which are undefined. void pd_patch_instruction(address branch, address target, const char* file, int line) { @@ -1600,14 +1611,26 @@ void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } // C2 compiled method's prolog code. 
- void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub); + void verified_entry(Compile* C, int sp_inc = 0); + + enum RegState { + reg_readonly, + reg_writable, + reg_written + }; + + // Unpack all value type arguments passed as oops + void unpack_value_args(Compile* C, bool receiver_only); + bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off); + bool unpack_value_helper(const GrowableArray* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index, RegState reg_state[], int ret_off); + void restore_stack(Compile* C); // clear memory of size 'cnt' qwords, starting at 'base'; // if 'is_large' is set, do not try to produce short loop - void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large); + void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only); // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers - void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp); + void xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp); #ifdef COMPILER2 void string_indexof_char(Register str1, Register cnt1, Register ch, Register result, --- old/src/hotspot/cpu/x86/methodHandles_x86.cpp 2019-03-11 14:24:56.670356116 +0100 +++ new/src/hotspot/cpu/x86/methodHandles_x86.cpp 2019-03-11 14:24:56.466356119 +0100 @@ -147,7 +147,11 @@ __ BIND(run_compiled_code); } - const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() : + // The following jump might pass a value type argument that was erased to Object as oop to a + // callee that expects value type arguments to be passed as fields. We need to call the compiled + // value entry (_code->value_entry_point() or _adapter->c2i_value_entry()) which will take care + // of translating between the calling conventions. + const ByteSize entry_offset = for_compiler_entry ? 
Method::from_compiled_value_offset() : Method::from_interpreted_offset(); __ jmp(Address(method, entry_offset)); --- old/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp 2019-03-11 14:24:57.082356110 +0100 +++ new/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp 2019-03-11 14:24:56.874356113 +0100 @@ -463,6 +463,7 @@ case T_INT: case T_ARRAY: case T_OBJECT: + case T_VALUETYPE: case T_ADDRESS: if( reg_arg0 == 9999 ) { reg_arg0 = i; @@ -513,6 +514,15 @@ return align_up(stack, 2); } +const uint SharedRuntime::java_return_convention_max_int = 1; +const uint SharedRuntime::java_return_convention_max_float = 1; +int SharedRuntime::java_return_convention(const BasicType *sig_bt, + VMRegPair *regs, + int total_args_passed) { + Unimplemented(); + return 0; +} + // Patch the callers callsite with entry to compiled code if it exists. static void patch_callers_callsite(MacroAssembler *masm) { Label L; @@ -574,11 +584,13 @@ } static void gen_c2i_adapter(MacroAssembler *masm, - int total_args_passed, - int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray& sig_extended, const VMRegPair *regs, - Label& skip_fixup) { + Label& skip_fixup, + address start, + OopMapSet*& oop_maps, + int& frame_complete, + int& frame_size_in_words) { // Before we get into the guts of the C2I adapter, see if we should be here // at all. We've come from compiled code and are attempting to jump to the // interpreter, which means the caller made a static call to get here @@ -600,7 +612,7 @@ // Since all args are passed on the stack, total_args_passed * interpreter_ // stack_element_size is the // space we need. 
- int extraspace = total_args_passed * Interpreter::stackElementSize; + int extraspace = sig_extended.length() * Interpreter::stackElementSize; // Get return address __ pop(rax); @@ -611,14 +623,14 @@ __ subptr(rsp, extraspace); // Now write the args into the outgoing interpreter space - for (int i = 0; i < total_args_passed; i++) { - if (sig_bt[i] == T_VOID) { - assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); + for (int i = 0; i < sig_extended.length(); i++) { + if (sig_extended.at(i)._bt == T_VOID) { + assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half"); continue; } // st_off points to lowest address on stack. - int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize; + int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize; int next_off = st_off - Interpreter::stackElementSize; // Say 4 args: @@ -668,7 +680,7 @@ NOT_LP64(ShouldNotReachHere()); // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG // T_DOUBLE and T_LONG use two slots in the interpreter - if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { + if (sig_extended.at(i)._bt == T_LONG || sig_extended.at(i)._bt == T_DOUBLE) { // long/double in gpr #ifdef ASSERT // Overwrite the unused slot with known junk @@ -685,7 +697,7 @@ if (!r_2->is_valid()) { __ movflt(Address(rsp, st_off), r_1->as_XMMRegister()); } else { - assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type"); + assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type"); move_c2i_double(masm, r_1->as_XMMRegister(), st_off); } } @@ -718,10 +730,10 @@ } void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, - int total_args_passed, int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray& sig_extended, const VMRegPair *regs) { + // Note: rsi contains the senderSP on entry. 
We must preserve it since // we may do a i2c -> c2i transition if we lose a race where compiled // code goes non-entrant while we get args ready. @@ -810,11 +822,11 @@ // Now generate the shuffle code. Pick up all register args and move the // rest through the floating point stack top. - for (int i = 0; i < total_args_passed; i++) { - if (sig_bt[i] == T_VOID) { + for (int i = 0; i < sig_extended.length(); i++) { + if (sig_extended.at(i)._bt == T_VOID) { // Longs and doubles are passed in native word order, but misaligned // in the 32-bit build. - assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); + assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half"); continue; } @@ -823,7 +835,7 @@ assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); // Load in argument order going down. - int ld_off = (total_args_passed - i) * Interpreter::stackElementSize; + int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize; // Point to interpreter value (vs. tag) int next_off = ld_off - Interpreter::stackElementSize; // @@ -864,7 +876,7 @@ // are accessed as negative so LSW is at LOW address // ld_off is MSW so get LSW - const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)? next_off : ld_off; __ movptr(rsi, Address(saved_sp, offset)); __ movptr(Address(rsp, st_off), rsi); @@ -882,7 +894,7 @@ // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case // So we must adjust where to pick up the data to match the interpreter. - const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)? 
next_off : ld_off; // this can be a misaligned move @@ -930,14 +942,14 @@ // --------------------------------------------------------------- AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, - int total_args_passed, int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray& sig_extended, const VMRegPair *regs, - AdapterFingerPrint* fingerprint) { + AdapterFingerPrint* fingerprint, + AdapterBlob*& new_adapter) { address i2c_entry = __ pc(); - gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); + gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs); // ------------------------------------------------------------------------- // Generate a C2I adapter. On entry we know rbx, holds the Method* during calls @@ -974,9 +986,13 @@ address c2i_entry = __ pc(); - gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); + OopMapSet* oop_maps = NULL; + int frame_complete = CodeOffsets::frame_never_safe; + int frame_size_in_words = 0; + gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words); __ flush(); + new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps); return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); } @@ -1000,6 +1016,7 @@ case T_SHORT: case T_INT: case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: case T_ADDRESS: case T_METADATA: @@ -1281,6 +1298,7 @@ } break; case T_OBJECT: + case T_VALUETYPE: default: ShouldNotReachHere(); } } else if (in_regs[i].first()->is_XMMRegister()) { @@ -1417,7 +1435,7 @@ if (VerifyOops) { for (int i = 0; i < method->size_of_parameters(); i++) { if (sig_bt[i] == T_OBJECT || - sig_bt[i] == T_ARRAY) { + sig_bt[i] == T_ARRAY || sig_bt[i] == T_VALUETYPE) { VMReg r = regs[i].first(); assert(r->is_valid(), "bad oop arg"); if (r->is_stack()) { @@ -1890,6 +1908,7 @@ c_arg++; break; } + case 
T_VALUETYPE: case T_OBJECT: assert(!is_critical_native, "no oop arguments"); object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], @@ -2072,6 +2091,7 @@ // Result is in st0 we'll save as needed break; case T_ARRAY: // Really a handle + case T_VALUETYPE: // Really a handle case T_OBJECT: // Really a handle break; // can't de-handlize until after safepoint check case T_VOID: break; @@ -2218,7 +2238,7 @@ __ reset_last_Java_frame(thread, false); // Unbox oop result, e.g. JNIHandles::resolve value. - if (ret_type == T_OBJECT || ret_type == T_ARRAY) { + if (ret_type == T_OBJECT || ret_type == T_ARRAY || ret_type == T_VALUETYPE) { __ resolve_jobject(rax /* value */, thread /* thread */, rcx /* tmp */); @@ -3170,3 +3190,8 @@ // frame_size_words or bytes?? return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); } + +BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) { + Unimplemented(); + return NULL; +} --- old/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp 2019-03-11 14:24:57.510356104 +0100 +++ new/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp 2019-03-11 14:24:57.306356107 +0100 @@ -28,6 +28,7 @@ #endif #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" +#include "classfile/symbolTable.hpp" #include "code/debugInfoRec.hpp" #include "code/icBuffer.hpp" #include "code/nativeInst.hpp" @@ -491,6 +492,7 @@ case T_OBJECT: case T_ARRAY: case T_ADDRESS: + case T_VALUETYPE: if (int_args < Argument::n_int_register_parameters_j) { regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); } else { @@ -524,6 +526,88 @@ return align_up(stk_args, 2); } +// Same as java_calling_convention() but for multiple return +// values. There's no way to store them on the stack so if we don't +// have enough registers, multiple values can't be returned. 
+const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1; +const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j; +int SharedRuntime::java_return_convention(const BasicType *sig_bt, + VMRegPair *regs, + int total_args_passed) { + // Create the mapping between argument positions and + // registers. + static const Register INT_ArgReg[java_return_convention_max_int] = { + rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0 + }; + static const XMMRegister FP_ArgReg[java_return_convention_max_float] = { + j_farg0, j_farg1, j_farg2, j_farg3, + j_farg4, j_farg5, j_farg6, j_farg7 + }; + + + uint int_args = 0; + uint fp_args = 0; + + for (int i = 0; i < total_args_passed; i++) { + switch (sig_bt[i]) { + case T_BOOLEAN: + case T_CHAR: + case T_BYTE: + case T_SHORT: + case T_INT: + if (int_args < Argument::n_int_register_parameters_j+1) { + regs[i].set1(INT_ArgReg[int_args]->as_VMReg()); + int_args++; + } else { + return -1; + } + break; + case T_VOID: + // halves of T_LONG or T_DOUBLE + assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); + regs[i].set_bad(); + break; + case T_LONG: + assert(sig_bt[i + 1] == T_VOID, "expecting half"); + // fall through + case T_OBJECT: + case T_VALUETYPE: + case T_ARRAY: + case T_ADDRESS: + case T_METADATA: + if (int_args < Argument::n_int_register_parameters_j+1) { + regs[i].set2(INT_ArgReg[int_args]->as_VMReg()); + int_args++; + } else { + return -1; + } + break; + case T_FLOAT: + if (fp_args < Argument::n_float_register_parameters_j) { + regs[i].set1(FP_ArgReg[fp_args]->as_VMReg()); + fp_args++; + } else { + return -1; + } + break; + case T_DOUBLE: + assert(sig_bt[i + 1] == T_VOID, "expecting half"); + if (fp_args < Argument::n_float_register_parameters_j) { + regs[i].set2(FP_ArgReg[fp_args]->as_VMReg()); + fp_args++; + } else { + return -1; + } + break; + default: + ShouldNotReachHere(); + break; + } + } 
+ + return int_args + fp_args; +} + // Patch the callers callsite with entry to compiled code if it exists. static void patch_callers_callsite(MacroAssembler *masm) { Label L; @@ -566,13 +650,127 @@ __ bind(L); } +// For each value type argument, sig includes the list of fields of +// the value type. This utility function computes the number of +// arguments for the call if value types are passed by reference (the +// calling convention the interpreter expects). +static int compute_total_args_passed_int(const GrowableArray* sig_extended) { + int total_args_passed = 0; + if (ValueTypePassFieldsAsArgs) { + for (int i = 0; i < sig_extended->length(); i++) { + BasicType bt = sig_extended->at(i)._bt; + if (SigEntry::is_reserved_entry(sig_extended, i)) { + // Ignore reserved entry + } else if (bt == T_VALUETYPE) { + // In sig_extended, a value type argument starts with: + // T_VALUETYPE, followed by the types of the fields of the + // value type and T_VOID to mark the end of the value + // type. 
Value types are flattened so, for instance, in the + // case of a value type with an int field and a value type + // field that itself has 2 fields, an int and a long: + // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second + // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID + // (outer T_VALUETYPE) + total_args_passed++; + int vt = 1; + do { + i++; + BasicType bt = sig_extended->at(i)._bt; + BasicType prev_bt = sig_extended->at(i-1)._bt; + if (bt == T_VALUETYPE) { + vt++; + } else if (bt == T_VOID && + prev_bt != T_LONG && + prev_bt != T_DOUBLE) { + vt--; + } + } while (vt != 0); + } else { + total_args_passed++; + } + } + } else { + total_args_passed = sig_extended->length(); + } + return total_args_passed; +} + + +static void gen_c2i_adapter_helper(MacroAssembler* masm, + BasicType bt, + BasicType prev_bt, + size_t size_in_bytes, + const VMRegPair& reg_pair, + const Address& to, + int extraspace, + bool is_oop) { + assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here"); + if (bt == T_VOID) { + assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half"); + return; + } + + // Say 4 args: + // i st_off + // 0 32 T_LONG + // 1 24 T_VOID + // 2 16 T_OBJECT + // 3 8 T_BOOL + // - 0 return address + // + // However to make thing extra confusing. Because we can fit a long/double in + // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter + // leaves one slot empty and only stores to a single slot. In this case the + // slot that is occupied is the T_VOID slot. See I said it was confusing. 
+ + bool wide = (size_in_bytes == wordSize); + VMReg r_1 = reg_pair.first(); + VMReg r_2 = reg_pair.second(); + assert(r_2->is_valid() == wide, "invalid size"); + if (!r_1->is_valid()) { + assert(!r_2->is_valid(), "must be invalid"); + return; + } + + if (!r_1->is_XMMRegister()) { + Register val = rax; + assert_different_registers(to.base(), val); + if(r_1->is_stack()) { + int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; + __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false); + } else { + val = r_1->as_Register(); + } + if (is_oop) { + // We don't need barriers because the destination is a newly allocated object. + // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp. + if (UseCompressedOops) { + __ encode_heap_oop(val); + __ movl(to, val); + } else { + __ movptr(to, val); + } + } else { + __ store_sized_value(to, val, size_in_bytes); + } + } else { + if (wide) { + __ movdbl(to, r_1->as_XMMRegister()); + } else { + __ movflt(to, r_1->as_XMMRegister()); + } + } +} static void gen_c2i_adapter(MacroAssembler *masm, - int total_args_passed, - int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray* sig_extended, const VMRegPair *regs, - Label& skip_fixup) { + Label& skip_fixup, + address start, + OopMapSet* oop_maps, + int& frame_complete, + int& frame_size_in_words, + bool alloc_value_receiver) { // Before we get into the guts of the C2I adapter, see if we should be here // at all. We've come from compiled code and are attempting to jump to the // interpreter, which means the caller made a static call to get here @@ -582,11 +780,54 @@ __ bind(skip_fixup); + bool has_value_argument = false; + if (ValueTypePassFieldsAsArgs) { + // Is there a value type argument? 
+ for (int i = 0; i < sig_extended->length() && !has_value_argument; i++) { + has_value_argument = (sig_extended->at(i)._bt == T_VALUETYPE); + } + if (has_value_argument) { + // There is at least a value type argument: we're coming from + // compiled code so we have no buffers to back the value + // types. Allocate the buffers here with a runtime call. + OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); + + frame_complete = __ offset(); + + __ set_last_Java_frame(noreg, noreg, NULL); + + __ mov(c_rarg0, r15_thread); + __ mov(c_rarg1, rbx); + __ mov64(c_rarg2, (int64_t)alloc_value_receiver); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types))); + + oop_maps->add_gc_map((int)(__ pc() - start), map); + __ reset_last_Java_frame(false); + + RegisterSaver::restore_live_registers(masm); + + Label no_exception; + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); + __ jcc(Assembler::equal, no_exception); + + __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD); + __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); + __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + __ bind(no_exception); + + // We get an array of objects from the runtime call + __ get_vm_result(r13, r15_thread); // Use r13 as temporary because r10 is trashed by movptr() + __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live? + __ mov(r10, r13); + } + } + // Since all args are passed on the stack, total_args_passed * // Interpreter::stackElementSize is the space we need. 
Plus 1 because // we also account for the return address location since // we store it first rather than hold it in rax across all the shuffling - + int total_args_passed = compute_total_args_passed_int(sig_extended); int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize; // stack is aligned, keep it that way @@ -604,97 +845,99 @@ __ movptr(Address(rsp, 0), rax); // Now write the args into the outgoing interpreter space - for (int i = 0; i < total_args_passed; i++) { - if (sig_bt[i] == T_VOID) { - assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); - continue; - } - // offset to start parameters - int st_off = (total_args_passed - i) * Interpreter::stackElementSize; - int next_off = st_off - Interpreter::stackElementSize; - - // Say 4 args: - // i st_off - // 0 32 T_LONG - // 1 24 T_VOID - // 2 16 T_OBJECT - // 3 8 T_BOOL - // - 0 return address - // - // However to make thing extra confusing. Because we can fit a long/double in - // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter - // leaves one slot empty and only stores to a single slot. In this case the - // slot that is occupied is the T_VOID slot. See I said it was confusing. - - VMReg r_1 = regs[i].first(); - VMReg r_2 = regs[i].second(); - if (!r_1->is_valid()) { - assert(!r_2->is_valid(), ""); - continue; - } - if (r_1->is_stack()) { - // memory to memory use rax - int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; - if (!r_2->is_valid()) { - // sign extend?? 
- __ movl(rax, Address(rsp, ld_off)); - __ movptr(Address(rsp, st_off), rax); - - } else { - - __ movq(rax, Address(rsp, ld_off)); - - // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG - // T_DOUBLE and T_LONG use two slots in the interpreter - if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { - // ld_off == LSW, ld_off+wordSize == MSW - // st_off == MSW, next_off == LSW - __ movq(Address(rsp, next_off), rax); -#ifdef ASSERT - // Overwrite the unused slot with known junk - __ mov64(rax, CONST64(0xdeadffffdeadaaaa)); - __ movptr(Address(rsp, st_off), rax); -#endif /* ASSERT */ - } else { - __ movq(Address(rsp, st_off), rax); - } + // next_arg_comp is the next argument from the compiler point of + // view (value type fields are passed in registers/on the stack). In + // sig_extended, a value type argument starts with: T_VALUETYPE, + // followed by the types of the fields of the value type and T_VOID + // to mark the end of the value type. ignored counts the number of + // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument: + // used to get the buffer for that argument from the pool of buffers + // we allocated above and want to pass to the + // interpreter. next_arg_int is the next argument from the + // interpreter point of view (value types are passed by reference). 
+ bool has_oop_field = false; + for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0; + next_arg_comp < sig_extended->length(); next_arg_comp++) { + assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments"); + assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?"); + BasicType bt = sig_extended->at(next_arg_comp)._bt; + int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize; + if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) { + if (SigEntry::is_reserved_entry(sig_extended, next_arg_comp)) { + continue; // Ignore reserved entry } - } else if (r_1->is_Register()) { - Register r = r_1->as_Register(); - if (!r_2->is_valid()) { - // must be only an int (or less ) so move only 32bits to slot - // why not sign extend?? - __ movl(Address(rsp, st_off), r); - } else { - // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG - // T_DOUBLE and T_LONG use two slots in the interpreter - if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { - // long/double in gpr -#ifdef ASSERT - // Overwrite the unused slot with known junk - __ mov64(rax, CONST64(0xdeadffffdeadaaab)); - __ movptr(Address(rsp, st_off), rax); -#endif /* ASSERT */ - __ movq(Address(rsp, next_off), r); - } else { - __ movptr(Address(rsp, st_off), r); - } - } - } else { - assert(r_1->is_XMMRegister(), ""); - if (!r_2->is_valid()) { - // only a float use just part of the slot - __ movflt(Address(rsp, st_off), r_1->as_XMMRegister()); - } else { + int next_off = st_off - Interpreter::stackElementSize; + const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off; + const VMRegPair reg_pair = regs[next_arg_comp-ignored]; + size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4; + gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? 
sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL, + size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false); + next_arg_int++; #ifdef ASSERT + if (bt == T_LONG || bt == T_DOUBLE) { // Overwrite the unused slot with known junk - __ mov64(rax, CONST64(0xdeadffffdeadaaac)); + __ mov64(rax, CONST64(0xdeadffffdeadaaaa)); __ movptr(Address(rsp, st_off), rax); -#endif /* ASSERT */ - __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister()); } +#endif /* ASSERT */ + } else { + ignored++; + // get the buffer from the just allocated pool of buffers + int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE); + __ load_heap_oop(r11, Address(r10, index)); + next_vt_arg++; next_arg_int++; + int vt = 1; + // write fields we get from compiled code in registers/stack + // slots to the buffer: we know we are done with that value type + // argument when we hit the T_VOID that acts as an end of value + // type delimiter for this value type. Value types are flattened + // so we might encounter embedded value types. Each entry in + // sig_extended contains a field offset in the buffer. + do { + next_arg_comp++; + BasicType bt = sig_extended->at(next_arg_comp)._bt; + BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt; + if (bt == T_VALUETYPE) { + vt++; + ignored++; + } else if (bt == T_VOID && + prev_bt != T_LONG && + prev_bt != T_DOUBLE) { + vt--; + ignored++; + } else if (SigEntry::is_reserved_entry(sig_extended, next_arg_comp)) { + // Ignore reserved entry + } else { + int off = sig_extended->at(next_arg_comp)._offset; + assert(off > 0, "offset in object should be positive"); + size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; + bool is_oop = (bt == T_OBJECT || bt == T_ARRAY); + has_oop_field = has_oop_field || is_oop; + gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? 
sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL, + size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop); + } + } while (vt != 0); + // pass the buffer to the interpreter + __ movptr(Address(rsp, st_off), r11); + } + } + + // If a value type was allocated and initialized, apply post barrier to all oop fields + if (has_value_argument && has_oop_field) { + __ push(r13); // save senderSP + __ push(rbx); // save callee + // Allocate argument register save area + if (frame::arg_reg_save_area_bytes != 0) { + __ subptr(rsp, frame::arg_reg_save_area_bytes); + } + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10); + // De-allocate argument register save area + if (frame::arg_reg_save_area_bytes != 0) { + __ addptr(rsp, frame::arg_reg_save_area_bytes); } + __ pop(rbx); // restore callee + __ pop(r13); // restore sender SP } // Schedule the branch target address early. @@ -715,10 +958,60 @@ __ bind(L_fail); } +static void gen_i2c_adapter_helper(MacroAssembler* masm, + BasicType bt, + BasicType prev_bt, + size_t size_in_bytes, + const VMRegPair& reg_pair, + const Address& from, + bool is_oop) { + assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here"); + if (bt == T_VOID) { + // Longs and doubles are passed in native word order, but misaligned + // in the 32-bit build. 
+ assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half"); + return; + } + assert(!reg_pair.second()->is_valid() || reg_pair.first()->next() == reg_pair.second(), + "scrambled load targets?"); + + bool wide = (size_in_bytes == wordSize); + VMReg r_1 = reg_pair.first(); + VMReg r_2 = reg_pair.second(); + assert(r_2->is_valid() == wide, "invalid size"); + if (!r_1->is_valid()) { + assert(!r_2->is_valid(), "must be invalid"); + return; + } + + bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN); + if (!r_1->is_XMMRegister()) { + // We can use r13 as a temp here because compiled code doesn't need r13 as an input + // and if we end up going thru a c2i because of a miss a reasonable value of r13 + // will be generated. + Register dst = r_1->is_stack() ? r13 : r_1->as_Register(); + if (is_oop) { + __ load_heap_oop(dst, from); + } else { + __ load_sized_value(dst, from, size_in_bytes, is_signed); + } + if (r_1->is_stack()) { + // Convert stack slot to an SP offset (+ wordSize to account for return address) + int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize; + __ movq(Address(rsp, st_off), dst); + } + } else { + if (wide) { + __ movdbl(r_1->as_XMMRegister(), from); + } else { + __ movflt(r_1->as_XMMRegister(), from); + } + } +} + void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, - int total_args_passed, int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray* sig, const VMRegPair *regs) { // Note: r13 contains the senderSP on entry. We must preserve it since @@ -796,7 +1089,6 @@ __ subptr(rsp, comp_words_on_stack * wordSize); } - // Ensure compiled code always sees stack at proper alignment __ andptr(rsp, -16); @@ -810,7 +1102,13 @@ // Will jump to the compiled code just as if compiled code was doing it. // Pre-load the register-jump target early, to schedule it better. 
- __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset()))); + if (StressValueTypePassFieldsAsArgs) { + // For stress testing, don't unpack value types in the i2c adapter but + // call the value type entry point and let it take care of unpacking. + __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_value_offset()))); + } else { + __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset()))); + } #if INCLUDE_JVMCI if (EnableJVMCI || UseAOT) { @@ -824,84 +1122,69 @@ } #endif // INCLUDE_JVMCI + int total_args_passed = compute_total_args_passed_int(sig); // Now generate the shuffle code. Pick up all register args and move the // rest through the floating point stack top. - for (int i = 0; i < total_args_passed; i++) { - if (sig_bt[i] == T_VOID) { - // Longs and doubles are passed in native word order, but misaligned - // in the 32-bit build. - assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); - continue; - } - - // Pick up 0, 1 or 2 words from SP+offset. - - assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), - "scrambled load targets?"); - // Load in argument order going down. - int ld_off = (total_args_passed - i)*Interpreter::stackElementSize; - // Point to interpreter value (vs. tag) - int next_off = ld_off - Interpreter::stackElementSize; - // - // - // - VMReg r_1 = regs[i].first(); - VMReg r_2 = regs[i].second(); - if (!r_1->is_valid()) { - assert(!r_2->is_valid(), ""); - continue; - } - if (r_1->is_stack()) { - // Convert stack slot to an SP offset (+ wordSize to account for return address ) - int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize; - // We can use r13 as a temp here because compiled code doesn't need r13 as an input - // and if we end up going thru a c2i because of a miss a reasonable value of r13 - // will be generated. - if (!r_2->is_valid()) { - // sign extend??? 
- __ movl(r13, Address(saved_sp, ld_off)); - __ movptr(Address(rsp, st_off), r13); - } else { - // - // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE - // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case - // So we must adjust where to pick up the data to match the interpreter. - // - // Interpreter local[n] == MSW, local[n+1] == LSW however locals - // are accessed as negative so LSW is at LOW address - - // ld_off is MSW so get LSW - const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? - next_off : ld_off; - __ movq(r13, Address(saved_sp, offset)); - // st_off is LSW (i.e. reg.first()) - __ movq(Address(rsp, st_off), r13); - } - } else if (r_1->is_Register()) { // Register argument - Register r = r_1->as_Register(); - assert(r != rax, "must be different"); - if (r_2->is_valid()) { - // - // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE - // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case - // So we must adjust where to pick up the data to match the interpreter. - - const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? - next_off : ld_off; - - // this can be a misaligned move - __ movq(r, Address(saved_sp, offset)); - } else { - // sign extend and use a full word? - __ movl(r, Address(saved_sp, ld_off)); + // next_arg_comp is the next argument from the compiler point of + // view (value type fields are passed in registers/on the stack). In + // sig_extended, a value type argument starts with: T_VALUETYPE, + // followed by the types of the fields of the value type and T_VOID + // to mark the end of the value type. ignored counts the number of + // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the + // interpreter point of view (value types are passed by reference). 
+ for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig->length(); next_arg_comp++) { + assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments"); + assert(next_arg_int <= total_args_passed, "more arguments from the interpreter than expected?"); + BasicType bt = sig->at(next_arg_comp)._bt; + int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize; + if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) { + // Load in argument order going down. + // Point to interpreter value (vs. tag) + if (SigEntry::is_reserved_entry(sig, next_arg_comp)) { + continue; // Ignore reserved entry } + int next_off = ld_off - Interpreter::stackElementSize; + int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off; + const VMRegPair reg_pair = regs[next_arg_comp-ignored]; + size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4; + gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig->at(next_arg_comp-1)._bt : T_ILLEGAL, + size_in_bytes, reg_pair, Address(saved_sp, offset), false); + next_arg_int++; } else { - if (!r_2->is_valid()) { - __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off)); - } else { - __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off)); - } + next_arg_int++; + ignored++; + // get the buffer for that value type + __ movptr(r10, Address(saved_sp, ld_off)); + int vt = 1; + // load fields to registers/stack slots from the buffer: we know + // we are done with that value type argument when we hit the + // T_VOID that acts as an end of value type delimiter for this + // value type. Value types are flattened so we might encounter + // embedded value types. Each entry in sig_extended contains a + // field offset in the buffer. 
+ do { + next_arg_comp++; + BasicType bt = sig->at(next_arg_comp)._bt; + BasicType prev_bt = sig->at(next_arg_comp-1)._bt; + if (bt == T_VALUETYPE) { + vt++; + ignored++; + } else if (bt == T_VOID && + prev_bt != T_LONG && + prev_bt != T_DOUBLE) { + vt--; + ignored++; + } else if (SigEntry::is_reserved_entry(sig, next_arg_comp)) { + // Ignore reserved entry + } else { + int off = sig->at(next_arg_comp)._offset; + assert(off > 0, "offset in object should be positive"); + size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; + bool is_oop = (bt == T_OBJECT || bt == T_ARRAY); + gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop); + } + } while (vt != 0); } } @@ -918,7 +1201,7 @@ __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // put Method* where a c2i would expect should we end up there - // only needed becaus eof c2 resolve stubs return Method* as a result in + // only needed because of c2 resolve stubs return Method* as a result in // rax __ mov(rax, rbx); __ jmp(r11); @@ -926,14 +1209,24 @@ // --------------------------------------------------------------- AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, - int total_args_passed, int comp_args_on_stack, - const BasicType *sig_bt, - const VMRegPair *regs, - AdapterFingerPrint* fingerprint) { + int comp_args_on_stack_cc, + const GrowableArray* sig, + const VMRegPair* regs, + const GrowableArray* sig_cc, + const VMRegPair* regs_cc, + const GrowableArray* sig_cc_ro, + const VMRegPair* regs_cc_ro, + AdapterFingerPrint* fingerprint, + AdapterBlob*& new_adapter) { address i2c_entry = __ pc(); - - gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); + if (StressValueTypePassFieldsAsArgs) { + // For stress testing, don't unpack value types in the i2c adapter but + // call the value type entry point and let it take care of unpacking. 
+ gen_i2c_adapter(masm, comp_args_on_stack, sig, regs); + } else { + gen_i2c_adapter(masm, comp_args_on_stack_cc, sig_cc, regs_cc); + } // ------------------------------------------------------------------------- // Generate a C2I adapter. On entry we know rbx holds the Method* during calls @@ -968,12 +1261,38 @@ __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); } + OopMapSet* oop_maps = new OopMapSet(); + int frame_complete = CodeOffsets::frame_never_safe; + int frame_size_in_words = 0; + + // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver) + address c2i_value_ro_entry = __ pc(); + if (regs_cc != regs_cc_ro) { + Label unused; + gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false); + skip_fixup = unused; + } + + // Scalarized c2i adapter address c2i_entry = __ pc(); + gen_c2i_adapter(masm, sig_cc, regs_cc, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, true); - gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); + // Non-scalarized c2i adapter + address c2i_value_entry = c2i_entry; + if (regs != regs_cc) { + c2i_value_entry = __ pc(); + Label unused; + gen_c2i_adapter(masm, sig, regs, unused, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false); + } __ flush(); - return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); + + // The c2i adapters might safepoint and trigger a GC. The caller must make sure that + // the GC knows about the location of oop argument locations passed to the c2i adapter. 
+ bool caller_must_gc_arguments = (regs != regs_cc); + new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments); + + return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry); } int SharedRuntime::c_calling_convention(const BasicType *sig_bt, @@ -1031,6 +1350,7 @@ // fall through case T_OBJECT: case T_ARRAY: + case T_VALUETYPE: case T_ADDRESS: case T_METADATA: if (int_args < Argument::n_int_register_parameters_c) { @@ -1381,7 +1701,7 @@ if (map != NULL) { __ movq(Address(rsp, offset), in_regs[i].first()->as_Register()); if (in_sig_bt[i] == T_ARRAY) { - map->set_oop(VMRegImpl::stack2reg(slot));; + map->set_oop(VMRegImpl::stack2reg(slot)); } } else { __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset)); @@ -1415,6 +1735,7 @@ // handled above break; case T_OBJECT: + case T_VALUETYPE: default: ShouldNotReachHere(); } } else if (in_regs[i].first()->is_XMMRegister()) { @@ -1790,7 +2111,8 @@ if (VerifyOops) { for (int i = 0; i < method->size_of_parameters(); i++) { if (sig_bt[i] == T_OBJECT || - sig_bt[i] == T_ARRAY) { + sig_bt[i] == T_ARRAY || + sig_bt[i] == T_VALUETYPE) { VMReg r = regs[i].first(); assert(r->is_valid(), "bad oop arg"); if (r->is_stack()) { @@ -2330,6 +2652,7 @@ #endif break; } + case T_VALUETYPE: case T_OBJECT: assert(!is_critical_native, "no oop arguments"); object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], @@ -2465,6 +2788,10 @@ // Load (object->mark() | 1) into swap_reg %rax __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); + if (EnableValhalla && !UseBiasedLocking) { + // For slow path is_always_locked, using biased, which is never natural for !UseBiasLocking + __ andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place); + } // Save (object->mark() | 1) into BasicLock's displaced header __ movptr(Address(lock_reg, mark_word_offset), swap_reg); @@ 
-2526,6 +2853,7 @@ // Result is in xmm0 we'll save as needed break; case T_ARRAY: // Really a handle + case T_VALUETYPE: // Really a handle case T_OBJECT: // Really a handle break; // can't de-handlize until after safepoint check case T_VOID: break; @@ -2679,7 +3007,7 @@ __ reset_last_Java_frame(false); // Unbox oop result, e.g. JNIHandles::resolve value. - if (ret_type == T_OBJECT || ret_type == T_ARRAY) { + if (ret_type == T_OBJECT || ret_type == T_ARRAY || ret_type == T_VALUETYPE) { __ resolve_jobject(rax /* value */, r15_thread /* thread */, rcx /* tmp */); @@ -4025,3 +4353,114 @@ _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); } #endif // COMPILER2 + +BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) { + BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K); + CodeBuffer buffer(buf); + short buffer_locs[20]; + buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, + sizeof(buffer_locs)/sizeof(relocInfo)); + + MacroAssembler* masm = new MacroAssembler(&buffer); + + const Array* sig_vk = vk->extended_sig(); + const Array* regs = vk->return_regs(); + + int pack_fields_off = __ offset(); + + int j = 1; + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID) { + if (sig_vk->at(i-1)._bt == T_LONG || + sig_vk->at(i-1)._bt == T_DOUBLE) { + j++; + } + continue; + } + int off = sig_vk->at(i)._offset; + assert(off > 0, "offset in object should be positive"); + VMRegPair pair = regs->at(j); + VMReg r_1 = pair.first(); + VMReg r_2 = pair.second(); + Address to(rax, off); + if (bt == T_FLOAT) { + __ movflt(to, r_1->as_XMMRegister()); + } else if (bt == T_DOUBLE) { + __ movdbl(to, r_1->as_XMMRegister()); + } else if (bt == T_OBJECT || bt == T_ARRAY) { + Register val = r_1->as_Register(); + assert_different_registers(rax, val); + // We don't need barriers because the 
destination is a newly allocated object. + // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp. + if (UseCompressedOops) { + __ encode_heap_oop(val); + __ movl(to, val); + } else { + __ movptr(to, val); + } + + } else { + assert(is_java_primitive(bt), "unexpected basic type"); + assert_different_registers(rax, r_1->as_Register()); + size_t size_in_bytes = type2aelembytes(bt); + __ store_sized_value(to, r_1->as_Register(), size_in_bytes); + } + j++; + } + assert(j == regs->length(), "missed a field?"); + + __ ret(0); + + int unpack_fields_off = __ offset(); + + j = 1; + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID) { + if (sig_vk->at(i-1)._bt == T_LONG || + sig_vk->at(i-1)._bt == T_DOUBLE) { + j++; + } + continue; + } + int off = sig_vk->at(i)._offset; + assert(off > 0, "offset in object should be positive"); + VMRegPair pair = regs->at(j); + VMReg r_1 = pair.first(); + VMReg r_2 = pair.second(); + Address from(rax, off); + if (bt == T_FLOAT) { + __ movflt(r_1->as_XMMRegister(), from); + } else if (bt == T_DOUBLE) { + __ movdbl(r_1->as_XMMRegister(), from); + } else if (bt == T_OBJECT || bt == T_ARRAY) { + assert_different_registers(rax, r_1->as_Register()); + __ load_heap_oop(r_1->as_Register(), from); + } else { + assert(is_java_primitive(bt), "unexpected basic type"); + assert_different_registers(rax, r_1->as_Register()); + size_t size_in_bytes = type2aelembytes(bt); + __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN); + } + j++; + } + assert(j == regs->length(), "missed a field?"); + + if (StressValueTypeReturnedAsFields) { + __ load_klass(rax, rax); + __ orptr(rax, 1); + } + + __ ret(0); + + __ flush(); + + return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off); +} --- old/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp 2019-03-11 14:24:57.946356098 +0100 +++ 
new/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp 2019-03-11 14:24:57.742356101 +0100 @@ -335,12 +335,14 @@ return_address = __ pc(); // store result depending on type (everything that is not - // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) + // T_OBJECT, T_VALUETYPE, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) __ movptr(c_rarg0, result); Label is_long, is_float, is_double, exit; __ movl(c_rarg1, result_type); __ cmpl(c_rarg1, T_OBJECT); __ jcc(Assembler::equal, is_long); + __ cmpl(c_rarg1, T_VALUETYPE); + __ jcc(Assembler::equal, is_long); __ cmpl(c_rarg1, T_LONG); __ jcc(Assembler::equal, is_long); __ cmpl(c_rarg1, T_FLOAT); @@ -999,7 +1001,7 @@ StubCodeMark mark(this, "StubRoutines", "verify_oop"); address start = __ pc(); - Label exit, error; + Label exit, error, in_Java_heap; __ pushf(); __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); @@ -1042,7 +1044,14 @@ __ andptr(c_rarg2, c_rarg3); __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); __ cmpptr(c_rarg2, c_rarg3); - __ jcc(Assembler::notZero, error); + __ jcc(Assembler::zero, in_Java_heap); + // Not in Java heap, but could be valid if it's a bufferable value type + __ load_klass(c_rarg2, rax); + __ movbool(c_rarg2, Address(c_rarg2, InstanceKlass::extra_flags_offset())); + __ andptr(c_rarg2, InstanceKlass::_extra_is_bufferable); + __ testbool(c_rarg2); + __ jcc(Assembler::zero, error); + __ bind(in_Java_heap); // set r12 to heapbase for load_klass() __ reinit_heapbase(); @@ -5734,6 +5743,146 @@ StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; } + // Call here from the interpreter or compiled code to either load + // multiple returned values from the value type instance being + // returned to registers or to store returned values to a newly + // allocated value type instance. 
+ address generate_return_value_stub(address destination, const char* name, bool has_res) { + // We need to save all registers the calling convention may use so + // the runtime calls read or update those registers. This needs to + // be in sync with SharedRuntime::java_return_convention(). + enum layout { + pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2, + rax_off, rax_off_2, + j_rarg5_off, j_rarg5_2, + j_rarg4_off, j_rarg4_2, + j_rarg3_off, j_rarg3_2, + j_rarg2_off, j_rarg2_2, + j_rarg1_off, j_rarg1_2, + j_rarg0_off, j_rarg0_2, + j_farg0_off, j_farg0_2, + j_farg1_off, j_farg1_2, + j_farg2_off, j_farg2_2, + j_farg3_off, j_farg3_2, + j_farg4_off, j_farg4_2, + j_farg5_off, j_farg5_2, + j_farg6_off, j_farg6_2, + j_farg7_off, j_farg7_2, + rbp_off, rbp_off_2, + return_off, return_off_2, + + framesize + }; + + CodeBuffer buffer(name, 1000, 512); + MacroAssembler* masm = new MacroAssembler(&buffer); + + int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16); + assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned"); + int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; + int frame_size_in_words = frame_size_in_bytes / wordSize; + + OopMapSet *oop_maps = new OopMapSet(); + OopMap* map = new OopMap(frame_size_in_slots, 0); + + map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), 
j_farg1->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg()); + + int start = __ offset(); + + __ subptr(rsp, frame_size_in_bytes - 8 /* return address*/); + + __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp); + __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7); + __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6); + __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5); + __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4); + __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3); + __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2); + __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1); + __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0); + + __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0); + __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1); + __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2); + __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3); + __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4); + __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5); + __ movptr(Address(rsp, rax_off * BytesPerInt), rax); + + int frame_complete = __ offset(); + + __ set_last_Java_frame(noreg, noreg, NULL); + + __ mov(c_rarg0, r15_thread); + __ mov(c_rarg1, rax); + + __ call(RuntimeAddress(destination)); + + // Set an oopmap for the call site. 
+ + oop_maps->add_gc_map( __ offset() - start, map); + + // clear last_Java_sp + __ reset_last_Java_frame(false); + + __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt)); + __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt)); + __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt)); + __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt)); + __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt)); + __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt)); + __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt)); + __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt)); + __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt)); + + __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt)); + __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt)); + __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt)); + __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt)); + __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt)); + __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt)); + __ movptr(rax, Address(rsp, rax_off * BytesPerInt)); + + __ addptr(rsp, frame_size_in_bytes-8); + + // check for pending exceptions + Label pending; + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); + __ jcc(Assembler::notEqual, pending); + + if (has_res) { + __ get_vm_result(rax, r15_thread); + } + + __ ret(0); + + __ bind(pending); + + __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); + __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + // ------------- + // make sure all code is generated + masm->flush(); + + RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false); + return stub->entry_point(); + } + // Initialization void generate_initial() { // Generates all stubs and initializes the entry points @@ -5835,6 +5984,9 @@ StubRoutines::_dtan = generate_libmTan(); } } + + 
StubRoutines::_load_value_type_fields_in_regs = generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_value_type_fields_in_regs), "load_value_type_fields_in_regs", false); + StubRoutines::_store_value_type_fields_to_buf = generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_value_type_fields_to_buf), "store_value_type_fields_to_buf", true); } void generate_all() { --- old/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp 2019-03-11 14:24:58.386356092 +0100 +++ new/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp 2019-03-11 14:24:58.182356095 +0100 @@ -36,6 +36,7 @@ #include "oops/methodData.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" @@ -57,7 +58,7 @@ // Run with +PrintInterpreter to get the VM to print out the size. // Max size with JVMTI #ifdef AMD64 -int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024; +int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024; #else int TemplateInterpreter::InterpreterCodeSize = 224 * 1024; #endif // AMD64 @@ -205,6 +206,56 @@ // and NULL it as marker that esp is now tos until next java call __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); + if (state == atos && ValueTypeReturnedAsFields) { +#ifndef _LP64 + __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf()); +#else + // A value type might be returned. If fields are in registers we + // need to allocate a value type instance and initialize it with + // the value of the fields. 
+ Label skip, slow_case; + // We only need a new buffered value if a new one is not returned + __ testptr(rax, 1); + __ jcc(Assembler::zero, skip); + + // Try to allocate a new buffered value (from the heap) + if (UseTLAB) { + __ mov(rbx, rax); + __ andptr(rbx, -2); + + __ movl(r14, Address(rbx, Klass::layout_helper_offset())); + + __ movptr(r13, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); + __ lea(r14, Address(r13, r14, Address::times_1)); + __ cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); + __ jcc(Assembler::above, slow_case); + __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14); + __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::always_locked_prototype()); + + __ xorl(rax, rax); // use zero reg to clear memory (shorter code) + __ store_klass_gap(r13, rax); // zero klass gap for compressed oops + __ mov(rax, rbx); + __ store_klass(r13, rbx); // klass + + // We have our new buffered value, initialize its fields with a + // value class specific handler + __ movptr(rbx, Address(rax, InstanceKlass::adr_valueklass_fixed_block_offset())); + __ movptr(rbx, Address(rbx, ValueKlass::pack_handler_offset())); + __ mov(rax, r13); + __ call(rbx); + __ jmp(skip); + } + + __ bind(slow_case); + // We failed to allocate a new value, fall back to a runtime + // call. Some oop field may be live in some registers but we can't + // tell. That runtime call will take care of preserving them + // across a GC if there's one. 
+ __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf()); + __ bind(skip); +#endif + } + __ restore_bcp(); __ restore_locals(); @@ -347,6 +398,7 @@ case T_DOUBLE : /* nothing to do */ break; #endif // _LP64 + case T_VALUETYPE: // fall through (value types are handled with oops) case T_OBJECT : // retrieve result from frame __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); --- old/src/hotspot/cpu/x86/templateTable_x86.cpp 2019-03-11 14:24:58.802356087 +0100 +++ new/src/hotspot/cpu/x86/templateTable_x86.cpp 2019-03-11 14:24:58.598356089 +0100 @@ -177,6 +177,7 @@ Label L_patch_done; switch (bc) { + case Bytecodes::_fast_qputfield: case Bytecodes::_fast_aputfield: case Bytecodes::_fast_bputfield: case Bytecodes::_fast_zputfield: @@ -369,6 +370,7 @@ // get type __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); + __ andl(rdx, ~JVM_CONSTANT_QDESC_BIT); // unresolved class - get the resolved class __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); @@ -819,15 +821,32 @@ void TemplateTable::aaload() { transition(itos, atos); - // rax: index - // rdx: array - index_check(rdx, rax); // kills rbx - do_oop_load(_masm, - Address(rdx, rax, - UseCompressedOops ? Address::times_4 : Address::times_ptr, - arrayOopDesc::base_offset_in_bytes(T_OBJECT)), - rax, - IS_ARRAY); + + Register array = rcx; + Register index = rax; + + index_check(array, index); // kills rbx + if (ValueArrayFlatten) { + Label is_flat_array, done; + __ test_flat_array_oop(array, rbx, is_flat_array); + do_oop_load(_masm, + Address(array, index, + UseCompressedOops ? Address::times_4 : Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT)), + rax, + IS_ARRAY); + __ jmp(done); + __ bind(is_flat_array); + __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index); + __ bind(done); + } else { + do_oop_load(_masm, + Address(array, index, + UseCompressedOops ? 
Address::times_4 : Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT)), + rax, + IS_ARRAY); + } } void TemplateTable::baload() { @@ -1113,7 +1132,7 @@ } void TemplateTable::aastore() { - Label is_null, ok_is_subtype, done; + Label is_null, is_flat_array, ok_is_subtype, done; transition(vtos, vtos); // stack: ..., array, index, value __ movptr(rax, at_tos()); // value @@ -1125,18 +1144,25 @@ arrayOopDesc::base_offset_in_bytes(T_OBJECT)); index_check_without_pop(rdx, rcx); // kills rbx + __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); + // Move array class to rdi + __ load_klass(rdi, rdx); + if (ValueArrayFlatten) { + __ test_flat_array_klass(rdi, rbx, is_flat_array); + } + // Move subklass into rbx __ load_klass(rbx, rax); - // Move superklass into rax - __ load_klass(rax, rdx); - __ movptr(rax, Address(rax, + // Move array element superklass into rax + __ movptr(rax, Address(rdi, ObjArrayKlass::element_klass_offset())); // Generate subtype check. Blows rcx, rdi // Superklass in rax. Subklass in rbx. + // is "rbx <: rax" ? (value subclass <: array element superclass) __ gen_subtype_check(rbx, ok_is_subtype); // Come here on failure @@ -1156,10 +1182,53 @@ // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx] __ bind(is_null); __ profile_null_seen(rbx); + if (EnableValhalla) { + Label is_null_into_value_array_npe, store_null; + + __ load_klass(rdi, rdx); + // No way to store null in flat array + __ test_flat_array_klass(rdi, rbx, is_null_into_value_array_npe); + + // Use case for storing values in objArray where element_klass is specifically + // a value type because they could not be flattened "for reasons", + // these need to have the same semantics as flat arrays, i.e. 
NPE + __ movptr(rdi, Address(rdi, ObjArrayKlass::element_klass_offset())); + __ test_klass_is_value(rdi, rdi, is_null_into_value_array_npe); + __ jmp(store_null); + + __ bind(is_null_into_value_array_npe); + __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry)); + __ bind(store_null); + } // Store a NULL do_oop_store(_masm, element_address, noreg, IS_ARRAY); + __ jmp(done); + if (EnableValhalla) { + Label is_type_ok; + __ bind(is_flat_array); // Store non-null value to flat + + // Simplistic type check... + + // Profile the not-null value's klass. + __ load_klass(rbx, rax); + __ profile_typecheck(rcx, rbx, rax); // blows rcx, and rax + // Move element klass into rax + __ movptr(rax, Address(rdi, ArrayKlass::element_klass_offset())); + // flat value array needs exact type match + // is "rax == rbx" (value subclass == array element superclass) + __ cmpptr(rax, rbx); + __ jccb(Assembler::equal, is_type_ok); + + __ profile_typecheck_failed(rcx); + __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); + + __ bind(is_type_ok); + __ movptr(rax, at_tos()); // value + __ movl(rcx, at_tos_p1()); // index + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), rax, rdx, rcx); + } // Pop stack arguments __ bind(done); __ addptr(rsp, 3 * Interpreter::stackElementSize); @@ -2405,10 +2474,37 @@ void TemplateTable::if_acmp(Condition cc) { transition(atos, vtos); // assume branch is more often taken than not (loops use backward branches) - Label not_taken; + Label taken, not_taken; __ pop_ptr(rdx); + + const int is_value_mask = markOopDesc::always_locked_pattern; + if (EnableValhalla && ACmpOnValues == 1) { + Label is_null; + __ testptr(rdx, rdx); + __ jcc(Assembler::zero, is_null); + __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes())); + __ andptr(rbx, is_value_mask); + __ cmpl(rbx, is_value_mask); + __ setb(Assembler::equal, rbx); + __ movzbl(rbx, rbx); + __ orptr(rdx, rbx); + __ bind(is_null); + } + __ 
cmpoop(rdx, rax); + + if (EnableValhalla && ACmpOnValues != 1) { + __ jcc(Assembler::notEqual, (cc == not_equal) ? taken : not_taken); + __ testptr(rdx, rdx); + __ jcc(Assembler::zero, (cc == equal) ? taken : not_taken); + __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes())); + __ andptr(rbx, is_value_mask); + __ cmpl(rbx, is_value_mask); + cc = (cc == equal) ? not_equal : equal; + } + __ jcc(j_not(cc), not_taken); + __ bind(taken); branch(false, false); __ bind(not_taken); __ profile_not_taken_branch(rax); @@ -2679,7 +2775,8 @@ if (state == itos) { __ narrow(rax); } - __ remove_activation(state, rbcp); + + __ remove_activation(state, rbcp, true, true, true); __ jmp(rbcp); } @@ -2866,16 +2963,23 @@ const Register off = rbx; const Register flags = rax; const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them + const Register flags2 = rdx; resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); jvmti_post_field_access(cache, index, is_static, false); load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - if (!is_static) pop_and_check_object(obj); - const Address field(obj, off, Address::times_1, 0*wordSize); - Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj; + Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType; + + if (!is_static) { + __ movptr(rcx, Address(cache, index, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::f1_offset()))); + } + + __ movl(flags2, flags); __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); // Make sure we don't need to mask edx after the above shift @@ -2885,6 +2989,7 @@ __ jcc(Assembler::notZero, notByte); // btos + if (!is_static) pop_and_check_object(obj); __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg); __ push(btos); // Rewrite bytecode to be faster @@ -2894,9 +2999,10 @@ __ jmp(Done); __ bind(notByte); + __ cmpl(flags, ztos); 
__ jcc(Assembler::notEqual, notBool); - + if (!is_static) pop_and_check_object(obj); // ztos (same code as btos) __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg); __ push(ztos); @@ -2911,14 +3017,80 @@ __ cmpl(flags, atos); __ jcc(Assembler::notEqual, notObj); // atos - do_oop_load(_masm, field, rax); - __ push(atos); - if (!is_static && rc == may_rewrite) { - patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); + if (!EnableValhalla) { + if (!is_static) pop_and_check_object(obj); + do_oop_load(_masm, field, rax); + __ push(atos); + if (!is_static && rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); + } + __ jmp(Done); + } else { + if (is_static) { + __ load_heap_oop(rax, field); + Label isFlattenable, uninitialized; + // Issue below if the static field has not been initialized yet + __ test_field_is_flattenable(flags2, rscratch1, isFlattenable); + // Not flattenable case + __ push(atos); + __ jmp(Done); + // Flattenable case, must not return null even if uninitialized + __ bind(isFlattenable); + __ testptr(rax, rax); + __ jcc(Assembler::zero, uninitialized); + __ push(atos); + __ jmp(Done); + __ bind(uninitialized); + __ andl(flags2, ConstantPoolCacheEntry::field_index_mask); + __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field), + obj, flags2); + __ verify_oop(rax); + __ push(atos); + __ jmp(Done); + } else { + Label isFlattened, nonnull, isFlattenable, rewriteFlattenable; + __ test_field_is_flattenable(flags2, rscratch1, isFlattenable); + // Non-flattenable field case, also covers the object case + pop_and_check_object(obj); + __ load_heap_oop(rax, field); + __ push(atos); + if (rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); + } + __ jmp(Done); + __ bind(isFlattenable); + __ test_field_is_flattened(flags2, rscratch1, isFlattened); + // Non-flattened field case + pop_and_check_object(obj); + __ load_heap_oop(rax, field); + __ testptr(rax, rax); + __ 
jcc(Assembler::notZero, nonnull); + __ andl(flags2, ConstantPoolCacheEntry::field_index_mask); + __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), + obj, flags2); + __ bind(nonnull); + __ verify_oop(rax); + __ push(atos); + __ jmp(rewriteFlattenable); + __ bind(isFlattened); + __ andl(flags2, ConstantPoolCacheEntry::field_index_mask); + pop_and_check_object(rbx); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), + rbx, flags2, rcx); + __ verify_oop(rax); + __ push(atos); + __ bind(rewriteFlattenable); + if (rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_qgetfield, bc, rbx); + } + __ jmp(Done); + } } - __ jmp(Done); __ bind(notObj); + + if (!is_static) pop_and_check_object(obj); + __ cmpl(flags, itos); __ jcc(Assembler::notEqual, notInt); // itos @@ -3017,6 +3189,21 @@ getfield_or_static(byte_no, true); } +void TemplateTable::withfield() { + transition(vtos, atos); + + Register cache = LP64_ONLY(c_rarg1) NOT_LP64(rcx); + Register index = LP64_ONLY(c_rarg2) NOT_LP64(rdx); + + resolve_cache_and_index(f2_byte, cache, index, sizeof(u2)); + + call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), cache); + // new value type is returned in rbx + // stack adjustement is returned in rax + __ verify_oop(rbx); + __ addptr(rsp, rax); + __ movptr(rax, rbx); +} // The registers cache and index expected to be set before call. // The function may destroy various registers, just not the cache and index registers. 
@@ -3112,6 +3299,7 @@ const Register obj = rcx; const Register off = rbx; const Register flags = rax; + const Register flags2 = rdx; resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); jvmti_post_field_mod(cache, index, is_static); @@ -3128,28 +3316,29 @@ // Check for volatile store __ testl(rdx, rdx); + __ movl(flags2, flags); __ jcc(Assembler::zero, notVolatile); - putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags); + putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2); volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore)); __ jmp(Done); __ bind(notVolatile); - putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags); + putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2); __ bind(Done); } void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc, - Register obj, Register off, Register flags) { + Register obj, Register off, Register flags, Register flags2) { // field addresses const Address field(obj, off, Address::times_1, 0*wordSize); NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);) Label notByte, notBool, notInt, notShort, notChar, - notLong, notFloat, notObj; + notLong, notFloat, notObj, notValueType; Label Done; const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); @@ -3192,14 +3381,56 @@ // atos { - __ pop(atos); - if (!is_static) pop_and_check_object(obj); - // Store into the field - do_oop_store(_masm, field, rax); - if (!is_static && rc == may_rewrite) { - patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no); + if (!EnableValhalla) { + __ pop(atos); + if (!is_static) pop_and_check_object(obj); + // Store into the field + do_oop_store(_masm, field, rax); + if (!is_static && rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } else { + __ pop(atos); + if (is_static) { + Label notFlattenable, notBuffered; + __ 
test_field_is_not_flattenable(flags2, rscratch1, notFlattenable); + __ null_check(rax); + __ bind(notFlattenable); + do_oop_store(_masm, field, rax); + __ jmp(Done); + } else { + Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable; + __ test_field_is_flattenable(flags2, rscratch1, isFlattenable); + // Not flattenable case, covers not flattenable values and objects + pop_and_check_object(obj); + // Store into the field + do_oop_store(_masm, field, rax); + __ bind(rewriteNotFlattenable); + if (rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + // Implementation of the flattenable semantic + __ bind(isFlattenable); + __ null_check(rax); + __ test_field_is_flattened(flags2, rscratch1, isFlattened); + // Not flattened case + pop_and_check_object(obj); + // Store into the field + do_oop_store(_masm, field, rax); + __ jmp(rewriteFlattenable); + __ bind(isFlattened); + pop_and_check_object(obj); + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), + rax, off, obj); + __ bind(rewriteFlattenable); + if (rc == may_rewrite) { + patch_bytecode(Bytecodes::_fast_qputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } } - __ jmp(Done); } __ bind(notObj); @@ -3336,6 +3567,7 @@ // to do it for every data type, we use the saved values as the // jvalue object. 
switch (bytecode()) { // load values into the jvalue object + case Bytecodes::_fast_qputfield: //fall through case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; case Bytecodes::_fast_bputfield: // fall through case Bytecodes::_fast_zputfield: // fall through @@ -3361,6 +3593,7 @@ NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx)); switch (bytecode()) { // restore tos values + case Bytecodes::_fast_qputfield: // fall through case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; case Bytecodes::_fast_bputfield: // fall through case Bytecodes::_fast_zputfield: // fall through @@ -3400,6 +3633,10 @@ // Assembler::StoreStore)); Label notVolatile, Done; + if (bytecode() == Bytecodes::_fast_qputfield) { + __ movl(rscratch2, rdx); + } + __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); __ andl(rdx, 0x1); @@ -3428,8 +3665,24 @@ // access field switch (bytecode()) { + case Bytecodes::_fast_qputfield: + { + Label isFlattened, done; + __ null_check(rax); + __ test_field_is_flattened(rscratch2, rscratch1, isFlattened); + // No Flattened case + do_oop_store(_masm, field, rax); + __ jmp(done); + __ bind(isFlattened); + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), + rax, rbx, rcx); + __ bind(done); + } + break; case Bytecodes::_fast_aputfield: - do_oop_store(_masm, field, rax); + { + do_oop_store(_masm, field, rax); + } break; case Bytecodes::_fast_lputfield: #ifdef _LP64 @@ -3499,17 +3752,53 @@ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); // __ andl(rdx, 0x1); // - __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, + __ movptr(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()))); // rax: object __ verify_oop(rax); __ null_check(rax); - Address field(rax, rbx, Address::times_1); + Address field(rax, rdx, Address::times_1); // access field switch (bytecode()) { + case 
Bytecodes::_fast_qgetfield: + { + Label isFlattened, nonnull, Done; + __ movptr(rscratch1, Address(rcx, rbx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::flags_offset()))); + __ test_field_is_flattened(rscratch1, rscratch2, isFlattened); + // Non-flattened field case + __ movptr(rscratch1, rax); + __ load_heap_oop(rax, field); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, nonnull); + __ movptr(rax, rscratch1); + __ movl(rcx, Address(rcx, rbx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::flags_offset()))); + __ andl(rcx, ConstantPoolCacheEntry::field_index_mask); + __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), + rax, rcx); + __ bind(nonnull); + __ verify_oop(rax); + __ jmp(Done); + __ bind(isFlattened); + __ movl(rdx, Address(rcx, rbx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::flags_offset()))); + __ andl(rdx, ConstantPoolCacheEntry::field_index_mask); + __ movptr(rcx, Address(rcx, rbx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::f1_offset()))); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), + rax, rdx, rcx); + __ verify_oop(rax); + __ bind(Done); + } + break; case Bytecodes::_fast_agetfield: do_oop_load(_masm, field, rax); __ verify_oop(rax); @@ -4134,6 +4423,20 @@ __ bind(done); } +void TemplateTable::defaultvalue() { + transition(vtos, atos); + + Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx); + Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx); + + __ get_unsigned_2_byte_index_at_bcp(rarg2, 1); + __ get_constant_pool(rarg1); + + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue), + rarg1, rarg2); + __ verify_oop(rax); +} + void TemplateTable::newarray() { transition(itos, atos); Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx); @@ -4170,10 +4473,11 @@ 
__ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, - Address::times_1, - Array::base_offset_in_bytes()), - JVM_CONSTANT_Class); + __ movzbl(rdx, Address(rdx, rbx, + Address::times_1, + Array::base_offset_in_bytes())); + __ andl (rdx, ~JVM_CONSTANT_QDESC_BIT); + __ cmpl(rdx, JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); __ push(atos); // save receiver for result, and for GC call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); @@ -4211,15 +4515,29 @@ // Come here on success __ bind(ok_is_subtype); __ mov(rax, rdx); // Restore object in rdx + __ jmp(done); + + __ bind(is_null); // Collect counts on whether this check-cast sees NULLs a lot or not. if (ProfileInterpreter) { - __ jmp(done); - __ bind(is_null); __ profile_null_seen(rcx); - } else { - __ bind(is_null); // same as 'done' } + + if (EnableValhalla) { + // Get cpool & tags index + __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array + __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index + // See if CP entry is a Q-descriptor + __ movzbl(rcx, Address(rdx, rbx, + Address::times_1, + Array::base_offset_in_bytes())); + __ andl (rcx, JVM_CONSTANT_QDESC_BIT); + __ cmpl(rcx, JVM_CONSTANT_QDESC_BIT); + __ jcc(Assembler::notEqual, done); + __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry)); + } + __ bind(done); } @@ -4233,10 +4551,11 @@ __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, - Address::times_1, - Array::base_offset_in_bytes()), - JVM_CONSTANT_Class); + __ movzbl(rdx, Address(rdx, rbx, + Address::times_1, + Array::base_offset_in_bytes())); + __ andl (rdx, ~JVM_CONSTANT_QDESC_BIT); + __ cmpl(rdx, JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); __ 
push(atos); // save receiver for result, and for GC @@ -4288,7 +4607,6 @@ // rax = 1: obj != NULL and obj is an instanceof the specified klass } - //---------------------------------------------------------------------------------------------------- // Breakpoints void TemplateTable::_breakpoint() { --- old/src/hotspot/cpu/x86/templateTable_x86.hpp 2019-03-11 14:24:59.242356081 +0100 +++ new/src/hotspot/cpu/x86/templateTable_x86.hpp 2019-03-11 14:24:59.042356083 +0100 @@ -40,7 +40,7 @@ static void index_check_without_pop(Register array, Register index); static void putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc, - Register obj, Register off, Register flags); + Register obj, Register off, Register flags, Register flags2); static void fast_storefield_helper(Address field, Register rax); #endif // CPU_X86_TEMPLATETABLE_X86_HPP --- old/src/hotspot/cpu/x86/vm_version_x86.cpp 2019-03-11 14:24:59.650356075 +0100 +++ new/src/hotspot/cpu/x86/vm_version_x86.cpp 2019-03-11 14:24:59.446356078 +0100 @@ -1411,7 +1411,7 @@ } // Use XMM/YMM MOVDQU instruction for Object Initialization - if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) { + if (UseSSE >= 2 && UseUnalignedLoadStores) { if (FLAG_IS_DEFAULT(UseXMMForObjInit)) { UseXMMForObjInit = true; } --- old/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp 2019-03-11 14:25:00.062356069 +0100 +++ new/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp 2019-03-11 14:24:59.858356072 +0100 @@ -118,7 +118,7 @@ Label L; __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); - __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD); + __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L); __ stop("Vtable entry is NULL"); __ bind(L); @@ -129,7 +129,7 @@ // method (rbx): Method* // rcx: receiver address ame_addr = __ pc(); - __ jmp( Address(rbx, Method::from_compiled_offset())); + __ jmp( Address(rbx, 
Method::from_compiled_value_ro_offset())); masm->flush(); slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets @@ -235,7 +235,7 @@ Label L2; __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L2); - __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD); + __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L2); __ stop("compiler entrypoint is null"); __ bind(L2); @@ -243,7 +243,7 @@ #endif // ASSERT address ame_addr = __ pc(); - __ jmp(Address(method, Method::from_compiled_offset())); + __ jmp(Address(method, Method::from_compiled_value_ro_offset())); __ bind(L_no_such_interface); // Handle IncompatibleClassChangeError in itable stubs. --- old/src/hotspot/cpu/x86/x86_32.ad 2019-03-11 14:25:00.490356063 +0100 +++ new/src/hotspot/cpu/x86/x86_32.ad 2019-03-11 14:25:00.286356066 +0100 @@ -616,10 +616,7 @@ Compile* C = ra_->C; MacroAssembler _masm(&cbuf); - int framesize = C->frame_size_in_bytes(); - int bangsize = C->bang_size_in_bytes(); - - __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode(), C->stub_function() != NULL); + __ verified_entry(C); C->set_frame_complete(cbuf.insts_size()); --- old/src/hotspot/cpu/x86/x86_64.ad 2019-03-11 14:25:00.978356057 +0100 +++ new/src/hotspot/cpu/x86/x86_64.ad 2019-03-11 14:25:00.750356060 +0100 @@ -791,10 +791,8 @@ Compile* C = ra_->C; MacroAssembler _masm(&cbuf); - int framesize = C->frame_size_in_bytes(); - int bangsize = C->bang_size_in_bytes(); - - __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != NULL); + __ verified_entry(C); + __ bind(*_verified_entry); C->set_frame_complete(cbuf.insts_size()); @@ -868,29 +866,8 @@ __ vzeroupper(); } - int framesize = C->frame_size_in_bytes(); - assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); - // Remove word for return adr 
already pushed - // and RBP - framesize -= 2*wordSize; - - // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here + __ restore_stack(C); - if (framesize) { - emit_opcode(cbuf, Assembler::REX_W); - if (framesize < 0x80) { - emit_opcode(cbuf, 0x83); // addq rsp, #framesize - emit_rm(cbuf, 0x3, 0x00, RSP_enc); - emit_d8(cbuf, framesize); - } else { - emit_opcode(cbuf, 0x81); // addq rsp, #framesize - emit_rm(cbuf, 0x3, 0x00, RSP_enc); - emit_d32(cbuf, framesize); - } - } - - // popq rbp - emit_opcode(cbuf, 0x58 | RBP_enc); if (StackReservedPages > 0 && C->has_reserved_stack_access()) { __ reserved_stack_check(); @@ -1463,6 +1440,39 @@ //============================================================================= #ifndef PRODUCT +void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const +{ + st->print_cr("MachVEPNode"); +} +#endif + +void MachVEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const +{ + MacroAssembler masm(&cbuf); + if (!_verified) { + uint insts_size = cbuf.insts_size(); + if (UseCompressedClassPointers) { + masm.load_klass(rscratch1, j_rarg0); + masm.cmpptr(rax, rscratch1); + } else { + masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + } + masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); + } else { + // Unpack value type args passed as oop and then jump to + // the verified entry point (skipping the unverified entry). 
+ masm.unpack_value_args(ra_->C, _receiver_only); + masm.jmp(*_verified_entry); + } +} + +uint MachVEPNode::size(PhaseRegAlloc* ra_) const +{ + return MachNode::size(ra_); // too many variables; just compute it the hard way +} + +//============================================================================= +#ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { if (UseCompressedClassPointers) { @@ -6601,6 +6611,19 @@ ins_pipe(ialu_reg_reg); // XXX %} +instruct castN2X(rRegL dst, rRegN src) +%{ + match(Set dst (CastP2X src)); + + format %{ "movq $dst, $src\t# ptr -> long" %} + ins_encode %{ + if ($dst$$reg != $src$$reg) { + __ movptr($dst$$Register, $src$$Register); + } + %} + ins_pipe(ialu_reg_reg); // XXX +%} + instruct castP2X(rRegL dst, rRegP src) %{ match(Set dst (CastP2X src)); @@ -10836,15 +10859,14 @@ // ======================================================================= // fast clearing of an array -instruct rep_stos(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, +instruct rep_stos(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegL val, Universe dummy, rFlagsReg cr) %{ - predicate(!((ClearArrayNode*)n)->is_large()); - match(Set dummy (ClearArray cnt base)); - effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr); + predicate(!((ClearArrayNode*)n)->is_large() && !((ClearArrayNode*)n)->word_copy_only()); + match(Set dummy (ClearArray (Binary cnt base) val)); + effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL cr); format %{ $$template - $$emit$$"xorq rax, rax\t# ClearArray:\n\t" $$emit$$"cmp InitArrayShortSize,rcx\n\t" $$emit$$"jg LARGE\n\t" $$emit$$"dec rcx\n\t" @@ -10858,19 +10880,20 @@ $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--\n\t" } else if (UseXMMForObjInit) { - $$emit$$"mov rdi,rax\n\t" - $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" + $$emit$$"movdq $tmp, $val\n\t" + $$emit$$"punpcklqdq $tmp, $tmp\n\t" + $$emit$$"vinserti128_high $tmp, 
$tmp\n\t" $$emit$$"jmpq L_zero_64_bytes\n\t" $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" - $$emit$$"vmovdqu ymm0,(rax)\n\t" - $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"vmovdqu $tmp,0x20(rax)\n\t" $$emit$$"add 0x40,rax\n\t" $$emit$$"# L_zero_64_bytes:\n\t" $$emit$$"sub 0x8,rcx\n\t" $$emit$$"jge L_loop\n\t" $$emit$$"add 0x4,rcx\n\t" $$emit$$"jl L_tail\n\t" - $$emit$$"vmovdqu ymm0,(rax)\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" $$emit$$"add 0x20,rax\n\t" $$emit$$"sub 0x4,rcx\n\t" $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" @@ -10889,38 +10912,94 @@ $$emit$$"# DONE" %} ins_encode %{ - __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, - $tmp$$XMMRegister, false); + __ clear_mem($base$$Register, $cnt$$Register, $val$$Register, + $tmp$$XMMRegister, false, false); %} ins_pipe(pipe_slow); %} -instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, +instruct rep_stos_word_copy(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegL val, + Universe dummy, rFlagsReg cr) +%{ + predicate(!((ClearArrayNode*)n)->is_large() && ((ClearArrayNode*)n)->word_copy_only()); + match(Set dummy (ClearArray (Binary cnt base) val)); + effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL cr); + + format %{ $$template + $$emit$$"cmp InitArrayShortSize,rcx\n\t" + $$emit$$"jg LARGE\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"js DONE\t# Zero length\n\t" + $$emit$$"mov rax,(rdi,rcx,8)\t# LOOP\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"jge LOOP\n\t" + $$emit$$"jmp DONE\n\t" + $$emit$$"# LARGE:\n\t" + if (UseXMMForObjInit) { + $$emit$$"movdq $tmp, $val\n\t" + $$emit$$"punpcklqdq $tmp, $tmp\n\t" + $$emit$$"vinserti128_high $tmp, $tmp\n\t" + $$emit$$"jmpq L_zero_64_bytes\n\t" + $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"vmovdqu $tmp,0x20(rax)\n\t" + $$emit$$"add 0x40,rax\n\t" + $$emit$$"# L_zero_64_bytes:\n\t" + $$emit$$"sub 0x8,rcx\n\t" + $$emit$$"jge L_loop\n\t" + $$emit$$"add 0x4,rcx\n\t" + $$emit$$"jl 
L_tail\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"add 0x20,rax\n\t" + $$emit$$"sub 0x4,rcx\n\t" + $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" + $$emit$$"add 0x4,rcx\n\t" + $$emit$$"jle L_end\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" + $$emit$$"vmovq xmm0,(rax)\n\t" + $$emit$$"add 0x8,rax\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"jge L_sloop\n\t" + $$emit$$"# L_end:\n\t" + } else { + $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--\n\t" + } + $$emit$$"# DONE" + %} + ins_encode %{ + __ clear_mem($base$$Register, $cnt$$Register, $val$$Register, + $tmp$$XMMRegister, false, true); + %} + ins_pipe(pipe_slow); +%} + +instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegL val, Universe dummy, rFlagsReg cr) %{ - predicate(((ClearArrayNode*)n)->is_large()); - match(Set dummy (ClearArray cnt base)); - effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr); + predicate(((ClearArrayNode*)n)->is_large() && !((ClearArrayNode*)n)->word_copy_only()); + match(Set dummy (ClearArray (Binary cnt base) val)); + effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL cr); format %{ $$template if (UseFastStosb) { - $$emit$$"xorq rax, rax\t# ClearArray:\n\t" $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t" $$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--" } else if (UseXMMForObjInit) { - $$emit$$"mov rdi,rax\t# ClearArray:\n\t" - $$emit$$"vpxor ymm0,ymm0,ymm0\n\t" + $$emit$$"movdq $tmp, $val\n\t" + $$emit$$"punpcklqdq $tmp, $tmp\n\t" + $$emit$$"vinserti128_high $tmp, $tmp\n\t" $$emit$$"jmpq L_zero_64_bytes\n\t" $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" - $$emit$$"vmovdqu ymm0,(rax)\n\t" - $$emit$$"vmovdqu ymm0,0x20(rax)\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"vmovdqu $tmp,0x20(rax)\n\t" $$emit$$"add 0x40,rax\n\t" $$emit$$"# L_zero_64_bytes:\n\t" $$emit$$"sub 0x8,rcx\n\t" $$emit$$"jge L_loop\n\t" $$emit$$"add 0x4,rcx\n\t" $$emit$$"jl L_tail\n\t" - $$emit$$"vmovdqu ymm0,(rax)\n\t" + $$emit$$"vmovdqu 
$tmp,(rax)\n\t" $$emit$$"add 0x20,rax\n\t" $$emit$$"sub 0x4,rcx\n\t" $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" @@ -10934,13 +11013,58 @@ $$emit$$"jge L_sloop\n\t" $$emit$$"# L_end:\n\t" } else { - $$emit$$"xorq rax, rax\t# ClearArray:\n\t" $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--" } %} ins_encode %{ - __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, - $tmp$$XMMRegister, true); + __ clear_mem($base$$Register, $cnt$$Register, $val$$Register, + $tmp$$XMMRegister, true, false); + %} + ins_pipe(pipe_slow); +%} + +instruct rep_stos_large_word_copy(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegL val, + Universe dummy, rFlagsReg cr) +%{ + predicate(((ClearArrayNode*)n)->is_large() && ((ClearArrayNode*)n)->word_copy_only()); + match(Set dummy (ClearArray (Binary cnt base) val)); + effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL cr); + + format %{ $$template + if (UseXMMForObjInit) { + $$emit$$"movdq $tmp, $val\n\t" + $$emit$$"punpcklqdq $tmp, $tmp\n\t" + $$emit$$"vinserti128_high $tmp, $tmp\n\t" + $$emit$$"jmpq L_zero_64_bytes\n\t" + $$emit$$"# L_loop:\t# 64-byte LOOP\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"vmovdqu $tmp,0x20(rax)\n\t" + $$emit$$"add 0x40,rax\n\t" + $$emit$$"# L_zero_64_bytes:\n\t" + $$emit$$"sub 0x8,rcx\n\t" + $$emit$$"jge L_loop\n\t" + $$emit$$"add 0x4,rcx\n\t" + $$emit$$"jl L_tail\n\t" + $$emit$$"vmovdqu $tmp,(rax)\n\t" + $$emit$$"add 0x20,rax\n\t" + $$emit$$"sub 0x4,rcx\n\t" + $$emit$$"# L_tail:\t# Clearing tail bytes\n\t" + $$emit$$"add 0x4,rcx\n\t" + $$emit$$"jle L_end\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"# L_sloop:\t# 8-byte short loop\n\t" + $$emit$$"vmovq xmm0,(rax)\n\t" + $$emit$$"add 0x8,rax\n\t" + $$emit$$"dec rcx\n\t" + $$emit$$"jge L_sloop\n\t" + $$emit$$"# L_end:\n\t" + } else { + $$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--" + } + %} + ins_encode %{ + __ clear_mem($base$$Register, $cnt$$Register, $val$$Register, + $tmp$$XMMRegister, true, true); %} ins_pipe(pipe_slow); %} @@ -12513,8 
+12637,24 @@ %} // Call runtime without safepoint +// entry point is null, target holds the address to call +instruct CallLeafNoFPInDirect(rRegP target) +%{ + predicate(n->as_Call()->entry_point() == NULL); + match(CallLeafNoFP target); + + ins_cost(300); + format %{ "call_leaf_nofp,runtime indirect " %} + ins_encode %{ + __ call($target$$Register); + %} + + ins_pipe(pipe_slow); +%} + instruct CallLeafNoFPDirect(method meth) %{ + predicate(n->as_Call()->entry_point() != NULL); match(CallLeafNoFP); effect(USE meth); --- old/src/hotspot/cpu/zero/globals_zero.hpp 2019-03-11 14:25:01.486356050 +0100 +++ new/src/hotspot/cpu/zero/globals_zero.hpp 2019-03-11 14:25:01.266356053 +0100 @@ -76,6 +76,9 @@ define_pd_global(bool, PreserveFramePointer, false); +define_pd_global(bool, ValueTypePassFieldsAsArgs, false); +define_pd_global(bool, ValueTypeReturnedAsFields, false); + // No performance work done here yet. define_pd_global(bool, CompactStrings, false); --- old/src/hotspot/share/adlc/formssel.cpp 2019-03-11 14:25:01.922356043 +0100 +++ new/src/hotspot/share/adlc/formssel.cpp 2019-03-11 14:25:01.706356046 +0100 @@ -877,7 +877,8 @@ strcmp(_matrule->_opType,"TailCall" )==0 || strcmp(_matrule->_opType,"TailJump" )==0 || strcmp(_matrule->_opType,"SafePoint" )==0 || - strcmp(_matrule->_opType,"Halt" )==0 ) + strcmp(_matrule->_opType,"Halt" )==0 || + strcmp(_matrule->_opType,"CallLeafNoFP")==0) return AdlcVMDeps::Parms; // Skip the machine-state edges if( _matrule->_rChild && --- old/src/hotspot/share/aot/aotCompiledMethod.hpp 2019-03-11 14:25:02.410356037 +0100 +++ new/src/hotspot/share/aot/aotCompiledMethod.hpp 2019-03-11 14:25:02.210356039 +0100 @@ -194,6 +194,8 @@ virtual int comp_level() const { return CompLevel_aot; } virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); } + virtual address verified_value_entry_point() const { return NULL; } + virtual address verified_value_ro_entry_point() const { return NULL; } virtual void 
log_identity(xmlStream* stream) const; virtual void log_state_change() const; virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; }); --- old/src/hotspot/share/asm/codeBuffer.hpp 2019-03-11 14:25:02.850356031 +0100 +++ new/src/hotspot/share/asm/codeBuffer.hpp 2019-03-11 14:25:02.622356034 +0100 @@ -42,6 +42,8 @@ public: enum Entries { Entry, Verified_Entry, + Verified_Value_Entry, + Verified_Value_Entry_RO, Frame_Complete, // Offset in the code where the frame setup is (for forte stackwalks) is complete OSR_Entry, Exceptions, // Offset where exception handler lives @@ -62,6 +64,8 @@ CodeOffsets() { _values[Entry ] = 0; _values[Verified_Entry] = 0; + _values[Verified_Value_Entry] = -1; + _values[Verified_Value_Entry_RO] = -1; _values[Frame_Complete] = frame_never_safe; _values[OSR_Entry ] = 0; _values[Exceptions ] = -1; --- old/src/hotspot/share/c1/c1_Canonicalizer.cpp 2019-03-11 14:25:03.430356023 +0100 +++ new/src/hotspot/share/c1/c1_Canonicalizer.cpp 2019-03-11 14:25:03.090356027 +0100 @@ -644,11 +644,13 @@ void Canonicalizer::do_TypeCast (TypeCast* x) {} void Canonicalizer::do_Invoke (Invoke* x) {} void Canonicalizer::do_NewInstance (NewInstance* x) {} +void Canonicalizer::do_NewValueTypeInstance(NewValueTypeInstance* x) {} void Canonicalizer::do_NewTypeArray (NewTypeArray* x) {} void Canonicalizer::do_NewObjectArray (NewObjectArray* x) {} void Canonicalizer::do_NewMultiArray (NewMultiArray* x) {} void Canonicalizer::do_CheckCast (CheckCast* x) { - if (x->klass()->is_loaded()) { + if (x->klass()->is_loaded() && !x->is_never_null()) { + // Don't canonicalize for non-nullable types -- we need to throw NPE. 
Value obj = x->obj(); ciType* klass = obj->exact_type(); if (klass == NULL) { --- old/src/hotspot/share/c1/c1_Canonicalizer.hpp 2019-03-11 14:25:03.958356015 +0100 +++ new/src/hotspot/share/c1/c1_Canonicalizer.hpp 2019-03-11 14:25:03.674356019 +0100 @@ -81,6 +81,7 @@ virtual void do_TypeCast (TypeCast* x); virtual void do_Invoke (Invoke* x); virtual void do_NewInstance (NewInstance* x); + virtual void do_NewValueTypeInstance(NewValueTypeInstance* x); virtual void do_NewTypeArray (NewTypeArray* x); virtual void do_NewObjectArray (NewObjectArray* x); virtual void do_NewMultiArray (NewMultiArray* x); --- old/src/hotspot/share/c1/c1_CodeStubs.hpp 2019-03-11 14:25:04.710356005 +0100 +++ new/src/hotspot/share/c1/c1_CodeStubs.hpp 2019-03-11 14:25:04.450356009 +0100 @@ -232,6 +232,61 @@ }; +class LoadFlattenedArrayStub: public CodeStub { + private: + LIR_Opr _array; + LIR_Opr _index; + LIR_Opr _result; + LIR_Opr _scratch_reg; + CodeEmitInfo* _info; + + public: + LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info); + virtual void emit_code(LIR_Assembler* e); + virtual CodeEmitInfo* info() const { return _info; } + virtual void visit(LIR_OpVisitState* visitor) { + visitor->do_slow_case(_info); + visitor->do_input(_array); + visitor->do_input(_index); + visitor->do_output(_result); + if (_scratch_reg != LIR_OprFact::illegalOpr) { + visitor->do_temp(_scratch_reg); + } + } + +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("LoadFlattenedArrayStub"); } +#endif // PRODUCT +}; + + +class StoreFlattenedArrayStub: public CodeStub { + private: + LIR_Opr _array; + LIR_Opr _index; + LIR_Opr _value; + LIR_Opr _scratch_reg; + CodeEmitInfo* _info; + + public: + StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info); + virtual void emit_code(LIR_Assembler* e); + virtual CodeEmitInfo* info() const { return _info; } + virtual void visit(LIR_OpVisitState* visitor) { + 
visitor->do_slow_case(_info); + visitor->do_input(_array); + visitor->do_input(_index); + visitor->do_input(_value); + if (_scratch_reg != LIR_OprFact::illegalOpr) { + visitor->do_temp(_scratch_reg); + } + } +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("StoreFlattenedArrayStub"); } +#endif // PRODUCT +}; + + class NewInstanceStub: public CodeStub { private: ciInstanceKlass* _klass; @@ -284,9 +339,9 @@ LIR_Opr _length; LIR_Opr _result; CodeEmitInfo* _info; - + bool _is_value_type; public: - NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info); + NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info, bool is_value_type); virtual void emit_code(LIR_Assembler* e); virtual CodeEmitInfo* info() const { return _info; } virtual void visit(LIR_OpVisitState* visitor) { @@ -321,15 +376,20 @@ class MonitorEnterStub: public MonitorAccessStub { private: CodeEmitInfo* _info; + CodeStub* _throw_imse_stub; + LIR_Opr _scratch_reg; public: - MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info); + MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub = NULL, LIR_Opr scratch_reg = LIR_OprFact::illegalOpr); virtual void emit_code(LIR_Assembler* e); virtual CodeEmitInfo* info() const { return _info; } virtual void visit(LIR_OpVisitState* visitor) { visitor->do_input(_obj_reg); visitor->do_input(_lock_reg); + if (_scratch_reg != LIR_OprFact::illegalOpr) { + visitor->do_temp(_scratch_reg); + } visitor->do_slow_case(_info); } #ifndef PRODUCT --- old/src/hotspot/share/c1/c1_Compiler.cpp 2019-03-11 14:25:05.398355995 +0100 +++ new/src/hotspot/share/c1/c1_Compiler.cpp 2019-03-11 14:25:05.142355999 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/src/hotspot/share/c1/c1_FrameMap.cpp 2019-03-11 14:25:05.970355987 +0100 +++ new/src/hotspot/share/c1/c1_FrameMap.cpp 2019-03-11 14:25:05.718355991 +0100 @@ -41,7 +41,7 @@ for (int i = 0; i < sig->count(); i++) { ciType* type = sig->type_at(i); BasicType t = type->basic_type(); - if (t == T_ARRAY) { + if (t == T_ARRAY || t == T_VALUETYPE) { t = T_OBJECT; } sta->append(t); --- old/src/hotspot/share/c1/c1_GraphBuilder.cpp 2019-03-11 14:25:06.694355977 +0100 +++ new/src/hotspot/share/c1/c1_GraphBuilder.cpp 2019-03-11 14:25:06.318355983 +0100 @@ -33,6 +33,7 @@ #include "ci/ciKlass.hpp" #include "ci/ciMemberName.hpp" #include "ci/ciUtilities.inline.hpp" +#include "ci/ciValueKlass.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" #include "jfr/jfrEvents.hpp" @@ -648,6 +649,17 @@ } } + // Record this newly allocated object + void new_instance(NewValueTypeInstance* object) { + int index = _newobjects.length(); + _newobjects.append(object); + if (_fields.at_grow(index, NULL) == NULL) { + _fields.at_put(index, new FieldBuffer()); + } else { + _fields.at(index)->kill(); + } + } + void store_value(Value value) { int index = _newobjects.find(value); if (index != -1) { @@ -979,7 +991,19 @@ (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) { length = append(new ArrayLength(array, state_before)); } - push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before))); + + if (array->is_loaded_flattened_array()) { + ciType* array_type = array->declared_type(); + ciValueKlass* elem_klass = array_type->as_value_array_klass()->element_klass()->as_value_klass(); + NewValueTypeInstance* new_instance = new NewValueTypeInstance(elem_klass, state_before, false); + _memory->new_instance(new_instance); + 
apush(append_split(new_instance)); + LoadIndexed* load_indexed = new LoadIndexed(array, index, length, type, state_before); + load_indexed->set_vt(new_instance); + append(load_indexed); + } else { + push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before))); + } } @@ -1008,6 +1032,7 @@ } else if (type == T_BYTE) { check_boolean = true; } + StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean); append(result); _memory->store_value(value); @@ -1619,15 +1644,57 @@ } } +void GraphBuilder::copy_value_content(ciValueKlass* vk, Value src, int src_off, Value dest, int dest_off, + ValueStack* state_before, bool needs_patching) { + for (int i = 0; i < vk->nof_nonstatic_fields(); i++) { + ciField* inner_field = vk->nonstatic_field_at(i); + assert(!inner_field->is_flattened(), "the iteration over nested fields is handled by the loop itself"); + int off = inner_field->offset() - vk->first_field_offset(); + LoadField* load = new LoadField(src, src_off + off, inner_field, false, state_before, needs_patching); + Value replacement = append(load); + StoreField* store = new StoreField(dest, dest_off + off, inner_field, replacement, false, state_before, needs_patching); + append(store); + } +} + void GraphBuilder::access_field(Bytecodes::Code code) { bool will_link; ciField* field = stream()->get_field(will_link); ciInstanceKlass* holder = field->holder(); BasicType field_type = field->type()->basic_type(); ValueType* type = as_ValueType(field_type); + + // Null check and deopt for getting static value field + ciValueKlass* value_klass = NULL; + Value default_value = NULL; + bool needs_deopt = false; + if (code == Bytecodes::_getstatic && !field->is_static_constant() && + field->layout_type() == T_VALUETYPE && field->is_flattenable()) { + value_klass = field->type()->as_value_klass(); + if (holder->is_loaded()) { + ciInstance* mirror = field->holder()->java_mirror(); + ciObject* val = 
mirror->field_value(field).as_object(); + if (val->is_null_object()) { + // This is a non-nullable static field, but it's not initialized. + // We need to do a null check, and replace it with the default value. + } else { + // No need to perform null check on this static field + value_klass = NULL; + } + } + if (value_klass != NULL) { + if (value_klass->is_loaded()) { + default_value = new Constant(new InstanceConstant(value_klass->default_value_instance())); + } else { + needs_deopt = true; + } + } + } + // call will_link again to determine if the field is valid. const bool needs_patching = !holder->is_loaded() || !field->will_link(method(), code) || + needs_deopt || PatchALot; ValueStack* state_before = NULL; @@ -1675,8 +1742,13 @@ if (state_before == NULL) { state_before = copy_state_for_exception(); } - push(type, append(new LoadField(append(obj), offset, field, true, - state_before, needs_patching))); + LoadField* load_field = new LoadField(append(obj), offset, field, true, + state_before, needs_patching, + value_klass, default_value); + if (field->layout_type() == T_VALUETYPE && field->is_flattenable()) { + load_field->set_never_null(true); + } + push(type, append(load_field)); } break; } @@ -1697,7 +1769,7 @@ Value constant = NULL; obj = apop(); ObjectType* obj_type = obj->type()->as_ObjectType(); - if (field->is_constant() && obj_type->is_constant() && !PatchALot) { + if (field->is_constant() && !field->is_flattened() && obj_type->is_constant() && !PatchALot) { ciObject* const_oop = obj_type->constant_value(); if (!const_oop->is_null_object() && const_oop->is_loaded()) { ciConstant field_value = field->constant_value_of(const_oop); @@ -1720,13 +1792,28 @@ if (state_before == NULL) { state_before = copy_state_for_exception(); } - LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); - Value replacement = !needs_patching ? 
_memory->load(load) : load; - if (replacement != load) { - assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); - push(type, replacement); - } else { - push(type, append(load)); + + if (!field->is_flattened()) { + LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); + Value replacement = !needs_patching ? _memory->load(load) : load; + if (replacement != load) { + assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); + push(type, replacement); + } else { + push(type, append(load)); + } + } else { // flattened field, not optimized solution: re-instantiate the flattened value + assert(field->type()->is_valuetype(), "Sanity check"); + ciValueKlass* value_klass = field->type()->as_value_klass(); + int flattening_offset = field->offset() - value_klass->first_field_offset(); + assert(field->type()->is_valuetype(), "Sanity check"); + scope()->set_wrote_final(); + scope()->set_wrote_fields(); + NewValueTypeInstance* new_instance = new NewValueTypeInstance(value_klass, state_before, false); + _memory->new_instance(new_instance); + apush(append_split(new_instance)); + copy_value_content(value_klass, obj, field->offset() , new_instance, value_klass->first_field_offset(), + state_before, needs_patching); } } break; @@ -1741,10 +1828,19 @@ Value mask = append(new Constant(new IntConstant(1))); val = append(new LogicOp(Bytecodes::_iand, val, mask)); } - StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); - if (!needs_patching) store = _memory->store(store); - if (store != NULL) { - append(store); + + if (!field->is_flattened()) { + StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); + if (!needs_patching) store = _memory->store(store); + if (store != NULL) { + append(store); + } + } else { + assert(field->type()->is_valuetype(), "Sanity check"); + ciValueKlass* 
value_klass = field->type()->as_value_klass(); + int flattening_offset = field->offset() - value_klass->first_field_offset(); + copy_value_content(value_klass, val, value_klass->first_field_offset(), obj, field->offset(), + state_before, needs_patching); } break; } @@ -1754,6 +1850,69 @@ } } +// Baseline version of withfield, allocate every time +void GraphBuilder::withfield(int field_index) +{ + bool will_link; + ciField* field_modify = stream()->get_field(will_link); + ciInstanceKlass* holder = field_modify->holder(); + assert(holder->is_valuetype(), "must be a value klass"); + BasicType field_type = field_modify->type()->basic_type(); + ValueType* type = as_ValueType(field_type); + + // call will_link again to determine if the field is valid. + const bool needs_patching = !holder->is_loaded() || + !field_modify->will_link(method(), Bytecodes::_withfield) || + PatchALot; + + + scope()->set_wrote_final(); + scope()->set_wrote_fields(); + + const int offset = !needs_patching ? field_modify->offset() : -1; + Value val = pop(type); + Value obj = apop(); + + ValueStack* state_before = copy_state_for_exception(); + + NewValueTypeInstance* new_instance = new NewValueTypeInstance(holder->as_value_klass(), state_before, false); + _memory->new_instance(new_instance); + apush(append_split(new_instance)); + + for (int i = 0; i < holder->nof_nonstatic_fields(); i++) { + ciField* field = holder->nonstatic_field_at(i); + int off = field->offset(); + + if (field->offset() != offset) { + if (field->is_flattened()) { + assert(field->type()->is_valuetype(), "Sanity check"); + assert(field->type()->is_valuetype(), "Only value types can be flattened"); + ciValueKlass* vk = field->type()->as_value_klass(); + copy_value_content(vk, obj, off, new_instance, vk->first_field_offset(), state_before, needs_patching); + } else { + // Only load those fields who are not modified + LoadField* load = new LoadField(obj, off, field, false, state_before, needs_patching); + Value replacement = 
append(load); + StoreField* store = new StoreField(new_instance, off, field, replacement, false, state_before, needs_patching); + append(store); + } + } + } + + // Field to modify + if (field_modify->type()->basic_type() == T_BOOLEAN) { + Value mask = append(new Constant(new IntConstant(1))); + val = append(new LogicOp(Bytecodes::_iand, val, mask)); + } + if (field_modify->is_flattened()) { + assert(field_modify->type()->is_valuetype(), "Only value types can be flattened"); + ciValueKlass* vk = field_modify->type()->as_value_klass(); + copy_value_content(vk, val, vk->first_field_offset(), new_instance, field_modify->offset(), state_before, needs_patching); + } else { + StoreField* store = new StoreField(new_instance, offset, field_modify, val, false, state_before, needs_patching); + append(store); + } +} Dependencies* GraphBuilder::dependency_recorder() const { assert(DeoptC1, "need debug information"); @@ -2111,7 +2270,8 @@ } } - Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before); + Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before, + declared_signature->returns_never_null()); // push result append_split(result); @@ -2133,11 +2293,22 @@ bool will_link; ciKlass* klass = stream()->get_klass(will_link); assert(klass->is_instance_klass(), "must be an instance klass"); + assert(!klass->is_valuetype(), "must not be a value klass"); NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); _memory->new_instance(new_instance); apush(append_split(new_instance)); } +void GraphBuilder::new_value_type_instance(int klass_index) { + ValueStack* state_before = copy_state_exhandling(); + bool will_link; + ciKlass* klass = stream()->get_klass(will_link); + assert(klass->is_valuetype(), "must be a value klass"); + NewValueTypeInstance* new_instance = new NewValueTypeInstance(klass->as_value_klass(), + state_before, 
stream()->is_unresolved_klass()); + _memory->new_instance(new_instance); + apush(append_split(new_instance)); +} void GraphBuilder::new_type_array() { ValueStack* state_before = copy_state_exhandling(); @@ -2150,6 +2321,9 @@ ciKlass* klass = stream()->get_klass(will_link); ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); NewArray* n = new NewObjectArray(klass, ipop(), state_before); + if (stream()->is_klass_never_null()) { + n->set_never_null(true); + } apush(append_split(n)); } @@ -2174,8 +2348,9 @@ void GraphBuilder::check_cast(int klass_index) { bool will_link; ciKlass* klass = stream()->get_klass(will_link); + bool never_null = stream()->is_klass_never_null(); ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception(); - CheckCast* c = new CheckCast(klass, apop(), state_before); + CheckCast* c = new CheckCast(klass, apop(), state_before, never_null); apush(append_split(c)); c->set_direct_compare(direct_compare(klass)); @@ -2214,9 +2389,28 @@ void GraphBuilder::monitorenter(Value x, int bci) { + bool maybe_valuetype = false; + if (bci == InvocationEntryBci) { + // Called by GraphBuilder::inline_sync_entry. +#ifdef ASSERT + ciType* obj_type = x->declared_type(); + assert(obj_type == NULL || !obj_type->is_valuetype(), "valuetypes cannot have synchronized methods"); +#endif + } else { + // We are compiling a monitorenter bytecode + if (EnableValhalla) { + ciType* obj_type = x->declared_type(); + if (obj_type == NULL || obj_type->is_valuetype() || obj_type->as_klass()->is_java_lang_Object()) { + // If we're (possibly) locking on a valuetype, check for markOopDesc::always_locked_pattern + // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative). 
+ maybe_valuetype = true; + } + } + } + // save state before locking in case of deoptimization after a NullPointerException ValueStack* state_before = copy_state_for_exception_with_bci(bci); - append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci); + append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_valuetype), bci); kill_all(); } @@ -2872,6 +3066,8 @@ case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; + case Bytecodes::_defaultvalue : new_value_type_instance(s.get_index_u2()); break; + case Bytecodes::_withfield : withfield(s.get_index_u2()); break; case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", NULL); default : ShouldNotReachHere(); break; } @@ -3157,7 +3353,8 @@ int idx = 0; if (!method()->is_static()) { // we should always see the receiver - state->store_local(idx, new Local(method()->holder(), objectType, idx, true)); + state->store_local(idx, new Local(method()->holder(), objectType, idx, + /*receiver*/ true, /*never_null*/ method()->holder()->is_value_array_klass())); idx = 1; } @@ -3167,9 +3364,9 @@ ciType* type = sig->type_at(i); BasicType basic_type = type->basic_type(); // don't allow T_ARRAY to propagate into locals types - if (basic_type == T_ARRAY) basic_type = T_OBJECT; + if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT; ValueType* vt = as_ValueType(basic_type); - state->store_local(idx, new Local(type, vt, idx, false)); + state->store_local(idx, new Local(type, vt, idx, false, sig->is_never_null_at(i))); idx += type->size(); } --- old/src/hotspot/share/c1/c1_GraphBuilder.hpp 2019-03-11 14:25:07.134355971 +0100 +++ new/src/hotspot/share/c1/c1_GraphBuilder.hpp 2019-03-11 14:25:06.934355974 +0100 @@ -267,6 +267,12 @@ void throw_op(int bci); Value round_fp(Value fp_value); + // value types + void 
new_value_type_instance(int klass_index); + void withfield(int field_index); + void copy_value_content(ciValueKlass* vk, Value src, int src_off, Value dest, int dest_off, + ValueStack* state_before, bool needs_patching); + // stack/code manipulation helpers Instruction* append_with_bci(Instruction* instr, int bci); Instruction* append(Instruction* instr); --- old/src/hotspot/share/c1/c1_IR.hpp 2019-03-11 14:25:07.538355966 +0100 +++ new/src/hotspot/share/c1/c1_IR.hpp 2019-03-11 14:25:07.338355969 +0100 @@ -244,7 +244,7 @@ bool reexecute = topmost ? should_reexecute() : false; bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis. bool rethrow_exception = false; - recorder->describe_scope(pc_offset, methodHandle(), scope()->method(), bci(), reexecute, rethrow_exception, is_method_handle_invoke, return_oop, locvals, expvals, monvals); + recorder->describe_scope(pc_offset, methodHandle(), scope()->method(), bci(), reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals); } }; --- old/src/hotspot/share/c1/c1_Instruction.cpp 2019-03-11 14:25:07.942355960 +0100 +++ new/src/hotspot/share/c1/c1_Instruction.cpp 2019-03-11 14:25:07.738355963 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,8 @@ #include "c1/c1_ValueStack.hpp" #include "ci/ciObjArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp" +#include "ci/ciValueArrayKlass.hpp" +#include "ci/ciValueKlass.hpp" // Implementation of Instruction @@ -113,6 +115,71 @@ } +// FIXME -- this is used by ValueStack::merge_types only. We should remove this function +// and use a better way for handling phi nodes. 
+bool Instruction::is_flattened_array() const { + if (ValueArrayFlatten) { + ciType* type = declared_type(); + if (type != NULL && type->is_value_array_klass()) { + ciValueKlass* element_klass = type->as_value_array_klass()->element_klass()->as_value_klass(); + assert(element_klass->is_loaded(), "ciValueKlasses are always loaded"); + if (element_klass->flatten_array()) { + return true; + } + } + } + + return false; +} + +bool Instruction::is_loaded_flattened_array() const { + if (ValueArrayFlatten) { + ciType* type = declared_type(); + if (type != NULL && type->is_value_array_klass()) { + ciValueKlass* element_klass = type->as_value_array_klass()->element_klass()->as_value_klass(); + assert(element_klass->is_loaded(), "ciValueKlasses are always loaded"); + if (element_klass->flatten_array()) { + return true; + } + } + } + + return false; +} + +bool Instruction::maybe_flattened_array() { + if (ValueArrayFlatten) { + ciType* type = declared_type(); + if (type != NULL) { + if (type->is_value_array_klass()) { + ciValueKlass* element_klass = type->as_value_array_klass()->element_klass()->as_value_klass(); + assert(element_klass->is_loaded(), "ciValueKlasses are always loaded"); + if (element_klass->flatten_array()) { + return true; + } + } else if (type->is_obj_array_klass()) { + ciKlass* element_klass = type->as_obj_array_klass()->element_klass(); + if (!element_klass->is_loaded() || element_klass->is_java_lang_Object() || element_klass->is_interface()) { + // Array covariance: + // (ValueType[] <: Object[]) + // (ValueType[] <: []) + // We will add a runtime check for flat-ness. + return true; + } + } else if (type->is_klass() && type->as_klass()->is_java_lang_Object()) { + // This can happen as a parameter to System.arraycopy() + return true; + } + } else if (as_Phi() != NULL) { + // Type info gets lost during Phi merging, but we might be storing into a + // flattened array, so we should do a runtime check. 
+ return true; + } + } + + return false; +} + #ifndef PRODUCT void Instruction::check_state(ValueStack* state) { if (state != NULL) { @@ -197,6 +264,16 @@ return ak->element_type(); } +bool StoreIndexed::is_exact_flattened_array_store() const { + if (array()->is_loaded_flattened_array() && value()->as_Constant() == NULL) { + ciKlass* element_klass = array()->declared_type()->as_value_array_klass()->element_klass(); + ciKlass* actual_klass = value()->declared_type()->as_klass(); + if (element_klass == actual_klass) { + return true; + } + } + return false; +} ciType* LoadField::declared_type() const { return field()->type(); @@ -208,7 +285,16 @@ } ciType* NewObjectArray::exact_type() const { - return ciObjArrayKlass::make(klass()); + ciKlass* element_klass = klass(); + if (element_klass->is_valuetype()) { + return ciValueArrayKlass::make(element_klass); + } else { + return ciObjArrayKlass::make(element_klass); + } +} + +ciType* NewMultiArray::exact_type() const { + return _klass; } ciType* NewArray::declared_type() const { @@ -223,6 +309,23 @@ return exact_type(); } +Value NewValueTypeInstance::depends_on() { + if (_depends_on != this) { + if (_depends_on->as_NewValueTypeInstance() != NULL) { + return _depends_on->as_NewValueTypeInstance()->depends_on(); + } + } + return _depends_on; +} + +ciType* NewValueTypeInstance::exact_type() const { + return klass(); +} + +ciType* NewValueTypeInstance::declared_type() const { + return exact_type(); +} + ciType* CheckCast::declared_type() const { return klass(); } @@ -322,7 +425,7 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args, - int vtable_index, ciMethod* target, ValueStack* state_before) + int vtable_index, ciMethod* target, ValueStack* state_before, bool never_null) : StateSplit(result_type, state_before) , _code(code) , _recv(recv) @@ -333,6 +436,7 @@ set_flag(TargetIsLoadedFlag, target->is_loaded()); set_flag(TargetIsFinalFlag, target_is_loaded() && target->is_final_method()); 
set_flag(TargetIsStrictfpFlag, target_is_loaded() && target->is_strict()); + set_never_null(never_null); assert(args != NULL, "args must exist"); #ifdef ASSERT @@ -791,7 +895,7 @@ TRACE_PHI(tty->print_cr("loop header block, initializing phi functions")); for_each_stack_value(new_state, index, new_value) { - new_state->setup_phi_for_stack(this, index); + new_state->setup_phi_for_stack(this, index, NULL, new_value); TRACE_PHI(tty->print_cr("creating phi-function %c%d for stack %d", new_state->stack_at(index)->type()->tchar(), new_state->stack_at(index)->id(), index)); } @@ -800,7 +904,7 @@ for_each_local_value(new_state, index, new_value) { bool requires_phi = requires_phi_function.at(index) || (new_value->type()->is_double_word() && requires_phi_function.at(index + 1)); if (requires_phi || !SelectivePhiFunctions) { - new_state->setup_phi_for_local(this, index); + new_state->setup_phi_for_local(this, index, NULL, new_value); TRACE_PHI(tty->print_cr("creating phi-function %c%d for local %d", new_state->local_at(index)->type()->tchar(), new_state->local_at(index)->id(), index)); } } @@ -859,7 +963,7 @@ Phi* existing_phi = existing_value->as_Phi(); if (new_value != existing_value && (existing_phi == NULL || existing_phi->block() != this)) { - existing_state->setup_phi_for_stack(this, index); + existing_state->setup_phi_for_stack(this, index, existing_value, new_value); TRACE_PHI(tty->print_cr("creating phi-function %c%d for stack %d", existing_state->stack_at(index)->type()->tchar(), existing_state->stack_at(index)->id(), index)); } } @@ -873,7 +977,7 @@ existing_state->invalidate_local(index); TRACE_PHI(tty->print_cr("invalidating local %d because of type mismatch", index)); } else if (new_value != existing_value && (existing_phi == NULL || existing_phi->block() != this)) { - existing_state->setup_phi_for_local(this, index); + existing_state->setup_phi_for_local(this, index, existing_value, new_value); TRACE_PHI(tty->print_cr("creating phi-function %c%d for local %d", 
existing_state->local_at(index)->type()->tchar(), existing_state->local_at(index)->id(), index)); } } --- old/src/hotspot/share/c1/c1_Instruction.hpp 2019-03-11 14:25:08.346355955 +0100 +++ new/src/hotspot/share/c1/c1_Instruction.hpp 2019-03-11 14:25:08.150355957 +0100 @@ -72,6 +72,7 @@ class StateSplit; class Invoke; class NewInstance; +class NewValueTypeInstance; class NewArray; class NewTypeArray; class NewObjectArray; @@ -177,6 +178,7 @@ virtual void do_TypeCast (TypeCast* x) = 0; virtual void do_Invoke (Invoke* x) = 0; virtual void do_NewInstance (NewInstance* x) = 0; + virtual void do_NewValueTypeInstance(NewValueTypeInstance* x) = 0; virtual void do_NewTypeArray (NewTypeArray* x) = 0; virtual void do_NewObjectArray (NewObjectArray* x) = 0; virtual void do_NewMultiArray (NewMultiArray* x) = 0; @@ -357,6 +359,7 @@ enum InstructionFlag { NeedsNullCheckFlag = 0, + NeverNullFlag, // For "Q" signatures CanTrapFlag, DirectCompareFlag, IsEliminatedFlag, @@ -451,6 +454,8 @@ void set_needs_null_check(bool f) { set_flag(NeedsNullCheckFlag, f); } bool needs_null_check() const { return check_flag(NeedsNullCheckFlag); } + void set_never_null(bool f) { set_flag(NeverNullFlag, f); } + bool is_never_null() const { return check_flag(NeverNullFlag); } bool is_linked() const { return check_flag(IsLinkedInBlockFlag); } bool can_be_linked() { return as_Local() == NULL && as_Phi() == NULL; } @@ -503,6 +508,11 @@ return _next; } + bool is_flattened_array() const; // FIXME -- remove it + + bool is_loaded_flattened_array() const; + bool maybe_flattened_array(); + Instruction *insert_after_same_bci(Instruction *i) { #ifndef PRODUCT i->set_printable_bci(printable_bci()); @@ -550,6 +560,7 @@ virtual StateSplit* as_StateSplit() { return NULL; } virtual Invoke* as_Invoke() { return NULL; } virtual NewInstance* as_NewInstance() { return NULL; } + virtual NewValueTypeInstance* as_NewValueTypeInstance() { return NULL; } virtual NewArray* as_NewArray() { return NULL; } virtual NewTypeArray* 
as_NewTypeArray() { return NULL; } virtual NewObjectArray* as_NewObjectArray() { return NULL; } @@ -644,12 +655,14 @@ private: int _pf_flags; // the flags of the phi function int _index; // to value on operand stack (index < 0) or to local + ciType* _exact_type; // currently is set only for flattened arrays, NULL otherwise. public: // creation - Phi(ValueType* type, BlockBegin* b, int index) + Phi(ValueType* type, BlockBegin* b, int index, ciType* exact_type) : Instruction(type->base()) , _pf_flags(0) , _index(index) + , _exact_type(exact_type) { _block = b; NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci())); @@ -658,6 +671,14 @@ } } + virtual ciType* exact_type() const { + return _exact_type; + } + + virtual ciType* declared_type() const { + return _exact_type; + } + // flags enum Flag { no_flag = 0, @@ -703,12 +724,13 @@ ciType* _declared_type; public: // creation - Local(ciType* declared, ValueType* type, int index, bool receiver) + Local(ciType* declared, ValueType* type, int index, bool receiver, bool never_null) : Instruction(type) , _java_index(index) , _is_receiver(receiver) , _declared_type(declared) { + set_never_null(never_null); NOT_PRODUCT(set_printable_bci(-1)); } @@ -825,17 +847,24 @@ LEAF(LoadField, AccessField) + ciValueKlass* _value_klass; + Value _default_value; public: // creation LoadField(Value obj, int offset, ciField* field, bool is_static, - ValueStack* state_before, bool needs_patching) + ValueStack* state_before, bool needs_patching, + ciValueKlass* value_klass = NULL, Value default_value = NULL ) : AccessField(obj, offset, field, is_static, state_before, needs_patching) + , _value_klass(value_klass), _default_value(default_value) {} ciType* declared_type() const; // generic HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if needs patching or if volatile + + ciValueKlass* value_klass() const { return _value_klass;} + Value default_value() const { return 
_default_value; } }; @@ -947,6 +976,7 @@ LEAF(LoadIndexed, AccessIndexed) private: NullCheck* _explicit_null_check; // For explicit null check elimination + NewValueTypeInstance* _vt; public: // creation @@ -964,6 +994,9 @@ ciType* exact_type() const; ciType* declared_type() const; + NewValueTypeInstance* vt() { return _vt; } + void set_vt(NewValueTypeInstance* vt) { _vt = vt; } + // generic HASHING2(LoadIndexed, true, array()->subst(), index()->subst()) }; @@ -1002,6 +1035,8 @@ bool should_profile() const { return check_flag(ProfileMDOFlag); } ciMethod* profiled_method() const { return _profiled_method; } int profiled_bci() const { return _profiled_bci; } + // Flattened array support + bool is_exact_flattened_array_store() const; // generic virtual void input_values_do(ValueVisitor* f) { AccessIndexed::input_values_do(f); f->visit(&_value); } }; @@ -1254,7 +1289,7 @@ public: // creation Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args, - int vtable_index, ciMethod* target, ValueStack* state_before); + int vtable_index, ciMethod* target, ValueStack* state_before, bool never_null); // accessors Bytecodes::Code code() const { return _code; } @@ -1315,6 +1350,43 @@ ciType* declared_type() const; }; +LEAF(NewValueTypeInstance, StateSplit) + bool _is_unresolved; + ciValueKlass* _klass; + Value _depends_on; // Link to instance on with withfield was called on + +public: + + // Default creation, always allocated for now + NewValueTypeInstance(ciValueKlass* klass, ValueStack* state_before, bool is_unresolved, Value depends_on = NULL) + : StateSplit(instanceType, state_before) + , _is_unresolved(is_unresolved) + , _klass(klass) + { + if (depends_on == NULL) { + _depends_on = this; + } else { + _depends_on = depends_on; + } + set_never_null(true); + } + + // accessors + bool is_unresolved() const { return _is_unresolved; } + Value depends_on(); + + ciValueKlass* klass() const { return _klass; } + + virtual bool needs_exception_state() const { 
return false; } + + // generic + virtual bool can_trap() const { return true; } + ciType* exact_type() const; + ciType* declared_type() const; + + // Only done in LIR Generator -> map everything to object + void set_to_object_type() { set_type(instanceType); } +}; BASE(NewArray, StateSplit) private: @@ -1401,6 +1473,8 @@ StateSplit::input_values_do(f); for (int i = 0; i < _dims->length(); i++) f->visit(_dims->adr_at(i)); } + + ciType* exact_type() const; }; @@ -1447,8 +1521,10 @@ LEAF(CheckCast, TypeCheck) public: // creation - CheckCast(ciKlass* klass, Value obj, ValueStack* state_before) - : TypeCheck(klass, obj, objectType, state_before) {} + CheckCast(ciKlass* klass, Value obj, ValueStack* state_before, bool never_null = false) + : TypeCheck(klass, obj, objectType, state_before) { + set_never_null(never_null); + } void set_incompatible_class_change_check() { set_flag(ThrowIncompatibleClassChangeErrorFlag, true); @@ -1506,14 +1582,19 @@ LEAF(MonitorEnter, AccessMonitor) + bool _maybe_valuetype; public: // creation - MonitorEnter(Value obj, int monitor_no, ValueStack* state_before) + MonitorEnter(Value obj, int monitor_no, ValueStack* state_before, bool maybe_valuetype) : AccessMonitor(obj, monitor_no, state_before) + , _maybe_valuetype(maybe_valuetype) { ASSERT_VALUES } + // accessors + bool maybe_valuetype() const { return _maybe_valuetype; } + // generic virtual bool can_trap() const { return true; } }; --- old/src/hotspot/share/c1/c1_InstructionPrinter.cpp 2019-03-11 14:25:08.770355949 +0100 +++ new/src/hotspot/share/c1/c1_InstructionPrinter.cpp 2019-03-11 14:25:08.570355952 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "ci/ciArray.hpp" #include "ci/ciInstance.hpp" #include "ci/ciObject.hpp" +#include "ci/ciValueKlass.hpp" #ifndef PRODUCT @@ -44,6 +45,7 @@ case T_DOUBLE : return "double"; case T_ARRAY : return "array"; case T_OBJECT : return "object"; + case T_VALUETYPE : return "value type"; default : return "???"; } } @@ -516,6 +518,10 @@ output()->put(']'); } +void InstructionPrinter::do_NewValueTypeInstance(NewValueTypeInstance* x) { + output()->print("new value type instance "); + print_klass(x->klass()); +} void InstructionPrinter::do_NewObjectArray(NewObjectArray* x) { output()->print("new object array ["); --- old/src/hotspot/share/c1/c1_InstructionPrinter.hpp 2019-03-11 14:25:09.182355943 +0100 +++ new/src/hotspot/share/c1/c1_InstructionPrinter.hpp 2019-03-11 14:25:08.982355946 +0100 @@ -104,6 +104,7 @@ virtual void do_TypeCast (TypeCast* x); virtual void do_Invoke (Invoke* x); virtual void do_NewInstance (NewInstance* x); + virtual void do_NewValueTypeInstance(NewValueTypeInstance* x); virtual void do_NewTypeArray (NewTypeArray* x); virtual void do_NewObjectArray (NewObjectArray* x); virtual void do_NewMultiArray (NewMultiArray* x); --- old/src/hotspot/share/c1/c1_LIR.cpp 2019-03-11 14:25:09.590355937 +0100 +++ new/src/hotspot/share/c1/c1_LIR.cpp 2019-03-11 14:25:09.386355940 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -107,6 +107,7 @@ char LIR_OprDesc::type_char(BasicType t) { switch (t) { case T_ARRAY: + case T_VALUETYPE: t = T_OBJECT; case T_BOOLEAN: case T_CHAR: @@ -165,6 +166,7 @@ case T_OBJECT: case T_METADATA: case T_ARRAY: + case T_VALUETYPE: assert((kindfield == cpu_register || kindfield == stack_value) && size_field() == single_size, "must match"); break; @@ -311,7 +313,7 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, - CodeStub* stub) + CodeStub* stub, bool need_null_check) : LIR_Op(code, result, NULL) , _object(object) @@ -327,6 +329,7 @@ , _profiled_method(NULL) , _profiled_bci(-1) , _should_profile(false) + , _need_null_check(need_null_check) { if (code == lir_checkcast) { assert(info_for_exception != NULL, "checkcast throws exceptions"); @@ -354,6 +357,7 @@ , _profiled_method(NULL) , _profiled_bci(-1) , _should_profile(false) + , _need_null_check(true) { if (code == lir_store_check) { _stub = new ArrayStoreExceptionStub(object, info_for_exception); @@ -363,6 +367,18 @@ } } +LIR_OpFlattenedStoreCheck::LIR_OpFlattenedStoreCheck(LIR_Opr object, ciKlass* element_klass, + LIR_Opr tmp1, LIR_Opr tmp2, + CodeEmitInfo* info_for_exception) + : LIR_Op(lir_flattened_store_check, LIR_OprFact::illegalOpr, NULL) + , _object(object) + , _element_klass(element_klass) + , _tmp1(tmp1) + , _tmp2(tmp2) + , _info_for_exception(info_for_exception) +{ + _stub = new ArrayStoreExceptionStub(object, info_for_exception); +} LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) @@ -817,6 +833,7 @@ assert(opLock->_result->is_illegal(), "unused"); do_stub(opLock->_stub); + do_stub(opLock->_throw_imse_stub); break; } @@ 
-853,6 +870,19 @@ break; } +// LIR_OpFlattenedStoreCheck + case lir_flattened_store_check: { + assert(op->as_OpFlattenedStoreCheck() != NULL, "must be"); + LIR_OpFlattenedStoreCheck* opFlattenedStoreCheck = (LIR_OpFlattenedStoreCheck*)op; + + if (opFlattenedStoreCheck->_info_for_exception) do_info(opFlattenedStoreCheck->_info_for_exception); + if (opFlattenedStoreCheck->_object->is_valid()) do_temp(opFlattenedStoreCheck->_object); + if (opFlattenedStoreCheck->_tmp1->is_valid()) do_temp(opFlattenedStoreCheck->_tmp1); + if (opFlattenedStoreCheck->_tmp2->is_valid()) do_temp(opFlattenedStoreCheck->_tmp2); + do_stub(opFlattenedStoreCheck->_stub); + break; + } + // LIR_OpCompareAndSwap case lir_cas_long: case lir_cas_obj: @@ -1040,6 +1070,13 @@ } } +void LIR_OpFlattenedStoreCheck::emit_code(LIR_Assembler* masm) { + masm->emit_opFlattenedStoreCheck(this); + if (stub()) { + masm->append_code_stub(stub()); + } +} + void LIR_OpCompareAndSwap::emit_code(LIR_Assembler* masm) { masm->emit_compare_and_swap(this); } @@ -1053,6 +1090,9 @@ if (stub()) { masm->append_code_stub(stub()); } + if (throw_imse_stub()) { + masm->append_code_stub(throw_imse_stub()); + } } #ifdef ASSERT @@ -1354,7 +1394,7 @@ dst)); } -void LIR_List::lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) { +void LIR_List::lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub) { append(new LIR_OpLock( lir_lock, hdr, @@ -1362,7 +1402,8 @@ lock, scratch, stub, - info)); + info, + throw_imse_stub)); } void LIR_List::unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub) { @@ -1387,9 +1428,13 @@ void LIR_List::checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, - ciMethod* profiled_method, int profiled_bci) { + 
ciMethod* profiled_method, int profiled_bci, bool is_never_null) { + // If klass is non-nullable, LIRGenerator::do_CheckCast has already performed null-check + // on the object. + bool need_null_check = !is_never_null; LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass, - tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub); + tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub, + need_null_check); if (profiled_method != NULL) { c->set_profiled_method(profiled_method); c->set_profiled_bci(profiled_bci); @@ -1432,6 +1477,13 @@ } } +void LIR_List::flattened_store_check(LIR_Opr object, ciKlass* element_klass, + LIR_Opr tmp1, LIR_Opr tmp2, + CodeEmitInfo* info_for_exception) { + LIR_OpFlattenedStoreCheck* c = new LIR_OpFlattenedStoreCheck(object, element_klass, tmp1, tmp2, info_for_exception); + append(c); +} + void LIR_List::cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2, LIR_Opr result) { append(new LIR_OpCompareAndSwap(lir_cas_long, addr, cmp_value, new_value, t1, t2, result)); @@ -1724,6 +1776,8 @@ case lir_instanceof: s = "instanceof"; break; case lir_checkcast: s = "checkcast"; break; case lir_store_check: s = "store_check"; break; + // LIR_OpFlattenedStoreCheck + case lir_flattened_store_check: s = "flattened_store_check"; break; // LIR_OpCompareAndSwap case lir_cas_long: s = "cas_long"; break; case lir_cas_obj: s = "cas_obj"; break; @@ -1969,6 +2023,14 @@ if (info_for_exception() != NULL) out->print(" [bci:%d]", info_for_exception()->stack()->bci()); } +void LIR_OpFlattenedStoreCheck::print_instr(outputStream* out) const { + object()->print(out); out->print(" "); + element_klass()->print_name_on(out); out->print(" "); + tmp1()->print(out); out->print(" "); + tmp2()->print(out); out->print(" "); + if (info_for_exception() != NULL) out->print(" [bci:%d]", info_for_exception()->stack()->bci()); +} + // LIR_Op3 void LIR_Op3::print_instr(outputStream* out) const { 
--- old/src/hotspot/share/c1/c1_LIR.hpp 2019-03-11 14:25:10.002355932 +0100 +++ new/src/hotspot/share/c1/c1_LIR.hpp 2019-03-11 14:25:09.802355935 +0100 @@ -316,6 +316,7 @@ case T_INT: case T_ADDRESS: case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: case T_METADATA: return single_size; @@ -466,6 +467,7 @@ case T_FLOAT: return LIR_OprDesc::float_type; case T_DOUBLE: return LIR_OprDesc::double_type; case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: return LIR_OprDesc::object_type; case T_ADDRESS: return LIR_OprDesc::address_type; case T_METADATA: return LIR_OprDesc::metadata_type; @@ -651,6 +653,7 @@ LIR_Opr res; switch (type) { case T_OBJECT: // fall through + case T_VALUETYPE: // fall through case T_ARRAY: res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | @@ -756,6 +759,7 @@ static LIR_Opr stack(int index, BasicType type) { LIR_Opr res; switch (type) { + case T_VALUETYPE: // fall through case T_OBJECT: // fall through case T_ARRAY: res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | @@ -869,6 +873,7 @@ class LIR_OpUpdateCRC32; class LIR_OpLock; class LIR_OpTypeCheck; +class LIR_OpFlattenedStoreCheck; class LIR_OpCompareAndSwap; class LIR_OpProfileCall; class LIR_OpProfileType; @@ -983,6 +988,9 @@ , lir_checkcast , lir_store_check , end_opTypeCheck + , begin_opFlattenedStoreCheck + , lir_flattened_store_check + , end_opFlattenedStoreCheck , begin_opCompareAndSwap , lir_cas_long , lir_cas_obj @@ -1133,6 +1141,7 @@ virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } + virtual LIR_OpFlattenedStoreCheck* as_OpFlattenedStoreCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } virtual LIR_OpProfileType* as_OpProfileType() { return NULL; } @@ -1264,7 +1273,10 @@ unaligned = 1 << 9, src_objarray = 
1 << 10, dst_objarray = 1 << 11, - all_flags = (1 << 12) - 1 + always_slow_path = 1 << 12, + src_flat_check = 1 << 13, + dst_flat_check = 1 << 14, + all_flags = (1 << 15) - 1 }; LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, @@ -1557,11 +1569,12 @@ ciMethod* _profiled_method; int _profiled_bci; bool _should_profile; + bool _need_null_check; public: LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, - CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub); + CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, bool need_null_check = true); LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception); @@ -1583,13 +1596,41 @@ ciMethod* profiled_method() const { return _profiled_method; } int profiled_bci() const { return _profiled_bci; } bool should_profile() const { return _should_profile; } - + bool need_null_check() const { return _need_null_check; } virtual bool is_patching() { return _info_for_patch != NULL; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; } void print_instr(outputStream* out) const PRODUCT_RETURN; }; +// LIR_OpFlattenedStoreCheck +class LIR_OpFlattenedStoreCheck: public LIR_Op { + friend class LIR_OpVisitState; + + private: + LIR_Opr _object; + ciKlass* _element_klass; + LIR_Opr _tmp1; + LIR_Opr _tmp2; + CodeEmitInfo* _info_for_exception; + CodeStub* _stub; + +public: + LIR_OpFlattenedStoreCheck(LIR_Opr object, ciKlass* element_klass, LIR_Opr tmp1, LIR_Opr tmp2, + CodeEmitInfo* info_for_exception); + + LIR_Opr object() const { return _object; } + LIR_Opr tmp1() const { return _tmp1; } + LIR_Opr tmp2() const { return _tmp2; } + ciKlass* element_klass() const { return _element_klass; } + CodeEmitInfo* info_for_exception() 
const { return _info_for_exception; } + CodeStub* stub() const { return _stub; } + + virtual void emit_code(LIR_Assembler* masm); + virtual LIR_OpFlattenedStoreCheck* as_OpFlattenedStoreCheck() { return this; } + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +}; + // LIR_Op2 class LIR_Op2: public LIR_Op { friend class LIR_OpVisitState; @@ -1782,20 +1823,23 @@ LIR_Opr _lock; LIR_Opr _scratch; CodeStub* _stub; + CodeStub* _throw_imse_stub; public: - LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) + LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL) : LIR_Op(code, LIR_OprFact::illegalOpr, info) , _hdr(hdr) , _obj(obj) , _lock(lock) , _scratch(scratch) - , _stub(stub) {} + , _stub(stub) + , _throw_imse_stub(throw_imse_stub) {} LIR_Opr hdr_opr() const { return _hdr; } LIR_Opr obj_opr() const { return _obj; } LIR_Opr lock_opr() const { return _lock; } LIR_Opr scratch_opr() const { return _scratch; } CodeStub* stub() const { return _stub; } + CodeStub* throw_imse_stub() const { return _throw_imse_stub; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpLock* as_OpLock() { return this; } @@ -2230,7 +2274,7 @@ void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); } void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub); - void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info); + void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL); void set_24bit_fpu() { append(new LIR_Op0(lir_24bit_FPU )); } void restore_fpu() { append(new LIR_Op0(lir_reset_FPU )); } @@ -2244,11 +2288,12 @@ void instanceof(LIR_Opr result, LIR_Opr 
object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci); void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci); + void flattened_store_check(LIR_Opr object, ciKlass* element_klass, LIR_Opr tmp1, LIR_Opr tmp2, CodeEmitInfo* info_for_exception); void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, - ciMethod* profiled_method, int profiled_bci); + ciMethod* profiled_method, int profiled_bci, bool is_never_null); // MethodData* profiling void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass)); --- old/src/hotspot/share/c1/c1_LIRAssembler.cpp 2019-03-11 14:25:10.418355926 +0100 +++ new/src/hotspot/share/c1/c1_LIRAssembler.cpp 2019-03-11 14:25:10.218355929 +0100 @@ -59,6 +59,7 @@ } else if (patch->id() == PatchingStub::load_klass_id) { switch (code) { case Bytecodes::_new: + case Bytecodes::_defaultvalue: case Bytecodes::_anewarray: case Bytecodes::_multianewarray: case Bytecodes::_instanceof: @@ -620,6 +621,8 @@ check_icache(); } offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset()); + offsets()->set_value(CodeOffsets::Verified_Value_Entry, _masm->offset()); + offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, _masm->offset()); _masm->verified_entry(); build_frame(); offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset()); --- old/src/hotspot/share/c1/c1_LIRAssembler.hpp 2019-03-11 14:25:10.838355920 +0100 +++ new/src/hotspot/share/c1/c1_LIRAssembler.hpp 2019-03-11 14:25:10.634355923 +0100 @@ -198,6 +198,7 @@ void 
emit_alloc_obj(LIR_OpAllocObj* op); void emit_alloc_array(LIR_OpAllocArray* op); void emit_opTypeCheck(LIR_OpTypeCheck* op); + void emit_opFlattenedStoreCheck(LIR_OpFlattenedStoreCheck* op); void emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null); void emit_compare_and_swap(LIR_OpCompareAndSwap* op); void emit_lock(LIR_OpLock* op); --- old/src/hotspot/share/c1/c1_LIRGenerator.cpp 2019-03-11 14:25:11.246355915 +0100 +++ new/src/hotspot/share/c1/c1_LIRGenerator.cpp 2019-03-11 14:25:11.046355917 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,8 @@ #include "ci/ciInstance.hpp" #include "ci/ciObjArray.hpp" #include "ci/ciUtilities.hpp" +#include "ci/ciValueArrayKlass.hpp" +#include "ci/ciValueKlass.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/c1/barrierSetC1.hpp" #include "runtime/arguments.hpp" @@ -641,13 +643,14 @@ } -void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) { +void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, + CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) { if (!GenerateSynchronizationCode) return; // for slow path, use debug info for state after successful locking - CodeStub* slow_path = new MonitorEnterStub(object, lock, info); + CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch); __ load_stack_address_monitor(monitor_no, lock); // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter - __ lock_object(hdr, object, lock, 
scratch, slow_path, info_for_exception); + __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub); } @@ -789,6 +792,16 @@ // of the required checks for a fast case can be elided. int flags = LIR_OpArrayCopy::all_flags; + if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) { + flags &= ~LIR_OpArrayCopy::always_slow_path; + } + if (!src->maybe_flattened_array()) { + flags &= ~LIR_OpArrayCopy::src_flat_check; + } + if (!dst->maybe_flattened_array()) { + flags &= ~LIR_OpArrayCopy::dst_flat_check; + } + if (!src_objarray) flags &= ~LIR_OpArrayCopy::src_objarray; if (!dst_objarray) @@ -1530,10 +1543,19 @@ if (x->needs_null_check() && (needs_patching || MacroAssembler::needs_explicit_null_check(x->offset()))) { - // Emit an explicit null check because the offset is too large. - // If the class is not loaded and the object is NULL, we need to deoptimize to throw a - // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code. - __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching); + if (needs_patching && x->field()->signature()->starts_with("Q", 1)) { + // We are storing a field of type "QT;", but T is not yet loaded, so we don't + // know whether this field is flattened or not. Let's deoptimize and recompile. + CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info), + Deoptimization::Reason_unloaded, + Deoptimization::Action_make_not_entrant); + __ branch(lir_cond_always, T_ILLEGAL, stub); + } else { + // Emit an explicit null check because the offset is too large. + // If the class is not loaded and the object is NULL, we need to deoptimize to throw a + // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code. + __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching); + } } DecoratorSet decorators = IN_HEAP; @@ -1548,12 +1570,135 @@ value.result(), info != NULL ? 
new CodeEmitInfo(info) : NULL, info); } +// FIXME -- I can't find any other way to pass an address to access_load_at(). +class TempResolvedAddress: public Instruction { + public: + TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) { + set_operand(addr); + } + virtual void input_values_do(ValueVisitor*) {} + virtual void visit(InstructionVisitor* v) {} + virtual const char* name() const { return "TempResolvedAddress"; } +}; + +void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) { + // Find the starting address of the source (inside the array) + ciType* array_type = array.value()->declared_type(); + ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass(); + assert(value_array_klass->is_loaded(), "must be"); + + ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass(); + int array_header_size = value_array_klass->array_header_in_bytes(); + int shift = value_array_klass->log2_element_size(); + +#ifndef _LP64 + LIR_Opr index_op = new_register(T_INT); + // FIXME -- on 32-bit, the shift below can overflow, so we need to check that + // the top (shift+1) bits of index_op must be zero, or + // else throw ArrayIndexOutOfBoundsException + if (index.result()->is_constant()) { + jint const_index = index.result()->as_jint(); + __ move(LIR_OprFact::intConst(const_index << shift), index_op); + } else { + __ shift_left(index_op, shift, index.result()); + } +#else + LIR_Opr index_op = new_register(T_LONG); + if (index.result()->is_constant()) { + jint const_index = index.result()->as_jint(); + __ move(LIR_OprFact::longConst(const_index << shift), index_op); + } else { + __ convert(Bytecodes::_i2l, index.result(), index_op); + // Need to shift manually, as LIR_Address can scale only up to 3. 
+ __ shift_left(index_op, shift, index_op); + } +#endif + + LIR_Opr elm_op = new_pointer_register(); + LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS); + __ leal(LIR_OprFact::address(elm_address), elm_op); + + for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) { + ciField* inner_field = elem_klass->nonstatic_field_at(i); + assert(!inner_field->is_flattened(), "flattened fields must have been expanded"); + int obj_offset = inner_field->offset(); + int elm_offset = obj_offset - elem_klass->first_field_offset(); // object header is not stored in array. + + BasicType field_type = inner_field->type()->basic_type(); + switch (field_type) { + case T_BYTE: + case T_BOOLEAN: + case T_SHORT: + case T_CHAR: + field_type = T_INT; + break; + default: + break; + } + + LIR_Opr temp = new_register(field_type); + TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op); + LIRItem elm_item(elm_resolved_addr, this); + + DecoratorSet decorators = IN_HEAP; + if (is_load) { + access_load_at(decorators, field_type, + elm_item, LIR_OprFact::intConst(elm_offset), temp, + NULL, NULL); + access_store_at(decorators, field_type, + obj_item, LIR_OprFact::intConst(obj_offset), temp, + NULL, NULL); + } else { + access_load_at(decorators, field_type, + obj_item, LIR_OprFact::intConst(obj_offset), temp, + NULL, NULL); + access_store_at(decorators, field_type, + elm_item, LIR_OprFact::intConst(elm_offset), temp, + NULL, NULL); + } + } +} + +void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) { + LIR_Opr array_klass_reg = new_register(T_METADATA); + + __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg); + LIR_Opr layout = new_register(T_INT); + __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout); + __ shift_right(layout, Klass::_lh_array_tag_shift, layout); + __ 
cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value)); + __ branch(lir_cond_equal, T_ILLEGAL, slow_path); +} + +bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) { + if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) { + ciType* type = x->value()->declared_type(); + if (type != NULL && type->is_klass()) { + ciKlass* klass = type->as_klass(); + if (klass->is_loaded() && + !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) && + !klass->is_java_lang_Object() && + !klass->is_interface()) { + // This is known to be a non-flattenable object. If the array is flattened, + // it will be caught by the code generated by array_store_check(). + return false; + } + } + // We're not 100% sure, so let's do the flattened_array_store_check. + return true; + } + return false; +} + void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { assert(x->is_pinned(),""); + assert(x->elt_type() != T_ARRAY, "never used"); + bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array(); bool needs_range_check = x->compute_needs_range_check(); bool use_length = x->length() != NULL; - bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; - bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || + bool obj_store = x->elt_type() == T_OBJECT; + bool needs_store_check = obj_store && !is_loaded_flattened_array && + (x->value()->as_Constant() == NULL || !get_jobject_constant(x->value())->is_null_object() || x->should_profile()); @@ -1568,9 +1713,10 @@ if (use_length && needs_range_check) { length.set_instruction(x->length()); length.load_item(); - } - if (needs_store_check || x->check_boolean()) { + + if (needs_store_check || x->check_boolean() + || is_loaded_flattened_array || needs_flattened_array_store_check(x)) { value.load_item(); } else { value.load_for_store(x->elt_type()); @@ -1603,13 +1749,36 @@ array_store_check(value.result(), array.result(), 
store_check_info, x->profiled_method(), x->profiled_bci()); } - DecoratorSet decorators = IN_HEAP | IS_ARRAY; - if (x->check_boolean()) { - decorators |= C1_MASK_BOOLEAN; + if (is_loaded_flattened_array) { + if (!x->is_exact_flattened_array_store()) { + CodeEmitInfo* info = new CodeEmitInfo(range_check_info); + ciKlass* element_klass = x->array()->declared_type()->as_value_array_klass()->element_klass(); + flattened_array_store_check(value.result(), element_klass, info); + } else if (!x->value()->is_never_null()) { + __ null_check(value.result(), new CodeEmitInfo(range_check_info)); + } + access_flattened_array(false, array, index, value); + } else { + StoreFlattenedArrayStub* slow_path = NULL; + + if (needs_flattened_array_store_check(x)) { + // Check if we indeed have a flattened array + index.load_item(); + slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x)); + check_flattened_array(array, slow_path); + } + + DecoratorSet decorators = IN_HEAP | IS_ARRAY; + if (x->check_boolean()) { + decorators |= C1_MASK_BOOLEAN; + } + + access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), + NULL, null_check_info); + if (slow_path != NULL) { + __ branch_destination(slow_path->continuation()); + } } - - access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), - NULL, null_check_info); } void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type, @@ -1733,15 +1902,32 @@ (needs_patching || MacroAssembler::needs_explicit_null_check(x->offset()) || stress_deopt)) { - LIR_Opr obj = object.result(); - if (stress_deopt) { - obj = new_register(T_OBJECT); - __ move(LIR_OprFact::oopConst(NULL), obj); - } - // Emit an explicit null check because the offset is too large. - // If the class is not loaded and the object is NULL, we need to deoptimize to throw a - // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code. 
- __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching); + if (needs_patching && x->field()->signature()->starts_with("Q", 1)) { + // We are loading a field of type "QT;", but class T is not yet loaded. We don't know + // whether this field is flattened or not. Let's deoptimize and recompile. + CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info), + Deoptimization::Reason_unloaded, + Deoptimization::Action_make_not_entrant); + __ branch(lir_cond_always, T_ILLEGAL, stub); + } else { + LIR_Opr obj = object.result(); + if (stress_deopt) { + obj = new_register(T_OBJECT); + __ move(LIR_OprFact::oopConst(NULL), obj); + } + // Emit an explicit null check because the offset is too large. + // If the class is not loaded and the object is NULL, we need to deoptimize to throw a + // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code. + __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching); + } + } else if (x->value_klass() != NULL && x->default_value() == NULL) { + assert(x->is_static() && !x->value_klass()->is_loaded(), "must be"); + assert(needs_patching, "must be"); + // The value klass was not loaded so we don't know what its default value should be + CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info), + Deoptimization::Reason_unloaded, + Deoptimization::Action_make_not_entrant); + __ branch(lir_cond_always, T_ILLEGAL, stub); } DecoratorSet decorators = IN_HEAP; @@ -1756,6 +1942,18 @@ access_load_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()), result, info ? 
new CodeEmitInfo(info) : NULL, info); + + if (x->value_klass() != NULL && x->default_value() != NULL) { + LabelObj* L_end = new LabelObj(); + __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_notEqual, T_OBJECT, L_end->label()); + + LIRItem default_value(x->default_value(), this); + default_value.load_item(); + __ move(default_value.result(), result); + + __ branch_destination(L_end->label()); + } } @@ -1870,12 +2068,33 @@ } } - DecoratorSet decorators = IN_HEAP | IS_ARRAY; + if (x->array()->is_loaded_flattened_array()) { + // Find the destination address (of the NewValueTypeInstance) + LIR_Opr obj = x->vt()->operand(); + LIRItem obj_item(x->vt(), this); + + access_flattened_array(true, array, index, obj_item); + set_no_result(x); + } else { + LIR_Opr result = rlock_result(x, x->elt_type()); + LoadFlattenedArrayStub* slow_path = NULL; + + if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) { + index.load_item(); + // if we are loading from flattened array, load it using a runtime call + slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x)); + check_flattened_array(array, slow_path); + } + + DecoratorSet decorators = IN_HEAP | IS_ARRAY; + access_load_at(decorators, x->elt_type(), + array, index.result(), result, + NULL, null_check_info); - LIR_Opr result = rlock_result(x, x->elt_type()); - access_load_at(decorators, x->elt_type(), - array, index.result(), result, - NULL, null_check_info); + if (slow_path != NULL) { + __ branch_destination(slow_path->continuation()); + } + } } @@ -2735,6 +2954,7 @@ } else { LIR_Address* addr = loc->as_address_ptr(); param->load_for_store(addr->type()); + assert(addr->type() != T_VALUETYPE, "not supported yet"); if (addr->type() == T_OBJECT) { __ move_wide(param->result(), addr); } else --- old/src/hotspot/share/c1/c1_LIRGenerator.hpp 2019-03-11 14:25:11.894355906 +0100 +++ new/src/hotspot/share/c1/c1_LIRGenerator.hpp 2019-03-11 
14:25:11.594355910 +0100 @@ -266,6 +266,10 @@ void do_update_CRC32C(Intrinsic* x); void do_vectorizedMismatch(Intrinsic* x); + void access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item); + bool needs_flattened_array_store_check(StoreIndexed* x); + void check_flattened_array(LIRItem& array, CodeStub* slow_path); + public: LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info); LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info); @@ -314,6 +318,7 @@ // specific implementations void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci); + void flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info); static LIR_Opr result_register_for(ValueType* type, bool callee = false); @@ -360,7 +365,7 @@ void logic_op (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right); - void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info); + void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub); void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no); void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); @@ -565,6 +570,7 @@ virtual void do_TypeCast (TypeCast* x); virtual void do_Invoke (Invoke* x); virtual void do_NewInstance (NewInstance* x); + virtual void do_NewValueTypeInstance(NewValueTypeInstance* x); virtual void do_NewTypeArray (NewTypeArray* x); virtual void do_NewObjectArray (NewObjectArray* x); 
virtual void do_NewMultiArray (NewMultiArray* x); --- old/src/hotspot/share/c1/c1_LinearScan.cpp 2019-03-11 14:25:12.426355898 +0100 +++ new/src/hotspot/share/c1/c1_LinearScan.cpp 2019-03-11 14:25:12.214355901 +0100 @@ -61,9 +61,9 @@ // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words #ifdef _LP64 -static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 1, -1}; +static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 1, 2, -1}; #else -static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1}; +static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, 1, -1}; #endif --- old/src/hotspot/share/c1/c1_Optimizer.cpp 2019-03-11 14:25:12.906355892 +0100 +++ new/src/hotspot/share/c1/c1_Optimizer.cpp 2019-03-11 14:25:12.706355894 +0100 @@ -508,6 +508,7 @@ void do_TypeCast (TypeCast* x); void do_Invoke (Invoke* x); void do_NewInstance (NewInstance* x); + void do_NewValueTypeInstance(NewValueTypeInstance* x); void do_NewTypeArray (NewTypeArray* x); void do_NewObjectArray (NewObjectArray* x); void do_NewMultiArray (NewMultiArray* x); @@ -656,6 +657,7 @@ void handle_NullCheck (NullCheck* x); void handle_Invoke (Invoke* x); void handle_NewInstance (NewInstance* x); + void handle_NewValueTypeInstance(NewValueTypeInstance* x); void handle_NewArray (NewArray* x); void handle_AccessMonitor (AccessMonitor* x); void handle_Intrinsic (Intrinsic* x); @@ -694,6 +696,7 @@ void NullCheckVisitor::do_TypeCast (TypeCast* x) {} void NullCheckVisitor::do_Invoke (Invoke* x) { nce()->handle_Invoke(x); } void NullCheckVisitor::do_NewInstance (NewInstance* x) { nce()->handle_NewInstance(x); } +void NullCheckVisitor::do_NewValueTypeInstance(NewValueTypeInstance* x) { nce()->handle_NewValueTypeInstance(x); } void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); } void 
NullCheckVisitor::do_NewObjectArray (NewObjectArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewMultiArray (NewMultiArray* x) { nce()->handle_NewArray(x); } @@ -868,7 +871,7 @@ if (field->is_constant()) { ciConstant field_val = field->constant_value(); BasicType field_type = field_val.basic_type(); - if (field_type == T_OBJECT || field_type == T_ARRAY) { + if (field_type == T_OBJECT || field_type == T_ARRAY || field_type == T_VALUETYPE) { ciObject* obj_val = field_val.as_object(); if (!obj_val->is_null_object()) { if (PrintNullCheckElimination) { @@ -1046,6 +1049,13 @@ } } +void NullCheckEliminator::handle_NewValueTypeInstance(NewValueTypeInstance* x) { + set_put(x); + if (PrintNullCheckElimination) { + tty->print_cr("NewValueTypeInstance %d is non-null", x->id()); + } +} + void NullCheckEliminator::handle_NewArray(NewArray* x) { set_put(x); --- old/src/hotspot/share/c1/c1_RangeCheckElimination.hpp 2019-03-11 14:25:13.338355886 +0100 +++ new/src/hotspot/share/c1/c1_RangeCheckElimination.hpp 2019-03-11 14:25:13.118355889 +0100 @@ -143,6 +143,7 @@ void do_NullCheck (NullCheck* x) { /* nothing to do */ }; void do_TypeCast (TypeCast* x) { /* nothing to do */ }; void do_NewInstance (NewInstance* x) { /* nothing to do */ }; + void do_NewValueTypeInstance (NewValueTypeInstance* x) { /* nothing to do */ }; void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }; void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }; void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }; --- old/src/hotspot/share/c1/c1_Runtime1.cpp 2019-03-11 14:25:13.750355880 +0100 +++ new/src/hotspot/share/c1/c1_Runtime1.cpp 2019-03-11 14:25:13.550355883 +0100 @@ -52,6 +52,8 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" #include 
"runtime/compilationPolicy.hpp" @@ -119,6 +121,8 @@ int Runtime1::_new_object_array_slowcase_cnt = 0; int Runtime1::_new_instance_slowcase_cnt = 0; int Runtime1::_new_multi_array_slowcase_cnt = 0; +int Runtime1::_load_flattened_array_slowcase_cnt = 0; +int Runtime1::_store_flattened_array_slowcase_cnt = 0; int Runtime1::_monitorenter_slowcase_cnt = 0; int Runtime1::_monitorexit_slowcase_cnt = 0; int Runtime1::_patch_code_slowcase_cnt = 0; @@ -128,6 +132,7 @@ int Runtime1::_throw_null_pointer_exception_count = 0; int Runtime1::_throw_class_cast_exception_count = 0; int Runtime1::_throw_incompatible_class_change_error_count = 0; +int Runtime1::_throw_illegal_monitor_state_exception_count = 0; int Runtime1::_throw_array_store_exception_count = 0; int Runtime1::_throw_count = 0; @@ -385,9 +390,14 @@ // (This may have to change if this code changes!) assert(array_klass->is_klass(), "not a class"); Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive - Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass(); - objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); - thread->set_vm_result(obj); + Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass(); + if (elem_klass->is_value()) { + arrayOop obj = oopFactory::new_valueArray(elem_klass, length, CHECK); + thread->set_vm_result(obj); + } else { + objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); + thread->set_vm_result(obj); + } // This is pretty rare but this runtime patch is stressful to deoptimization // if we deoptimize here so force a deopt to stress the path. 
if (DeoptimizeALot) { @@ -407,6 +417,46 @@ JRT_END +JRT_ENTRY(void, Runtime1::load_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index)) + NOT_PRODUCT(_load_flattened_array_slowcase_cnt++;) + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array oop"); + assert(array->length() > 0 && index < array->length(), "already checked"); + + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + + // We have a non-empty flattened array, so the element type must have been initialized. + assert(vklass->is_initialized(), "must be"); + Handle holder(THREAD, vklass->klass_holder()); // keep the vklass alive + valueArrayHandle ha(THREAD, array); + oop obj = vklass->allocate_instance(CHECK); + + void* src = ha()->value_at_addr(index, vaklass->layout_helper()); + vklass->value_store(src, vklass->data_for_oop(obj), + vaklass->element_byte_size(), true, false); + thread->set_vm_result(obj); +JRT_END + + +JRT_ENTRY(void, Runtime1::store_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index, oopDesc* value)) + NOT_PRODUCT(_store_flattened_array_slowcase_cnt++;) + if (value == NULL) { + SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); + } else { + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array"); + assert(ArrayKlass::cast(klass)->element_klass() == value->klass(), "Store type incorrect"); + + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + const int lh = vaklass->layout_helper(); + vklass->value_store(vklass->data_for_oop(value), array->value_at_addr(index, lh), + vaklass->element_byte_size(), true, false); + } +JRT_END + + JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id)) tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id); JRT_END @@ -692,6 +742,13 @@ 
JRT_END +JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* thread)) + NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;) + ResourceMark rm(thread); + SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IllegalMonitorStateException()); +JRT_END + + JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock)) NOT_PRODUCT(_monitorenter_slowcase_cnt++;) if (PrintBiasedLockingStatistics) { @@ -957,6 +1014,11 @@ k = caller_method->constants()->klass_at(bnew.index(), CHECK); } break; + case Bytecodes::_defaultvalue: + { Bytecode_defaultvalue bdefaultvalue(caller_method(), caller_method->bcp_from(bci)); + k = caller_method->constants()->klass_at(bdefaultvalue.index(), CHECK); + } + break; case Bytecodes::_multianewarray: { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci)); k = caller_method->constants()->klass_at(mna.index(), CHECK); @@ -1483,6 +1545,8 @@ tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt); tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt); tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt); + tty->print_cr(" _load_flattened_array_slowcase_cnt: %d", _load_flattened_array_slowcase_cnt); + tty->print_cr(" _store_flattened_array_slowcase_cnt:%d", _store_flattened_array_slowcase_cnt); tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt); tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt); tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt); @@ -1493,6 +1557,7 @@ tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count); tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count); tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count); + 
tty->print_cr(" _throw_illegal_monitor_state_exception_count: %d:", _throw_illegal_monitor_state_exception_count); tty->print_cr(" _throw_array_store_exception_count: %d:", _throw_array_store_exception_count); tty->print_cr(" _throw_count: %d:", _throw_count); --- old/src/hotspot/share/c1/c1_Runtime1.hpp 2019-03-11 14:25:14.214355873 +0100 +++ new/src/hotspot/share/c1/c1_Runtime1.hpp 2019-03-11 14:25:13.986355877 +0100 @@ -51,13 +51,17 @@ stub(fast_new_instance_init_check) \ stub(new_type_array) \ stub(new_object_array) \ + stub(new_value_array) \ stub(new_multi_array) \ + stub(load_flattened_array) \ + stub(store_flattened_array) \ stub(handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \ stub(handle_exception) \ stub(handle_exception_from_callee) \ stub(throw_array_store_exception) \ stub(throw_class_cast_exception) \ stub(throw_incompatible_class_change_error) \ + stub(throw_illegal_monitor_state_exception) \ stub(slow_subtype_check) \ stub(monitorenter) \ stub(monitorenter_nofpu) /* optimized version that does not preserve fpu registers */ \ @@ -106,6 +110,8 @@ static int _new_object_array_slowcase_cnt; static int _new_instance_slowcase_cnt; static int _new_multi_array_slowcase_cnt; + static int _load_flattened_array_slowcase_cnt; + static int _store_flattened_array_slowcase_cnt; static int _monitorenter_slowcase_cnt; static int _monitorexit_slowcase_cnt; static int _patch_code_slowcase_cnt; @@ -115,6 +121,7 @@ static int _throw_null_pointer_exception_count; static int _throw_class_cast_exception_count; static int _throw_incompatible_class_change_error_count; + static int _throw_illegal_monitor_state_exception_count; static int _throw_array_store_exception_count; static int _throw_count; #endif @@ -142,6 +149,8 @@ static void new_type_array (JavaThread* thread, Klass* klass, jint length); static void new_object_array(JavaThread* thread, Klass* klass, jint length); static void new_multi_array (JavaThread* thread, Klass* klass, 
int rank, jint* dims); + static void load_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index); + static void store_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index, oopDesc* value); static address counter_overflow(JavaThread* thread, int bci, Method* method); @@ -155,6 +164,7 @@ static void throw_null_pointer_exception(JavaThread* thread); static void throw_class_cast_exception(JavaThread* thread, oopDesc* object); static void throw_incompatible_class_change_error(JavaThread* thread); + static void throw_illegal_monitor_state_exception(JavaThread* thread); static void throw_array_store_exception(JavaThread* thread, oopDesc* object); static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock); --- old/src/hotspot/share/c1/c1_ValueMap.hpp 2019-03-11 14:25:14.690355867 +0100 +++ new/src/hotspot/share/c1/c1_ValueMap.hpp 2019-03-11 14:25:14.446355870 +0100 @@ -186,6 +186,7 @@ void do_NullCheck (NullCheck* x) { /* nothing to do */ } void do_TypeCast (TypeCast* x) { /* nothing to do */ } void do_NewInstance (NewInstance* x) { /* nothing to do */ } + void do_NewValueTypeInstance (NewValueTypeInstance* x) { /* nothing to do */ } void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ } void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ } void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ } --- old/src/hotspot/share/c1/c1_ValueStack.cpp 2019-03-11 14:25:15.142355861 +0100 +++ new/src/hotspot/share/c1/c1_ValueStack.cpp 2019-03-11 14:25:14.918355864 +0100 @@ -172,22 +172,35 @@ return total_locks_size(); } +// When we merge two object slots, we usually lose the type information. +// However, for aaload/aastore to work with flattened arrays, we need to preserve +// the type info (because the aaload/aastore bytecode themselves don't carry the +// type info). 
+ciType* ValueStack::merge_types(Value existing_value, Value new_value) { + if (new_value->is_flattened_array() && + (existing_value == NULL || existing_value->is_flattened_array())) { + assert(existing_value == NULL || existing_value->exact_type() == new_value->exact_type(), + "must be guaranteed by verifier"); + return new_value->exact_type(); + } + return NULL; +} -void ValueStack::setup_phi_for_stack(BlockBegin* b, int index) { +void ValueStack::setup_phi_for_stack(BlockBegin* b, int index, Value existing_value, Value new_value) { assert(stack_at(index)->as_Phi() == NULL || stack_at(index)->as_Phi()->block() != b, "phi function already created"); ValueType* t = stack_at(index)->type(); - Value phi = new Phi(t, b, -index - 1); + Value phi = new Phi(t, b, -index - 1, merge_types(existing_value, new_value)); _stack.at_put(index, phi); assert(!t->is_double_word() || _stack.at(index + 1) == NULL, "hi-word of doubleword value must be NULL"); } -void ValueStack::setup_phi_for_local(BlockBegin* b, int index) { +void ValueStack::setup_phi_for_local(BlockBegin* b, int index, Value existing_value, Value new_value) { assert(local_at(index)->as_Phi() == NULL || local_at(index)->as_Phi()->block() != b, "phi function already created"); ValueType* t = local_at(index)->type(); - Value phi = new Phi(t, b, index); + Value phi = new Phi(t, b, index, merge_types(existing_value, new_value)); store_local(index, phi); } --- old/src/hotspot/share/c1/c1_ValueStack.hpp 2019-03-11 14:25:15.558355855 +0100 +++ new/src/hotspot/share/c1/c1_ValueStack.hpp 2019-03-11 14:25:15.350355858 +0100 @@ -204,8 +204,10 @@ Value lock_at(int i) const { return _locks.at(i); } // SSA form IR support - void setup_phi_for_stack(BlockBegin* b, int index); - void setup_phi_for_local(BlockBegin* b, int index); + void setup_phi_for_stack(BlockBegin* b, int index, Value existing_value, Value new_value); + void setup_phi_for_local(BlockBegin* b, int index, Value existing_value, Value new_value); + + ciType* 
merge_types(Value existing_value, Value new_value); // debugging void print() PRODUCT_RETURN; --- old/src/hotspot/share/c1/c1_ValueType.cpp 2019-03-11 14:25:15.998355849 +0100 +++ new/src/hotspot/share/c1/c1_ValueType.cpp 2019-03-11 14:25:15.786355852 +0100 @@ -135,6 +135,7 @@ case T_DOUBLE : return doubleType; case T_ARRAY : return arrayType; case T_OBJECT : return objectType; + case T_VALUETYPE: return objectType; case T_ADDRESS: return addressType; case T_ILLEGAL: return illegalType; default : ShouldNotReachHere(); @@ -154,6 +155,7 @@ case T_FLOAT : return new FloatConstant (value.as_float ()); case T_DOUBLE : return new DoubleConstant(value.as_double()); case T_ARRAY : // fall through (ciConstant doesn't have an array accessor) + case T_VALUETYPE: // fall through case T_OBJECT : { // TODO: Common the code with GraphBuilder::load_constant? ciObject* obj = value.as_object(); --- old/src/hotspot/share/ci/bcEscapeAnalyzer.cpp 2019-03-11 14:25:16.410355843 +0100 +++ new/src/hotspot/share/ci/bcEscapeAnalyzer.cpp 2019-03-11 14:25:16.206355846 +0100 @@ -557,6 +557,9 @@ set_global_escape(state.apop()); state.spop(); ArgumentMap arr = state.apop(); + // If the array is flattened, a larger part of it is modified than + // the size of a reference. However, if OFFSET_ANY is given as + // parameter to set_modified(), size is not taken into account. 
set_modified(arr, OFFSET_ANY, type2size[T_OBJECT]*HeapWordSize); break; } @@ -941,8 +944,24 @@ } break; case Bytecodes::_new: + case Bytecodes::_defaultvalue: state.apush(allocated_obj); break; + case Bytecodes::_withfield: { + bool will_link; + ciField* field = s.get_field(will_link); + BasicType field_type = field->type()->basic_type(); + if (field_type == T_OBJECT || field_type == T_ARRAY) { + set_global_escape(state.apop()); + } else if (type2size[field_type] == 1) { + state.spop(); + } else { + state.lpop(); + } + set_method_escape(state.apop()); + state.apush(allocated_obj); + break; + } case Bytecodes::_newarray: case Bytecodes::_anewarray: state.spop(); --- old/src/hotspot/share/ci/ciArray.hpp 2019-03-11 14:25:16.918355836 +0100 +++ new/src/hotspot/share/ci/ciArray.hpp 2019-03-11 14:25:16.638355840 +0100 @@ -31,6 +31,7 @@ #include "oops/arrayOop.hpp" #include "oops/objArrayOop.hpp" #include "oops/typeArrayOop.hpp" +#include "oops/valueArrayOop.hpp" // ciArray // @@ -44,6 +45,7 @@ ciArray( arrayHandle h_a) : ciObject(h_a), _length(h_a()->length()) {} ciArray( objArrayHandle h_a) : ciObject(h_a), _length(h_a()->length()) {} ciArray(typeArrayHandle h_a) : ciObject(h_a), _length(h_a()->length()) {} + ciArray(valueArrayHandle h_a): ciObject(h_a), _length(h_a()->length()) {} ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {} --- old/src/hotspot/share/ci/ciArrayKlass.cpp 2019-03-11 14:25:17.370355830 +0100 +++ new/src/hotspot/share/ci/ciArrayKlass.cpp 2019-03-11 14:25:17.146355833 +0100 @@ -27,6 +27,8 @@ #include "ci/ciObjArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp" #include "ci/ciUtilities.hpp" +#include "ci/ciValueArrayKlass.hpp" +#include "ci/ciValueKlass.hpp" // ciArrayKlass // @@ -59,7 +61,7 @@ if (is_type_array_klass()) { return ciType::make(as_type_array_klass()->element_type()); } else { - return as_obj_array_klass()->element_klass()->as_klass(); + return element_klass()->as_klass(); } } @@ -71,12 +73,14 @@ ciType* 
ciArrayKlass::base_element_type() { if (is_type_array_klass()) { return ciType::make(as_type_array_klass()->element_type()); - } else { + } else if (is_obj_array_klass()) { ciKlass* ek = as_obj_array_klass()->base_element_klass(); if (ek->is_type_array_klass()) { return ciType::make(ek->as_type_array_klass()->element_type()); } return ek; + } else { + return as_value_array_klass()->base_element_klass(); } } @@ -99,7 +103,13 @@ ciArrayKlass* ciArrayKlass::make(ciType* element_type) { if (element_type->is_primitive_type()) { return ciTypeArrayKlass::make(element_type->basic_type()); + } else if (element_type->is_valuetype() && element_type->as_value_klass()->flatten_array()) { + return ciValueArrayKlass::make(element_type->as_klass()); } else { return ciObjArrayKlass::make(element_type->as_klass()); } } + +int ciArrayKlass::array_header_in_bytes() { + return get_ArrayKlass()->array_header_in_bytes(); +} --- old/src/hotspot/share/ci/ciArrayKlass.hpp 2019-03-11 14:25:17.834355823 +0100 +++ new/src/hotspot/share/ci/ciArrayKlass.hpp 2019-03-11 14:25:17.610355827 +0100 @@ -56,7 +56,12 @@ bool is_array_klass() const { return true; } bool is_java_klass() const { return true; } + // The one-level type of the array elements. + virtual ciKlass* element_klass() { return NULL; } + static ciArrayKlass* make(ciType* element_type); + + int array_header_in_bytes(); }; #endif // SHARE_CI_CIARRAYKLASS_HPP --- old/src/hotspot/share/ci/ciClassList.hpp 2019-03-11 14:25:18.306355817 +0100 +++ new/src/hotspot/share/ci/ciClassList.hpp 2019-03-11 14:25:18.074355820 +0100 @@ -62,9 +62,12 @@ class ciReturnAddress; class ciKlass; class ciInstanceKlass; +class ciValueKlass; class ciArrayKlass; +class ciValueArrayKlass; class ciObjArrayKlass; class ciTypeArrayKlass; +class ciWrapper; // Simulate Java Language style package-private access with // friend declarations. 
@@ -112,9 +115,12 @@ friend class ciTypeArray; \ friend class ciType; \ friend class ciReturnAddress; \ +friend class ciWrapper; \ friend class ciKlass; \ friend class ciInstanceKlass; \ +friend class ciValueKlass; \ friend class ciArrayKlass; \ +friend class ciValueArrayKlass; \ friend class ciObjArrayKlass; \ friend class ciTypeArrayKlass; \ --- old/src/hotspot/share/ci/ciConstant.cpp 2019-03-11 14:25:18.834355810 +0100 +++ new/src/hotspot/share/ci/ciConstant.cpp 2019-03-11 14:25:18.590355813 +0100 @@ -56,6 +56,7 @@ case T_DOUBLE: tty->print("%lf", _value._double); break; + case T_VALUETYPE: case T_OBJECT: case T_ARRAY: _value._object->print(); --- old/src/hotspot/share/ci/ciConstant.hpp 2019-03-11 14:25:19.238355804 +0100 +++ new/src/hotspot/share/ci/ciConstant.hpp 2019-03-11 14:25:19.042355807 +0100 @@ -106,7 +106,7 @@ return _value._double; } ciObject* as_object() const { - assert(basic_type() == T_OBJECT || basic_type() == T_ARRAY, "wrong type"); + assert(basic_type() == T_OBJECT || basic_type() == T_ARRAY || basic_type() == T_VALUETYPE, "wrong type"); return _value._object; } --- old/src/hotspot/share/ci/ciEnv.cpp 2019-03-11 14:25:19.674355798 +0100 +++ new/src/hotspot/share/ci/ciEnv.cpp 2019-03-11 14:25:19.458355801 +0100 @@ -33,6 +33,7 @@ #include "ci/ciNullObject.hpp" #include "ci/ciReplay.hpp" #include "ci/ciUtilities.inline.hpp" +#include "ci/ciValueKlass.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" @@ -396,8 +397,8 @@ // Now we need to check the SystemDictionary Symbol* sym = name->get_symbol(); - if (sym->char_at(0) == 'L' && - sym->char_at(sym->utf8_length()-1) == ';') { + if ((sym->char_at(0) == 'L' || sym->char_at(0) == 'Q') && + sym->char_at(sym->utf8_length()-1) == ';') { // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym. 
TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1, @@ -451,7 +452,7 @@ // to be loaded if their element klasses are loaded, except when memory // is exhausted. if (sym->char_at(0) == '[' && - (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) { + (sym->char_at(1) == '[' || sym->char_at(1) == 'L' || sym->char_at(1) == 'Q')) { // We have an unloaded array. // Build it on the fly if the element class exists. TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1, @@ -466,7 +467,11 @@ require_local); if (elem_klass != NULL && elem_klass->is_loaded()) { // Now make an array for it - return ciObjArrayKlass::make_impl(elem_klass); + if (elem_klass->is_valuetype() && elem_klass->as_value_klass()->flatten_array()) { + return ciValueArrayKlass::make_impl(elem_klass); + } else { + return ciObjArrayKlass::make_impl(elem_klass); + } } } @@ -492,6 +497,21 @@ // Not yet loaded into the VM, or not governed by loader constraints. // Make a CI representative for it. + int i = 0; + while (sym->char_at(i) == '[') { + i++; + } + if (i > 0 && sym->char_at(i) == 'Q') { + // An unloaded array class of value types is an ObjArrayKlass, an + // unloaded value type class is an InstanceKlass. For consistency, + // make the signature of the unloaded array of value type use L + // rather than Q. + char *new_name = CURRENT_THREAD_ENV->name_buffer(sym->utf8_length()+1); + strncpy(new_name, (char*)sym->base(), sym->utf8_length()); + new_name[i] = 'L'; + new_name[sym->utf8_length()] = '\0'; + return get_unloaded_klass(accessing_klass, ciSymbol::make(new_name)); + } return get_unloaded_klass(accessing_klass, name); } @@ -522,7 +542,7 @@ klass_name = cpool->symbol_at(index); } else { // Check if it's resolved if it's not a symbol constant pool entry. - klass = ConstantPool::klass_at_if_loaded(cpool, index); + klass = ConstantPool::klass_at_if_loaded(cpool, index); // Try to look it up by name. 
if (klass == NULL) { klass_name = cpool->klass_name_at(index); @@ -575,6 +595,23 @@ } // ------------------------------------------------------------------ +// ciEnv::is_klass_never_null_impl +// +// Implementation of is_klass_never_null. +bool ciEnv::is_klass_never_null_impl(const constantPoolHandle& cpool, int index) { + Symbol* klass_name = cpool->klass_name_at(index); + return klass_name->is_Q_signature(); +} + +// ------------------------------------------------------------------ +// ciEnv::is_klass_never_null +// +// Get information about nullability from the constant pool. +bool ciEnv::is_klass_never_null(const constantPoolHandle& cpool, int index) { + GUARDED_VM_ENTRY(return is_klass_never_null_impl(cpool, index);) +} + +// ------------------------------------------------------------------ // ciEnv::get_constant_by_index_impl // // Implementation of get_constant_by_index(). --- old/src/hotspot/share/ci/ciEnv.hpp 2019-03-11 14:25:20.102355792 +0100 +++ new/src/hotspot/share/ci/ciEnv.hpp 2019-03-11 14:25:19.890355795 +0100 @@ -133,6 +133,8 @@ ciMethod* get_method_by_index(const constantPoolHandle& cpool, int method_index, Bytecodes::Code bc, ciInstanceKlass* loading_klass); + bool is_klass_never_null(const constantPoolHandle& cpool, + int klass_index); // Implementation methods for loading and constant pool access. 
ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass, @@ -151,6 +153,8 @@ ciMethod* get_method_by_index_impl(const constantPoolHandle& cpool, int method_index, Bytecodes::Code bc, ciInstanceKlass* loading_klass); + bool is_klass_never_null_impl(const constantPoolHandle& cpool, + int klass_index); // Helper methods bool check_klass_accessibility(ciKlass* accessing_klass, @@ -197,6 +201,10 @@ if (o == NULL) return NULL; return get_object(o)->as_instance(); } + ciValueArrayKlass* get_value_array_klass(Klass* o) { + if (o == NULL) return NULL; + return get_metadata(o)->as_value_array_klass(); + } ciObjArrayKlass* get_obj_array_klass(Klass* o) { if (o == NULL) return NULL; return get_metadata(o)->as_obj_array_klass(); @@ -473,6 +481,10 @@ void dump_replay_data(outputStream* out); void dump_replay_data_unsafe(outputStream* out); void dump_compile_data(outputStream* out); + + ciWrapper* make_never_null_wrapper(ciType* type) { + return _factory->make_never_null_wrapper(type); + } }; #endif // SHARE_CI_CIENV_HPP --- old/src/hotspot/share/ci/ciField.cpp 2019-03-11 14:25:20.530355786 +0100 +++ new/src/hotspot/share/ci/ciField.cpp 2019-03-11 14:25:20.318355789 +0100 @@ -68,7 +68,7 @@ // ------------------------------------------------------------------ // ciField::ciField ciField::ciField(ciInstanceKlass* klass, int index) : - _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) { + _is_flattened(false), _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) { ASSERT_IN_VM; CompilerThread *THREAD = CompilerThread::current(); @@ -91,7 +91,7 @@ // If the field is a pointer type, get the klass of the // field. - if (field_type == T_OBJECT || field_type == T_ARRAY) { + if (field_type == T_OBJECT || field_type == T_ARRAY || field_type == T_VALUETYPE) { bool ignore; // This is not really a class reference; the index always refers to the // field's type signature, as a symbol. Linkage checks do not apply. 
@@ -200,7 +200,7 @@ // If the field is a pointer type, get the klass of the // field. - if (field_type == T_OBJECT || field_type == T_ARRAY) { + if (field_type == T_OBJECT || field_type == T_ARRAY || field_type == T_VALUETYPE) { _type = NULL; // must call compute_type on first access } else { _type = ciType::make(field_type); @@ -213,6 +213,30 @@ "bootstrap classes must not create & cache unshared fields"); } +// Special copy constructor used to flatten value type fields by +// copying the fields of the value type to a new holder klass. +ciField::ciField(ciField* field, ciInstanceKlass* holder, int offset, bool is_final) { + assert(field->holder()->is_valuetype(), "should only be used for value type field flattening"); + // Set the is_final flag + jint final = is_final ? JVM_ACC_FINAL : ~JVM_ACC_FINAL; + AccessFlags flags(field->flags().as_int() & final); + _flags = ciFlags(flags); + _holder = holder; + _offset = offset; + // Copy remaining fields + _name = field->_name; + _signature = field->_signature; + _type = field->_type; + _is_constant = field->_is_constant; + _known_to_link_with_put = field->_known_to_link_with_put; + _known_to_link_with_get = field->_known_to_link_with_get; + _constant_value = field->_constant_value; + assert(!field->is_flattened(), "field must not be flattened"); + assert(!field->is_flattenable(), "field must not be flattenable"); + _is_flattened = false; + _is_flattenable = false; +} + static bool trust_final_non_static_fields(ciInstanceKlass* holder) { if (holder == NULL) return false; @@ -250,6 +274,8 @@ Klass* field_holder = fd->field_holder(); assert(field_holder != NULL, "null field_holder"); _holder = CURRENT_ENV->get_instance_klass(field_holder); + _is_flattened = fd->is_flattened(); + _is_flattenable = fd->is_flattenable(); // Check to see if the field is constant. 
Klass* k = _holder->get_Klass(); @@ -362,8 +388,8 @@ Bytecodes::Code bc) { VM_ENTRY_MARK; assert(bc == Bytecodes::_getstatic || bc == Bytecodes::_putstatic || - bc == Bytecodes::_getfield || bc == Bytecodes::_putfield, - "unexpected bytecode"); + bc == Bytecodes::_getfield || bc == Bytecodes::_putfield || + bc == Bytecodes::_withfield, "unexpected bytecode"); if (_offset == -1) { // at creation we couldn't link to our holder so we need to --- old/src/hotspot/share/ci/ciField.hpp 2019-03-11 14:25:20.946355780 +0100 +++ new/src/hotspot/share/ci/ciField.hpp 2019-03-11 14:25:20.734355783 +0100 @@ -49,6 +49,8 @@ ciType* _type; int _offset; bool _is_constant; + bool _is_flattened; + bool _is_flattenable; ciMethod* _known_to_link_with_put; ciInstanceKlass* _known_to_link_with_get; ciConstant _constant_value; @@ -58,6 +60,7 @@ ciField(ciInstanceKlass* klass, int index); ciField(fieldDescriptor* fd); + ciField(ciField* field, ciInstanceKlass* holder, int offset, bool is_final); // shared constructor code void initialize_from(fieldDescriptor* fd); @@ -102,7 +105,7 @@ ciType* type() { return (_type == NULL) ? compute_type() : _type; } // How is this field actually stored in memory? - BasicType layout_type() { return type2field[(_type == NULL) ? T_OBJECT : _type->basic_type()]; } + BasicType layout_type() { return type2field[type()->basic_type()]; } // How big is this field in memory? int size_in_bytes() { return type2aelembytes(layout_type()); } @@ -174,6 +177,9 @@ bool is_stable () const { return flags().is_stable(); } bool is_volatile () const { return flags().is_volatile(); } bool is_transient () const { return flags().is_transient(); } + bool is_flattened () const { return _is_flattened; } + bool is_flattenable () const { return _is_flattenable; } + // The field is modified outside of instance initializer methods // (or class/initializer methods if the field is static). 
bool has_initialized_final_update() const { return flags().has_initialized_final_update(); } --- old/src/hotspot/share/ci/ciInstance.cpp 2019-03-11 14:25:21.370355775 +0100 +++ new/src/hotspot/share/ci/ciInstance.cpp 2019-03-11 14:25:21.158355777 +0100 @@ -70,6 +70,7 @@ case T_FLOAT: return ciConstant(obj->float_field(offset)); case T_DOUBLE: return ciConstant(obj->double_field(offset)); case T_LONG: return ciConstant(obj->long_field(offset)); + case T_VALUETYPE: // fall through case T_OBJECT: // fall through case T_ARRAY: { oop o = obj->obj_field(offset); @@ -100,7 +101,8 @@ ciConstant ciInstance::field_value(ciField* field) { assert(is_loaded(), "invalid access - must be loaded"); assert(field->holder()->is_loaded(), "invalid access - holder must be loaded"); - assert(field->is_static() || klass()->is_subclass_of(field->holder()), "invalid access - must be subclass"); + assert(field->is_static() || field->holder()->is_valuetype() || klass()->is_subclass_of(field->holder()), + "invalid access - must be subclass"); GUARDED_VM_ENTRY(return field_value_impl(field->type()->basic_type(), field->offset());) } --- old/src/hotspot/share/ci/ciInstanceKlass.cpp 2019-03-11 14:25:21.846355768 +0100 +++ new/src/hotspot/share/ci/ciInstanceKlass.cpp 2019-03-11 14:25:21.622355771 +0100 @@ -27,12 +27,14 @@ #include "ci/ciInstance.hpp" #include "ci/ciInstanceKlass.hpp" #include "ci/ciUtilities.inline.hpp" +#include "ci/ciValueKlass.hpp" #include "classfile/systemDictionary.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" +#include "oops/valueKlass.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.inline.hpp" @@ -63,8 +65,9 @@ _has_nonstatic_fields = ik->has_nonstatic_fields(); _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods(); _is_unsafe_anonymous = 
ik->is_unsafe_anonymous(); - _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: + _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields _has_injected_fields = -1; + _vcc_klass = NULL; _implementor = NULL; // we will fill these lazily // Ensure that the metadata wrapped by the ciMetadata is kept alive by GC. @@ -113,15 +116,17 @@ // Version for unloaded classes: ciInstanceKlass::ciInstanceKlass(ciSymbol* name, - jobject loader, jobject protection_domain) - : ciKlass(name, T_OBJECT) + jobject loader, jobject protection_domain, + BasicType bt) + : ciKlass(name, bt) { assert(name->char_at(0) != '[', "not an instance klass"); _init_state = (InstanceKlass::ClassState)0; _nonstatic_field_size = -1; _has_nonstatic_fields = false; - _nonstatic_fields = NULL; + _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields _has_injected_fields = -1; + _vcc_klass = NULL; _is_unsafe_anonymous = false; _loader = loader; _protection_domain = protection_domain; @@ -416,6 +421,29 @@ return field; } +ciField* ciInstanceKlass::get_non_flattened_field_by_offset(int field_offset) { + if (super() != NULL && super()->has_nonstatic_fields()) { + ciField* f = super()->get_non_flattened_field_by_offset(field_offset); + if (f != NULL) { + return f; + } + } + + VM_ENTRY_MARK; + InstanceKlass* k = get_instanceKlass(); + Arena* arena = CURRENT_ENV->arena(); + for (JavaFieldStream fs(k); !fs.done(); fs.next()) { + if (fs.access_flags().is_static()) continue; + fieldDescriptor& fd = fs.field_descriptor(); + if (fd.offset() == field_offset) { + ciField* f = new (arena) ciField(&fd); + return f; + } + } + + return NULL; +} + // ------------------------------------------------------------------ // ciInstanceKlass::get_field_by_name ciField* ciInstanceKlass::get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static) { @@ -483,18 +511,11 @@ } } - int flen = fields->length(); - - // Now sort them by offset, ascending. 
- // (In principle, they could mix with superclass fields.) - fields->sort(sort_field_by_offset); _nonstatic_fields = fields; - return flen; + return fields->length(); } -GrowableArray* -ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray* - super_fields) { +GrowableArray* ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray* super_fields, bool flatten) { ASSERT_IN_VM; Arena* arena = CURRENT_ENV->arena(); int flen = 0; @@ -512,6 +533,7 @@ if (super_fields != NULL) { flen += super_fields->length(); } + fields = new (arena) GrowableArray(arena, flen, 0, NULL); if (super_fields != NULL) { fields->appendAll(super_fields); @@ -520,10 +542,33 @@ for (JavaFieldStream fs(k); !fs.done(); fs.next()) { if (fs.access_flags().is_static()) continue; fieldDescriptor& fd = fs.field_descriptor(); - ciField* field = new (arena) ciField(&fd); - fields->append(field); + if (fd.is_flattened() && flatten) { + // Value type fields are embedded + int field_offset = fd.offset(); + // Get ValueKlass and adjust number of fields + Klass* k = get_instanceKlass()->get_value_field_klass(fd.index()); + ciValueKlass* vk = CURRENT_ENV->get_klass(k)->as_value_klass(); + flen += vk->nof_nonstatic_fields() - 1; + // Iterate over fields of the flattened value type and copy them to 'this' + for (int i = 0; i < vk->nof_nonstatic_fields(); ++i) { + ciField* flattened_field = vk->nonstatic_field_at(i); + // Adjust offset to account for missing oop header + int offset = field_offset + (flattened_field->offset() - vk->first_field_offset()); + // A flattened field can be treated as final if the non-flattened + // field is declared final or the holder klass is a value type itself. 
+ bool is_final = fd.is_final() || is_valuetype(); + ciField* field = new (arena) ciField(flattened_field, this, offset, is_final); + fields->append(field); + } + } else { + ciField* field = new (arena) ciField(&fd); + fields->append(field); + } } assert(fields->length() == flen, "sanity"); + // Now sort them by offset, ascending. + // (In principle, they could mix with superclass fields.) + fields->sort(sort_field_by_offset); return fields; } @@ -631,6 +676,10 @@ return NULL; } +ciInstanceKlass* ciInstanceKlass::vcc_klass() { + return NULL; +} + // Utility class for printing of the contents of the static fields for // use by compilation replay. It only prints out the information that // could be consumed by the compiler, so for primitive types it prints @@ -639,70 +688,123 @@ // only value which statically unchangeable. For all other reference // types it simply prints out the dynamic type. -class StaticFinalFieldPrinter : public FieldClosure { +class StaticFieldPrinter : public FieldClosure { +protected: outputStream* _out; +public: + StaticFieldPrinter(outputStream* out) : + _out(out) { + } + void do_field_helper(fieldDescriptor* fd, oop obj, bool flattened); +}; + +class StaticFinalFieldPrinter : public StaticFieldPrinter { const char* _holder; public: StaticFinalFieldPrinter(outputStream* out, const char* holder) : - _out(out), - _holder(holder) { + StaticFieldPrinter(out), _holder(holder) { } void do_field(fieldDescriptor* fd) { if (fd->is_final() && !fd->has_initial_value()) { ResourceMark rm; - oop mirror = fd->field_holder()->java_mirror(); - _out->print("staticfield %s %s %s ", _holder, fd->name()->as_quoted_ascii(), fd->signature()->as_quoted_ascii()); - switch (fd->field_type()) { - case T_BYTE: _out->print_cr("%d", mirror->byte_field(fd->offset())); break; - case T_BOOLEAN: _out->print_cr("%d", mirror->bool_field(fd->offset())); break; - case T_SHORT: _out->print_cr("%d", mirror->short_field(fd->offset())); break; - case T_CHAR: _out->print_cr("%d", 
mirror->char_field(fd->offset())); break; - case T_INT: _out->print_cr("%d", mirror->int_field(fd->offset())); break; - case T_LONG: _out->print_cr(INT64_FORMAT, (int64_t)(mirror->long_field(fd->offset()))); break; - case T_FLOAT: { - float f = mirror->float_field(fd->offset()); - _out->print_cr("%d", *(int*)&f); - break; - } - case T_DOUBLE: { - double d = mirror->double_field(fd->offset()); - _out->print_cr(INT64_FORMAT, *(int64_t*)&d); - break; - } - case T_ARRAY: // fall-through - case T_OBJECT: { - oop value = mirror->obj_field_acquire(fd->offset()); - if (value == NULL) { - _out->print_cr("null"); - } else if (value->is_instance()) { - assert(fd->field_type() == T_OBJECT, ""); - if (value->is_a(SystemDictionary::String_klass())) { - const char* ascii_value = java_lang_String::as_quoted_ascii(value); - _out->print("\"%s\"", (ascii_value != NULL) ? ascii_value : ""); - } else { - const char* klass_name = value->klass()->name()->as_quoted_ascii(); - _out->print_cr("%s", klass_name); - } - } else if (value->is_array()) { - typeArrayOop ta = (typeArrayOop)value; - _out->print("%d", ta->length()); - if (value->is_objArray()) { - objArrayOop oa = (objArrayOop)value; - const char* klass_name = value->klass()->name()->as_quoted_ascii(); - _out->print(" %s", klass_name); - } - _out->cr(); - } else { - ShouldNotReachHere(); - } - break; + InstanceKlass* holder = fd->field_holder(); + oop mirror = holder->java_mirror(); + _out->print("staticfield %s %s ", _holder, fd->name()->as_quoted_ascii()); + BasicType bt = fd->field_type(); + if (bt != T_OBJECT && bt != T_ARRAY) { + _out->print("%s ", fd->signature()->as_quoted_ascii()); + } + do_field_helper(fd, mirror, false); + _out->cr(); + } + } +}; + +class ValueTypeFieldPrinter : public StaticFieldPrinter { + oop _obj; +public: + ValueTypeFieldPrinter(outputStream* out, oop obj) : + StaticFieldPrinter(out), _obj(obj) { + } + void do_field(fieldDescriptor* fd) { + do_field_helper(fd, _obj, true); + _out->print(" "); + } +}; + 
+void StaticFieldPrinter::do_field_helper(fieldDescriptor* fd, oop mirror, bool flattened) { + BasicType bt = fd->field_type(); + switch (bt) { + case T_BYTE: _out->print("%d", mirror->byte_field(fd->offset())); break; + case T_BOOLEAN: _out->print("%d", mirror->bool_field(fd->offset())); break; + case T_SHORT: _out->print("%d", mirror->short_field(fd->offset())); break; + case T_CHAR: _out->print("%d", mirror->char_field(fd->offset())); break; + case T_INT: _out->print("%d", mirror->int_field(fd->offset())); break; + case T_LONG: _out->print(INT64_FORMAT, (int64_t)(mirror->long_field(fd->offset()))); break; + case T_FLOAT: { + float f = mirror->float_field(fd->offset()); + _out->print("%d", *(int*)&f); + break; + } + case T_DOUBLE: { + double d = mirror->double_field(fd->offset()); + _out->print(INT64_FORMAT, *(int64_t*)&d); + break; + } + case T_ARRAY: // fall-through + case T_OBJECT: { + oop value = mirror->obj_field_acquire(fd->offset()); + if (value == NULL) { + _out->print_cr("null"); + } else if (value->is_instance()) { + assert(fd->field_type() == T_OBJECT, ""); + if (value->is_a(SystemDictionary::String_klass())) { + const char* ascii_value = java_lang_String::as_quoted_ascii(value); + _out->print("\"%s\"", (ascii_value != NULL) ? 
ascii_value : ""); + } else { + const char* klass_name = value->klass()->name()->as_quoted_ascii(); + _out->print_cr("%s", klass_name); } - default: - ShouldNotReachHere(); + } else if (value->is_array()) { + typeArrayOop ta = (typeArrayOop)value; + _out->print("%d", ta->length()); + if (value->is_objArray() || value->is_valueArray()) { + objArrayOop oa = (objArrayOop)value; + const char* klass_name = value->klass()->name()->as_quoted_ascii(); + _out->print(" %s", klass_name); } + _out->cr(); + } else { + ShouldNotReachHere(); + } + break; + } + case T_VALUETYPE: { + ResetNoHandleMark rnhm; + Thread* THREAD = Thread::current(); + SignatureStream ss(fd->signature(), false); + Symbol* name = ss.as_symbol(THREAD); + assert(!HAS_PENDING_EXCEPTION, "can resolve klass?"); + InstanceKlass* holder = fd->field_holder(); + Klass* k = SystemDictionary::find(name, Handle(THREAD, holder->class_loader()), + Handle(THREAD, holder->protection_domain()), THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "can resolve klass?"); + ValueKlass* vk = ValueKlass::cast(k); + oop obj; + if (flattened) { + int field_offset = fd->offset() - vk->first_field_offset(); + obj = (oop)((address)mirror + field_offset); + } else { + obj = mirror->obj_field_acquire(fd->offset()); + } + ValueTypeFieldPrinter print_field(_out, obj); + vk->do_nonstatic_fields(&print_field); + break; } + default: + ShouldNotReachHere(); } -}; +} void ciInstanceKlass::dump_replay_data(outputStream* out) { --- old/src/hotspot/share/ci/ciInstanceKlass.hpp 2019-03-11 14:25:22.302355762 +0100 +++ new/src/hotspot/share/ci/ciInstanceKlass.hpp 2019-03-11 14:25:22.082355765 +0100 @@ -67,8 +67,11 @@ ciConstantPoolCache* _field_cache; // cached map index->field GrowableArray* _nonstatic_fields; + int _has_injected_fields; // any non static injected fields? lazily initialized. + ciInstanceKlass* _vcc_klass; // points to the value-capable class corresponding to the current derived value type class. 
+ // The possible values of the _implementor fall into following three cases: // NULL: no implementor. // A ciInstanceKlass that's not itself: one implementor. @@ -80,7 +83,7 @@ protected: ciInstanceKlass(Klass* k); - ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain); + ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain, BasicType bt = T_OBJECT); // for unloaded klasses InstanceKlass* get_instanceKlass() const { return InstanceKlass::cast(get_Klass()); @@ -104,8 +107,8 @@ void compute_shared_init_state(); bool compute_shared_has_subklass(); - int compute_nonstatic_fields(); - GrowableArray* compute_nonstatic_fields_impl(GrowableArray* super_fields); + virtual int compute_nonstatic_fields(); + GrowableArray* compute_nonstatic_fields_impl(GrowableArray* super_fields, bool flatten = true); // Update the init_state for shared klasses void update_if_shared(InstanceKlass::ClassState expected) { @@ -189,13 +192,16 @@ ciInstanceKlass* get_canonical_holder(int offset); ciField* get_field_by_offset(int field_offset, bool is_static); ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static); + // get field descriptor at field_offset ignoring flattening + ciField* get_non_flattened_field_by_offset(int field_offset); // total number of nonstatic fields (including inherited): int nof_nonstatic_fields() { - if (_nonstatic_fields == NULL) + if (_nonstatic_fields == NULL) { return compute_nonstatic_fields(); - else + } else { return _nonstatic_fields->length(); + } } bool has_injected_fields() { @@ -217,7 +223,7 @@ bool has_finalizable_subclass(); bool contains_field_offset(int offset) { - return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size()); + return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size(), is_valuetype()); } // Get the instance of java.lang.Class corresponding to @@ -238,6 +244,7 @@ bool is_leaf_type(); ciInstanceKlass* implementor(); + ciInstanceKlass* 
vcc_klass(); // Is the defining class loader of this class the default loader? bool uses_default_loader() const; --- old/src/hotspot/share/ci/ciKlass.hpp 2019-03-11 14:25:22.778355755 +0100 +++ new/src/hotspot/share/ci/ciKlass.hpp 2019-03-11 14:25:22.526355759 +0100 @@ -44,6 +44,7 @@ friend class ciMethod; friend class ciMethodData; friend class ciObjArrayKlass; + friend class ciValueArrayKlass; friend class ciReceiverTypeData; private: --- old/src/hotspot/share/ci/ciMetadata.hpp 2019-03-11 14:25:23.238355749 +0100 +++ new/src/hotspot/share/ci/ciMetadata.hpp 2019-03-11 14:25:23.022355752 +0100 @@ -57,9 +57,12 @@ virtual bool is_method_data() const { return false; } virtual bool is_klass() const { return false; } virtual bool is_instance_klass() const { return false; } + virtual bool is_valuetype() const { return false; } virtual bool is_array_klass() const { return false; } + virtual bool is_value_array_klass() const { return false; } virtual bool is_obj_array_klass() const { return false; } virtual bool is_type_array_klass() const { return false; } + virtual bool is_wrapper() const { return false; } virtual void dump_replay_data(outputStream* st) { /* do nothing */ } ciMethod* as_method() { @@ -94,6 +97,10 @@ assert(is_array_klass(), "bad cast"); return (ciArrayKlass*)this; } + ciValueArrayKlass* as_value_array_klass() { + assert(is_value_array_klass(), "bad cast"); + return (ciValueArrayKlass*)this; + } ciObjArrayKlass* as_obj_array_klass() { assert(is_obj_array_klass(), "bad cast"); return (ciObjArrayKlass*)this; @@ -102,10 +109,18 @@ assert(is_type_array_klass(), "bad cast"); return (ciTypeArrayKlass*)this; } + ciValueKlass* as_value_klass() { + assert(is_valuetype(), "bad cast"); + return (ciValueKlass*)this; + } + ciWrapper* as_wrapper() { + assert(is_wrapper(), "bad cast"); + return (ciWrapper*)this; + } Metadata* constant_encoding() { return _metadata; } - bool equals(ciMetadata* obj) const { return (this == obj); } + virtual bool equals(ciMetadata* obj) 
const { return (this == obj); } int hash() { return ident() * 31; } // ??? --- old/src/hotspot/share/ci/ciMethod.cpp 2019-03-11 14:25:23.658355743 +0100 +++ new/src/hotspot/share/ci/ciMethod.cpp 2019-03-11 14:25:23.446355746 +0100 @@ -47,6 +47,7 @@ #include "prims/nativeLookup.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/sharedRuntime.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/xmlstream.hpp" #ifdef COMPILER2 @@ -1420,6 +1421,7 @@ static BasicType erase_to_word_type(BasicType bt) { if (is_subword_type(bt)) return T_INT; if (bt == T_ARRAY) return T_OBJECT; + if (bt == T_VALUETYPE) return T_OBJECT; return bt; } @@ -1507,3 +1509,16 @@ } // ------------------------------------------------------------------ + +bool ciMethod::has_scalarized_args() const { + VM_ENTRY_MARK; + return get_Method()->has_scalarized_args(); +} + +const GrowableArray* ciMethod::get_sig_cc() { + VM_ENTRY_MARK; + if (get_Method()->adapter() == NULL) { + return NULL; + } + return get_Method()->adapter()->get_sig_cc(); +} --- old/src/hotspot/share/ci/ciMethod.hpp 2019-03-11 14:25:24.114355737 +0100 +++ new/src/hotspot/share/ci/ciMethod.hpp 2019-03-11 14:25:23.882355740 +0100 @@ -368,6 +368,10 @@ void print_short_name(outputStream* st = tty); static bool is_consistent_info(ciMethod* declared_method, ciMethod* resolved_method); + + // Support for the value type calling convention + bool has_scalarized_args() const; + const GrowableArray* get_sig_cc(); }; #endif // SHARE_CI_CIMETHOD_HPP --- old/src/hotspot/share/ci/ciMethodType.cpp 2019-03-11 14:25:25.094355723 +0100 +++ new/src/hotspot/share/ci/ciMethodType.cpp 2019-03-11 14:25:24.858355726 +0100 @@ -38,9 +38,10 @@ } } -ciType* ciMethodType::rtype() const { +ciType* ciMethodType::rtype(bool& never_null) const { GUARDED_VM_ENTRY( oop rtype = java_lang_invoke_MethodType::rtype(get_oop()); + never_null = (java_lang_Class::value_mirror(rtype) == rtype); return 
class_to_citype(rtype); ) } @@ -53,9 +54,10 @@ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());) } -ciType* ciMethodType::ptype_at(int index) const { +ciType* ciMethodType::ptype_at(int index, bool& never_null) const { GUARDED_VM_ENTRY( oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index); + never_null = (java_lang_Class::value_mirror(ptype) == ptype); return class_to_citype(ptype); ) } --- old/src/hotspot/share/ci/ciMethodType.hpp 2019-03-11 14:25:25.506355717 +0100 +++ new/src/hotspot/share/ci/ciMethodType.hpp 2019-03-11 14:25:25.306355720 +0100 @@ -40,12 +40,12 @@ // What kind of ciObject is this? bool is_method_type() const { return true; } - ciType* rtype() const; + ciType* rtype(bool& never_null) const; int ptype_count() const; int ptype_slot_count() const ; - ciType* ptype_at(int index) const; + ciType* ptype_at(int index, bool& never_null) const; }; #endif // SHARE_CI_CIMETHODTYPE_HPP --- old/src/hotspot/share/ci/ciObjArrayKlass.cpp 2019-03-11 14:25:25.906355712 +0100 +++ new/src/hotspot/share/ci/ciObjArrayKlass.cpp 2019-03-11 14:25:25.706355715 +0100 @@ -65,7 +65,8 @@ dimension, T_OBJECT) { _base_element_klass = base_element_klass; assert(_base_element_klass->is_instance_klass() || - _base_element_klass->is_type_array_klass(), "bad base klass"); + _base_element_klass->is_type_array_klass() || + _base_element_klass->is_value_array_klass(), "bad base klass"); if (dimension == 1) { _element_klass = base_element_klass; } else { --- old/src/hotspot/share/ci/ciObject.hpp 2019-03-11 14:25:26.310355706 +0100 +++ new/src/hotspot/share/ci/ciObject.hpp 2019-03-11 14:25:26.110355709 +0100 @@ -122,6 +122,7 @@ virtual bool is_array() { return false; } virtual bool is_obj_array() { return false; } virtual bool is_type_array() { return false; } + virtual bool is_value_array() { return false; } // Is this a type or value which has no associated class? // It is true of primitive types and null objects. 
--- old/src/hotspot/share/ci/ciObjectFactory.cpp 2019-03-11 14:25:26.734355700 +0100 +++ new/src/hotspot/share/ci/ciObjectFactory.cpp 2019-03-11 14:25:26.510355703 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "ci/ciCallSite.hpp" #include "ci/ciInstance.hpp" #include "ci/ciInstanceKlass.hpp" +#include "ci/ciValueKlass.hpp" #include "ci/ciMemberName.hpp" #include "ci/ciMethod.hpp" #include "ci/ciMethodData.hpp" @@ -40,6 +41,8 @@ #include "ci/ciTypeArray.hpp" #include "ci/ciTypeArrayKlass.hpp" #include "ci/ciUtilities.inline.hpp" +#include "ci/ciValueArray.hpp" +#include "ci/ciValueArrayKlass.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/systemDictionary.hpp" #include "gc/shared/collectedHeap.inline.hpp" @@ -148,7 +151,8 @@ for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) { BasicType t = (BasicType)i; - if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP && t != T_NARROWKLASS) { + if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && + t != T_VALUETYPE && t != T_NARROWOOP && t != T_NARROWKLASS) { ciType::_basic_types[t] = new (_arena) ciType(t); init_ident_of(ciType::_basic_types[t]); } @@ -362,6 +366,9 @@ } else if (o->is_typeArray()) { typeArrayHandle h_ta(THREAD, (typeArrayOop)o); return new (arena()) ciTypeArray(h_ta); + } else if (o->is_valueArray()) { + valueArrayHandle h_ta(THREAD, (valueArrayOop)o); + return new (arena()) ciValueArray(h_ta); } // The oop is of some type not supported by the compiler interface. 
@@ -381,8 +388,12 @@ if (o->is_klass()) { Klass* k = (Klass*)o; - if (k->is_instance_klass()) { + if (k->is_value()) { + return new (arena()) ciValueKlass(k); + } else if (k->is_instance_klass()) { return new (arena()) ciInstanceKlass(k); + } else if (k->is_valueArray_klass()) { + return new (arena()) ciValueArrayKlass(k); } else if (k->is_objArray_klass()) { return new (arena()) ciObjArrayKlass(k); } else if (k->is_typeArray_klass()) { @@ -497,7 +508,7 @@ int dimension = fd.dimension(); assert(element_type != T_ARRAY, "unsuccessful decomposition"); ciKlass* element_klass = NULL; - if (element_type == T_OBJECT) { + if (element_type == T_OBJECT || element_type == T_VALUETYPE) { ciEnv *env = CURRENT_THREAD_ENV; ciSymbol* ci_name = env->get_symbol(fd.object_key()); element_klass = @@ -628,6 +639,12 @@ return new_ret_addr; } +ciWrapper* ciObjectFactory::make_never_null_wrapper(ciType* type) { + ciWrapper* wrapper = new (arena()) ciWrapper(type, /* never_null */ true); + init_ident_of(wrapper); + return wrapper; +} + // ------------------------------------------------------------------ // ciObjectFactory::init_ident_of void ciObjectFactory::init_ident_of(ciBaseObject* obj) { --- old/src/hotspot/share/ci/ciObjectFactory.hpp 2019-03-11 14:25:27.226355694 +0100 +++ new/src/hotspot/share/ci/ciObjectFactory.hpp 2019-03-11 14:25:26.970355697 +0100 @@ -144,6 +144,8 @@ void print_contents(); void print(); + + ciWrapper* make_never_null_wrapper(ciType* type); }; #endif // SHARE_CI_CIOBJECTFACTORY_HPP --- old/src/hotspot/share/ci/ciReplay.cpp 2019-03-11 14:25:27.734355687 +0100 +++ new/src/hotspot/share/ci/ciReplay.cpp 2019-03-11 14:25:27.486355690 +0100 @@ -36,6 +36,7 @@ #include "oops/constantPool.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "utilities/copy.hpp" @@ -708,6 +709,7 @@ } break; } + case JVM_CONSTANT_Long: case 
JVM_CONSTANT_Double: parsed_two_word = i + 1; @@ -754,43 +756,104 @@ } } - // Initialize a class and fill in the value for a static field. - // This is useful when the compile was dependent on the value of - // static fields but it's impossible to properly rerun the static - // initiailizer. - void process_staticfield(TRAPS) { - InstanceKlass* k = (InstanceKlass *)parse_klass(CHECK); - - if (k == NULL || ReplaySuppressInitializers == 0 || - (ReplaySuppressInitializers == 2 && k->class_loader() == NULL)) { - return; - } - - assert(k->is_initialized(), "must be"); - - const char* field_name = parse_escaped_string(); - const char* field_signature = parse_string(); - fieldDescriptor fd; - Symbol* name = SymbolTable::lookup(field_name, (int)strlen(field_name), CHECK); - Symbol* sig = SymbolTable::lookup(field_signature, (int)strlen(field_signature), CHECK); - if (!k->find_local_field(name, sig, &fd) || - !fd.is_static() || - fd.has_initial_value()) { - report_error(field_name); - return; + class ValueTypeFieldInitializer : public FieldClosure { + oop _vt; + CompileReplay* _replay; + public: + ValueTypeFieldInitializer(oop vt, CompileReplay* replay) + : _vt(vt), _replay(replay) {} + + void do_field(fieldDescriptor* fd) { + BasicType bt = fd->field_type(); + const char* string_value = bt != T_VALUETYPE ? 
_replay->parse_escaped_string() : NULL; + switch (bt) { + case T_BYTE: { + int value = atoi(string_value); + _vt->byte_field_put(fd->offset(), value); + break; + } + case T_BOOLEAN: { + int value = atoi(string_value); + _vt->bool_field_put(fd->offset(), value); + break; + } + case T_SHORT: { + int value = atoi(string_value); + _vt->short_field_put(fd->offset(), value); + break; + } + case T_CHAR: { + int value = atoi(string_value); + _vt->char_field_put(fd->offset(), value); + break; + } + case T_INT: { + int value = atoi(string_value); + _vt->int_field_put(fd->offset(), value); + break; + } + case T_LONG: { + jlong value; + if (sscanf(string_value, JLONG_FORMAT, &value) != 1) { + fprintf(stderr, "Error parsing long: %s\n", string_value); + break; + } + _vt->long_field_put(fd->offset(), value); + break; + } + case T_FLOAT: { + float value = atof(string_value); + _vt->float_field_put(fd->offset(), value); + break; + } + case T_DOUBLE: { + double value = atof(string_value); + _vt->double_field_put(fd->offset(), value); + break; + } + case T_ARRAY: + case T_OBJECT: { + Thread* THREAD = Thread::current(); + bool res = _replay->process_staticfield_reference(string_value, _vt, fd, THREAD); + assert(res, "should succeed for arrays & objects"); + break; + } + case T_VALUETYPE: { + Thread* THREAD = Thread::current(); + SignatureStream ss(fd->signature(), false); + InstanceKlass* holder = fd->field_holder(); + Klass* k = ss.as_klass(Handle(THREAD, holder->class_loader()), + Handle(THREAD, holder->protection_domain()), + SignatureStream::ReturnNull, THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "can resolve klass?"); + ValueKlass* vk = ValueKlass::cast(k); + if (fd->is_flattened()) { + int field_offset = fd->offset() - vk->first_field_offset(); + oop obj = (oop)((address)_vt + field_offset); + ValueTypeFieldInitializer init_fields(obj, _replay); + vk->do_nonstatic_fields(&init_fields); + } else { + oop value = vk->allocate_instance(THREAD); + 
_vt->obj_field_put(fd->offset(), value); + } + break; + } + default: { + fatal("Unhandled type: %s", type2name(bt)); + } + } } + }; - oop java_mirror = k->java_mirror(); + bool process_staticfield_reference(const char* field_signature, oop java_mirror, fieldDescriptor* fd, TRAPS) { if (field_signature[0] == '[') { int length = parse_int("array length"); oop value = NULL; if (field_signature[1] == '[') { // multi dimensional array - ArrayKlass* kelem = (ArrayKlass *)parse_klass(CHECK); - if (kelem == NULL) { - return; - } + Klass* k = resolve_klass(field_signature, CHECK_(true)); + ArrayKlass* kelem = (ArrayKlass *)k; int rank = 0; while (field_signature[rank] == '[') { rank++; @@ -800,70 +863,120 @@ for (int i = 1; i < rank; i++) { dims[i] = 1; // These aren't relevant to the compiler } - value = kelem->multi_allocate(rank, dims, CHECK); + value = kelem->multi_allocate(rank, dims, CHECK_(true)); } else { if (strcmp(field_signature, "[B") == 0) { - value = oopFactory::new_byteArray(length, CHECK); + value = oopFactory::new_byteArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[Z") == 0) { - value = oopFactory::new_boolArray(length, CHECK); + value = oopFactory::new_boolArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[C") == 0) { - value = oopFactory::new_charArray(length, CHECK); + value = oopFactory::new_charArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[S") == 0) { - value = oopFactory::new_shortArray(length, CHECK); + value = oopFactory::new_shortArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[F") == 0) { - value = oopFactory::new_floatArray(length, CHECK); + value = oopFactory::new_floatArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[D") == 0) { - value = oopFactory::new_doubleArray(length, CHECK); + value = oopFactory::new_doubleArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[I") == 0) { - value = oopFactory::new_intArray(length, CHECK); + value = 
oopFactory::new_intArray(length, CHECK_(true)); } else if (strcmp(field_signature, "[J") == 0) { - value = oopFactory::new_longArray(length, CHECK); + value = oopFactory::new_longArray(length, CHECK_(true)); } else if (field_signature[0] == '[' && field_signature[1] == 'L') { - Klass* kelem = resolve_klass(field_signature + 1, CHECK); - value = oopFactory::new_objArray(kelem, length, CHECK); + Klass* kelem = resolve_klass(field_signature + 1, CHECK_(true)); + value = oopFactory::new_array(kelem, length, CHECK_(true)); } else { report_error("unhandled array staticfield"); } } + java_mirror->obj_field_put(fd->offset(), value); + return true; + } else if (strcmp(field_signature, "Ljava/lang/String;") == 0) { + const char* string_value = parse_escaped_string(); + Handle value = java_lang_String::create_from_str(string_value, CHECK_(true)); + java_mirror->obj_field_put(fd->offset(), value()); + return true; + } else if (field_signature[0] == 'L') { + Klass* k = resolve_klass(field_signature, CHECK_(true)); + oop value = InstanceKlass::cast(k)->allocate_instance(CHECK_(true)); + java_mirror->obj_field_put(fd->offset(), value); + return true; + } + return false; + } + + // Initialize a class and fill in the value for a static field. + // This is useful when the compile was dependent on the value of + // static fields but it's impossible to properly rerun the static + // initializer. 
+ void process_staticfield(TRAPS) { + InstanceKlass* k = (InstanceKlass *)parse_klass(CHECK); + + if (k == NULL || ReplaySuppressInitializers == 0 || + (ReplaySuppressInitializers == 2 && k->class_loader() == NULL)) { + return; + } + + assert(k->is_initialized(), "must be"); + + const char* field_name = parse_escaped_string(); + const char* field_signature = parse_string(); + fieldDescriptor fd; + Symbol* name = SymbolTable::lookup(field_name, (int)strlen(field_name), CHECK); + Symbol* sig = SymbolTable::lookup(field_signature, (int)strlen(field_signature), CHECK); + if (!k->find_local_field(name, sig, &fd) || + !fd.is_static() || + fd.has_initial_value()) { + report_error(field_name); + return; + } + + oop java_mirror = k->java_mirror(); + if (strcmp(field_signature, "I") == 0) { + const char* string_value = parse_escaped_string(); + int value = atoi(string_value); + java_mirror->int_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "B") == 0) { + const char* string_value = parse_escaped_string(); + int value = atoi(string_value); + java_mirror->byte_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "C") == 0) { + const char* string_value = parse_escaped_string(); + int value = atoi(string_value); + java_mirror->char_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "S") == 0) { + const char* string_value = parse_escaped_string(); + int value = atoi(string_value); + java_mirror->short_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "Z") == 0) { + const char* string_value = parse_escaped_string(); + int value = atoi(string_value); + java_mirror->bool_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "J") == 0) { + const char* string_value = parse_escaped_string(); + jlong value; + if (sscanf(string_value, JLONG_FORMAT, &value) != 1) { + fprintf(stderr, "Error parsing long: %s\n", string_value); + return; + } + java_mirror->long_field_put(fd.offset(), value); + } else if 
(strcmp(field_signature, "F") == 0) { + const char* string_value = parse_escaped_string(); + float value = atof(string_value); + java_mirror->float_field_put(fd.offset(), value); + } else if (strcmp(field_signature, "D") == 0) { + const char* string_value = parse_escaped_string(); + double value = atof(string_value); + java_mirror->double_field_put(fd.offset(), value); + } else if (field_signature[0] == 'Q') { + Klass* kelem = resolve_klass(field_signature, CHECK); + ValueKlass* vk = ValueKlass::cast(kelem); + oop value = vk->allocate_instance(CHECK); + ValueTypeFieldInitializer init_fields(value, this); + vk->do_nonstatic_fields(&init_fields); java_mirror->obj_field_put(fd.offset(), value); } else { - const char* string_value = parse_escaped_string(); - if (strcmp(field_signature, "I") == 0) { - int value = atoi(string_value); - java_mirror->int_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "B") == 0) { - int value = atoi(string_value); - java_mirror->byte_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "C") == 0) { - int value = atoi(string_value); - java_mirror->char_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "S") == 0) { - int value = atoi(string_value); - java_mirror->short_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "Z") == 0) { - int value = atoi(string_value); - java_mirror->bool_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "J") == 0) { - jlong value; - if (sscanf(string_value, JLONG_FORMAT, &value) != 1) { - fprintf(stderr, "Error parsing long: %s\n", string_value); - return; - } - java_mirror->long_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "F") == 0) { - float value = atof(string_value); - java_mirror->float_field_put(fd.offset(), value); - } else if (strcmp(field_signature, "D") == 0) { - double value = atof(string_value); - java_mirror->double_field_put(fd.offset(), value); - } else if (strcmp(field_signature, 
"Ljava/lang/String;") == 0) { - Handle value = java_lang_String::create_from_str(string_value, CHECK); - java_mirror->obj_field_put(fd.offset(), value()); - } else if (field_signature[0] == 'L') { - Klass* k = resolve_klass(string_value, CHECK); - oop value = InstanceKlass::cast(k)->allocate_instance(CHECK); - java_mirror->obj_field_put(fd.offset(), value); - } else { + bool res = process_staticfield_reference(field_signature, java_mirror, &fd, CHECK); + if (!res) { report_error("unhandled staticfield"); } } --- old/src/hotspot/share/ci/ciSignature.cpp 2019-03-11 14:25:28.146355681 +0100 +++ new/src/hotspot/share/ci/ciSignature.cpp 2019-03-11 14:25:27.946355684 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,6 +68,9 @@ ciSymbol* klass_name = env->get_symbol(name); type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false); } + if (type->is_valuetype() && ss.type() == T_VALUETYPE) { + type = env->make_never_null_wrapper(type); + } } _types->append(type); if (ss.at_return_type()) { @@ -91,12 +94,23 @@ { ASSERT_IN_VM; EXCEPTION_CONTEXT; - Arena* arena = CURRENT_ENV->arena(); + ciEnv* env = CURRENT_ENV; + Arena* arena = env->arena(); _types = new (arena) GrowableArray(arena, _count + 1, 0, NULL); + ciType* type = NULL; + bool never_null = false; for (int i = 0; i < _count; i++) { - _types->append(method_type->ptype_at(i)); + type = method_type->ptype_at(i, never_null); + if (type->is_valuetype() && never_null) { + type = env->make_never_null_wrapper(type); + } + _types->append(type); } - _types->append(method_type->rtype()); + type = method_type->rtype(never_null); + if (type->is_valuetype() && never_null) { + type = env->make_never_null_wrapper(type); + } + 
_types->append(type); } // ------------------------------------------------------------------ @@ -104,7 +118,7 @@ // // What is the return type of this signature? ciType* ciSignature::return_type() const { - return _types->at(_count); + return _types->at(_count)->unwrap(); } // ------------------------------------------------------------------ @@ -115,7 +129,24 @@ ciType* ciSignature::type_at(int index) const { assert(index < _count, "out of bounds"); // The first _klasses element holds the return klass. - return _types->at(index); + return _types->at(index)->unwrap(); +} + +// ------------------------------------------------------------------ +// ciSignature::return_never_null +// +// True if we statically know that the return value is never null. +bool ciSignature::returns_never_null() const { + return _types->at(_count)->is_never_null(); +} + +// ------------------------------------------------------------------ +// ciSignature::never_null_at +// +// True if we statically know that the argument at 'index' is never null. 
+bool ciSignature::is_never_null_at(int index) const { + assert(index < _count, "out of bounds"); + return _types->at(index)->is_never_null(); } // ------------------------------------------------------------------ --- old/src/hotspot/share/ci/ciSignature.hpp 2019-03-11 14:25:28.594355675 +0100 +++ new/src/hotspot/share/ci/ciSignature.hpp 2019-03-11 14:25:28.346355678 +0100 @@ -60,6 +60,8 @@ ciType* return_type() const; ciType* type_at(int index) const; + bool returns_never_null() const; + bool is_never_null_at(int index) const; int size() const { return _size; } int count() const { return _count; } --- old/src/hotspot/share/ci/ciStreams.cpp 2019-03-11 14:25:29.062355668 +0100 +++ new/src/hotspot/share/ci/ciStreams.cpp 2019-03-11 14:25:28.834355671 +0100 @@ -171,6 +171,7 @@ case Bytecodes::_anewarray: case Bytecodes::_multianewarray: case Bytecodes::_new: + case Bytecodes::_defaultvalue: case Bytecodes::_newarray: return get_index_u2(); default: @@ -191,6 +192,16 @@ } // ------------------------------------------------------------------ +// ciBytecodeStream::is_klass_never_null +// +// Get information about nullability from the constant pool. 
+bool ciBytecodeStream::is_klass_never_null() const { + VM_ENTRY_MARK; + constantPoolHandle cpool(_method->get_Method()->constants()); + return CURRENT_ENV->is_klass_never_null(cpool, get_klass_index()); +} + +// ------------------------------------------------------------------ // ciBytecodeStream::get_constant_raw_index // // If this bytecode is one of the ldc variants, get the index of the @@ -259,7 +270,8 @@ assert(cur_bc() == Bytecodes::_getfield || cur_bc() == Bytecodes::_putfield || cur_bc() == Bytecodes::_getstatic || - cur_bc() == Bytecodes::_putstatic, "wrong bc"); + cur_bc() == Bytecodes::_putstatic || + cur_bc() == Bytecodes::_withfield, "wrong bc"); return get_index_u2_cpcache(); } --- old/src/hotspot/share/ci/ciStreams.hpp 2019-03-11 14:25:29.490355662 +0100 +++ new/src/hotspot/share/ci/ciStreams.hpp 2019-03-11 14:25:29.286355665 +0100 @@ -221,6 +221,7 @@ // or checkcast, get the referenced klass. ciKlass* get_klass(bool& will_link); int get_klass_index() const; + bool is_klass_never_null() const; // If this bytecode is one of the ldc variants, get the referenced // constant. 
Do not attempt to resolve it, since that would require @@ -287,6 +288,14 @@ } } + bool is_never_null() { + if (at_return_type()) { + return _sig->returns_never_null(); + } else { + return _sig->is_never_null_at(_pos); + } + } + // next klass in the signature ciKlass* next_klass() { ciKlass* sig_k; --- old/src/hotspot/share/ci/ciSymbol.hpp 2019-03-11 14:25:29.898355657 +0100 +++ new/src/hotspot/share/ci/ciSymbol.hpp 2019-03-11 14:25:29.694355659 +0100 @@ -46,6 +46,7 @@ friend class ciMethod; friend class ciField; friend class ciObjArrayKlass; + friend class ciValueArrayKlass; private: const vmSymbols::SID _sid; --- old/src/hotspot/share/ci/ciType.cpp 2019-03-11 14:25:30.302355651 +0100 +++ new/src/hotspot/share/ci/ciType.cpp 2019-03-11 14:25:30.098355654 +0100 @@ -34,8 +34,8 @@ // ciType // -// This class represents either a class (T_OBJECT), array (T_ARRAY), -// or one of the primitive types such as T_INT. +// This class represents either a class (T_OBJECT), value (T_VALUETYPE), +// array (T_ARRAY),or one of the primitive types such as T_INT. // ------------------------------------------------------------------ // ciType::ciType @@ -46,7 +46,7 @@ } ciType::ciType(Klass* k) : ciMetadata(k) { - _basic_type = k->is_array_klass() ? T_ARRAY : T_OBJECT; + _basic_type = k->is_array_klass() ? T_ARRAY : (k->is_value() ? T_VALUETYPE : T_OBJECT); } @@ -105,6 +105,7 @@ // ciType::box_klass // ciKlass* ciType::box_klass() { + assert(basic_type() != T_VALUETYPE, "value type boxing not yet supported"); if (!is_primitive_type()) return this->as_klass(); // reference types are "self boxing" // Void is "boxed" with a null. --- old/src/hotspot/share/ci/ciType.hpp 2019-03-11 14:25:30.758355645 +0100 +++ new/src/hotspot/share/ci/ciType.hpp 2019-03-11 14:25:30.506355648 +0100 @@ -29,12 +29,13 @@ // ciType // -// This class represents either a class (T_OBJECT), array (T_ARRAY), -// or one of the primitive types such as T_INT. 
+// This class represents either a class (T_OBJECT), value (T_VALUETYPE), +// array (T_ARRAY), or one of the primitive types such as T_INT. class ciType : public ciMetadata { CI_PACKAGE_ACCESS friend class ciKlass; friend class ciReturnAddress; + friend class ciWrapper; private: BasicType _basic_type; @@ -67,7 +68,7 @@ ciKlass* box_klass(); // Returns true if this is not a klass or array (i.e., not a reference type). - bool is_primitive_type() const { return basic_type() != T_OBJECT && basic_type() != T_ARRAY; } + bool is_primitive_type() const { return basic_type() != T_OBJECT && basic_type() != T_ARRAY && basic_type() != T_VALUETYPE; } int size() const { return type2size[basic_type()]; } bool is_void() const { return basic_type() == T_VOID; } bool is_one_word() const { return size() == 1; } @@ -77,6 +78,9 @@ bool is_type() const { return true; } bool is_classless() const { return is_primitive_type(); } + virtual ciType* unwrap() { return this; } + virtual bool is_never_null() const { return false; } + const char* name(); virtual void print_name_on(outputStream* st); void print_name() { @@ -112,4 +116,38 @@ static ciReturnAddress* make(int bci); }; +// ciWrapper +// +// This class wraps another type to carry additional information like nullability. +// Should only be instantiated and used by ciTypeFlow and ciSignature. 
+class ciWrapper : public ciType { + CI_PACKAGE_ACCESS + +private: + ciType* _type; + bool _never_null; + + ciWrapper(ciType* type, bool never_null) : ciType(type->basic_type()) { + assert(type->is_valuetype(), "should only be used for value types"); + _type = type; + _never_null = never_null; + } + + const char* type_string() { return "ciWrapper"; } + + void print_impl(outputStream* st) { _type->print_impl(st); } + +public: + bool equals(ciMetadata* obj) const { + return obj->is_wrapper() && + obj->as_wrapper()->unwrap()->equals(_type) && + obj->as_wrapper()->is_never_null() == _never_null; + } + + bool is_wrapper() const { return true; } + + ciType* unwrap() { return _type; } + bool is_never_null() const { return _never_null; } +}; + #endif // SHARE_CI_CITYPE_HPP --- old/src/hotspot/share/ci/ciTypeFlow.cpp 2019-03-11 14:25:31.258355638 +0100 +++ new/src/hotspot/share/ci/ciTypeFlow.cpp 2019-03-11 14:25:31.038355641 +0100 @@ -31,6 +31,7 @@ #include "ci/ciStreams.hpp" #include "ci/ciTypeArrayKlass.hpp" #include "ci/ciTypeFlow.hpp" +#include "ci/ciValueKlass.hpp" #include "compiler/compileLog.hpp" #include "interpreter/bytecode.hpp" #include "interpreter/bytecodes.hpp" @@ -271,6 +272,7 @@ // different kinds is always java.lang.Object. ciType* ciTypeFlow::StateVector::type_meet_internal(ciType* t1, ciType* t2, ciTypeFlow* analyzer) { assert(t1 != t2, "checked in caller"); + if (t1->equals(top_type())) { return t2; } else if (t2->equals(top_type())) { @@ -291,50 +293,66 @@ // At least one of the two types is a non-top primitive type. // The other type is not equal to it. Fall to bottom. return bottom_type(); - } else { - // Both types are non-top non-primitive types. That is, - // both types are either instanceKlasses or arrayKlasses. 
- ciKlass* object_klass = analyzer->env()->Object_klass(); - ciKlass* k1 = t1->as_klass(); - ciKlass* k2 = t2->as_klass(); - if (k1->equals(object_klass) || k2->equals(object_klass)) { - return object_klass; - } else if (!k1->is_loaded() || !k2->is_loaded()) { - // Unloaded classes fall to java.lang.Object at a merge. - return object_klass; - } else if (k1->is_interface() != k2->is_interface()) { - // When an interface meets a non-interface, we get Object; - // This is what the verifier does. - return object_klass; - } else if (k1->is_array_klass() || k2->is_array_klass()) { - // When an array meets a non-array, we get Object. - // When objArray meets typeArray, we also get Object. - // And when typeArray meets different typeArray, we again get Object. - // But when objArray meets objArray, we look carefully at element types. - if (k1->is_obj_array_klass() && k2->is_obj_array_klass()) { - // Meet the element types, then construct the corresponding array type. - ciKlass* elem1 = k1->as_obj_array_klass()->element_klass(); - ciKlass* elem2 = k2->as_obj_array_klass()->element_klass(); - ciKlass* elem = type_meet_internal(elem1, elem2, analyzer)->as_klass(); - // Do an easy shortcut if one type is a super of the other. - if (elem == elem1) { - assert(k1 == ciObjArrayKlass::make(elem), "shortcut is OK"); - return k1; - } else if (elem == elem2) { - assert(k2 == ciObjArrayKlass::make(elem), "shortcut is OK"); - return k2; - } else { - return ciObjArrayKlass::make(elem); - } + } + + // Unwrap the types after gathering nullness information + bool never_null1 = t1->is_never_null(); + bool never_null2 = t2->is_never_null(); + t1 = t1->unwrap(); + t2 = t2->unwrap(); + + // Both types are non-top non-primitive types. That is, + // both types are either instanceKlasses or arrayKlasses. 
+ ciKlass* object_klass = analyzer->env()->Object_klass(); + ciKlass* k1 = t1->as_klass(); + ciKlass* k2 = t2->as_klass(); + if (k1->equals(object_klass) || k2->equals(object_klass)) { + return object_klass; + } else if (!k1->is_loaded() || !k2->is_loaded()) { + // Unloaded classes fall to java.lang.Object at a merge. + return object_klass; + } else if (k1->is_interface() != k2->is_interface()) { + // When an interface meets a non-interface, we get Object; + // This is what the verifier does. + return object_klass; + } else if (k1->is_array_klass() || k2->is_array_klass()) { + // When an array meets a non-array, we get Object. + // When objArray meets typeArray, we also get Object. + // And when typeArray meets different typeArray, we again get Object. + // But when objArray meets objArray, we look carefully at element types. + if (k1->is_obj_array_klass() && k2->is_obj_array_klass()) { + // Meet the element types, then construct the corresponding array type. + ciKlass* elem1 = k1->as_obj_array_klass()->element_klass(); + ciKlass* elem2 = k2->as_obj_array_klass()->element_klass(); + ciKlass* elem = type_meet_internal(elem1, elem2, analyzer)->as_klass(); + // Do an easy shortcut if one type is a super of the other. + if (elem == elem1) { + assert(k1 == ciObjArrayKlass::make(elem), "shortcut is OK"); + return k1; + } else if (elem == elem2) { + assert(k2 == ciObjArrayKlass::make(elem), "shortcut is OK"); + return k2; } else { - return object_klass; + return ciObjArrayKlass::make(elem); } + } else if (k1->is_value_array_klass() || k2->is_value_array_klass()) { + ciKlass* elem1 = k1->as_array_klass()->element_klass(); + ciKlass* elem2 = k2->as_array_klass()->element_klass(); + ciKlass* elem = type_meet_internal(elem1, elem2, analyzer)->as_klass(); + return ciArrayKlass::make(elem); } else { - // Must be two plain old instance klasses. 
- assert(k1->is_instance_klass(), "previous cases handle non-instances"); - assert(k2->is_instance_klass(), "previous cases handle non-instances"); - return k1->least_common_ancestor(k2); + return object_klass; + } + } else { + // Must be two plain old instance klasses. + assert(k1->is_instance_klass(), "previous cases handle non-instances"); + assert(k2->is_instance_klass(), "previous cases handle non-instances"); + ciType* result = k1->least_common_ancestor(k2); + if (never_null1 && never_null2 && result->is_valuetype()) { + // Both value types are never null, mark the result as never null + result = analyzer->mark_as_never_null(result); } + return result; } } @@ -396,13 +414,22 @@ // "Push" the method signature into the first few locals. state->set_stack_size(-max_locals()); if (!method()->is_static()) { - state->push(method()->holder()); + ciType* holder = method()->holder(); + if (holder->is_valuetype()) { + // The receiver is never null + holder = mark_as_never_null(holder); + } + state->push(holder); assert(state->tos() == state->local(0), ""); } for (ciSignatureStream str(method()->signature()); !str.at_return_type(); str.next()) { - state->push_translate(str.type()); + ciType* arg = str.type(); + if (str.is_never_null()) { + arg = mark_as_never_null(arg); + } + state->push_translate(arg); } // Set the rest of the locals to bottom. Cell cell = state->next_cell(state->tos()); @@ -548,12 +575,12 @@ } // ------------------------------------------------------------------ -// ciTypeFlow::StateVector::do_aaload -void ciTypeFlow::StateVector::do_aaload(ciBytecodeStream* str) { +// ciTypeFlow::StateVector::do_aload +void ciTypeFlow::StateVector::do_aload(ciBytecodeStream* str) { pop_int(); - ciObjArrayKlass* array_klass = pop_objArray(); + ciArrayKlass* array_klass = pop_objOrValueArray(); if (array_klass == NULL) { - // Did aaload on a null reference; push a null and ignore the exception. + // Did aload on a null reference; push a null and ignore the exception. 
// This instruction will never continue normally. All we have to do // is report a value that will meet correctly with any downstream // reference types on paths that will truly be executed. This null type @@ -578,7 +605,12 @@ (Deoptimization::Reason_unloaded, Deoptimization::Action_reinterpret)); } else { - push_object(element_klass); + if (element_klass->is_valuetype()) { + // Value type array elements are never null + push(outer()->mark_as_never_null(element_klass)); + } else { + push_object(element_klass); + } } } @@ -597,7 +629,13 @@ do_null_assert(klass); } else { pop_object(); - push_object(klass); + if (str->is_klass_never_null()) { + // Casting to a Q-Type contains a NULL check + assert(klass->is_valuetype(), "must be a value type"); + push(outer()->mark_as_never_null(klass)); + } else { + push_object(klass); + } } } @@ -639,6 +677,10 @@ // (See bug 4379915.) do_null_assert(field_type->as_klass()); } else { + if (field->is_flattenable()) { + // A flattenable field is never null + field_type = outer()->mark_as_never_null(field_type); + } push_translate(field_type); } } @@ -706,6 +748,9 @@ // See do_getstatic() for similar explanation, as well as bug 4684993. 
do_null_assert(return_type->as_klass()); } else { + if (sigstr.is_never_null()) { + return_type = outer()->mark_as_never_null(return_type); + } push_translate(return_type); } } @@ -729,13 +774,17 @@ outer()->record_failure("ldc did not link"); return; } - if (basic_type == T_OBJECT || basic_type == T_ARRAY) { + if (basic_type == T_OBJECT || basic_type == T_VALUETYPE || basic_type == T_ARRAY) { ciObject* obj = con.as_object(); if (obj->is_null_object()) { push_null(); } else { assert(obj->is_instance() || obj->is_array(), "must be java_mirror of klass"); - push_object(obj->klass()); + ciType* type = obj->klass(); + if (type->is_valuetype()) { + type = outer()->mark_as_never_null(type); + } + push(type); } } else { push_translate(ciType::make(basic_type)); @@ -771,6 +820,42 @@ } // ------------------------------------------------------------------ +// ciTypeFlow::StateVector::do_defaultvalue +void ciTypeFlow::StateVector::do_defaultvalue(ciBytecodeStream* str) { + bool will_link; + ciKlass* klass = str->get_klass(will_link); + if (!will_link) { + trap(str, klass, str->get_klass_index()); + } else { + // The default value type is never null + push(outer()->mark_as_never_null(klass)); + } +} + +// ------------------------------------------------------------------ +// ciTypeFlow::StateVector::do_withfield +void ciTypeFlow::StateVector::do_withfield(ciBytecodeStream* str) { + bool will_link; + ciField* field = str->get_field(will_link); + ciKlass* klass = field->holder(); + if (!will_link) { + trap(str, klass, str->get_field_holder_index()); + } else { + ciType* type = pop_value(); + ciType* field_type = field->type(); + assert(field_type->is_loaded(), "field type must be loaded"); + if (field_type->is_two_word()) { + ciType* type2 = pop_value(); + assert(type2->is_two_word(), "must be 2nd half"); + assert(type == half_type(type2), "must be 2nd half"); + } + pop_object(); + // The newly created value type can never be null + push(outer()->mark_as_never_null(klass)); + } 
+} + +// ------------------------------------------------------------------ // ciTypeFlow::StateVector::do_newarray void ciTypeFlow::StateVector::do_newarray(ciBytecodeStream* str) { pop_int(); @@ -875,13 +960,13 @@ } switch(str->cur_bc()) { - case Bytecodes::_aaload: do_aaload(str); break; + case Bytecodes::_aaload: do_aload(str); break; case Bytecodes::_aastore: { pop_object(); pop_int(); - pop_objArray(); + pop_objOrValueArray(); break; } case Bytecodes::_aconst_null: @@ -903,7 +988,7 @@ if (!will_link) { trap(str, element_klass, str->get_klass_index()); } else { - push_object(ciObjArrayKlass::make(element_klass)); + push_object(ciArrayKlass::make(element_klass)); } break; } @@ -1435,6 +1520,9 @@ case Bytecodes::_new: do_new(str); break; + case Bytecodes::_defaultvalue: do_defaultvalue(str); break; + case Bytecodes::_withfield: do_withfield(str); break; + case Bytecodes::_newarray: do_newarray(str); break; case Bytecodes::_pop: @@ -1462,6 +1550,7 @@ push(value2); break; } + case Bytecodes::_wide: default: { @@ -1745,9 +1834,12 @@ break; } - case Bytecodes::_athrow: case Bytecodes::_ireturn: - case Bytecodes::_lreturn: case Bytecodes::_freturn: - case Bytecodes::_dreturn: case Bytecodes::_areturn: + case Bytecodes::_athrow: + case Bytecodes::_ireturn: + case Bytecodes::_lreturn: + case Bytecodes::_freturn: + case Bytecodes::_dreturn: + case Bytecodes::_areturn: case Bytecodes::_return: _successors = new (arena) GrowableArray(arena, 1, 0, NULL); @@ -2978,6 +3070,11 @@ } } +ciType* ciTypeFlow::mark_as_never_null(ciType* type) { + // Wrap the type to carry the information that it is never null + return env()->make_never_null_wrapper(type); +} + #ifndef PRODUCT // ------------------------------------------------------------------ // ciTypeFlow::print_on --- old/src/hotspot/share/ci/ciTypeFlow.hpp 2019-03-11 14:25:31.734355631 +0100 +++ new/src/hotspot/share/ci/ciTypeFlow.hpp 2019-03-11 14:25:31.510355634 +0100 @@ -335,14 +335,16 @@ type_at_tos()->is_array_klass(), 
"must be array type"); pop(); } - // pop_objArray and pop_typeArray narrow the tos to ciObjArrayKlass - // or ciTypeArrayKlass (resp.). In the rare case that an explicit + // pop_valueOrobjArray and pop_typeArray narrow the tos to ciObjArrayKlass, + // ciValueArrayKlass or ciTypeArrayKlass (resp.). In the rare case that an explicit // null is popped from the stack, we return NULL. Caller beware. - ciObjArrayKlass* pop_objArray() { + ciArrayKlass* pop_objOrValueArray() { ciType* array = pop_value(); if (array == null_type()) return NULL; - assert(array->is_obj_array_klass(), "must be object array type"); - return array->as_obj_array_klass(); + // Value type arrays may contain oop or flattened representation + assert(array->is_obj_array_klass() || (ValueArrayFlatten && array->is_value_array_klass()), + "must be value or object array type"); + return array->as_array_klass(); } ciTypeArrayKlass* pop_typeArray() { ciType* array = pop_value(); @@ -356,7 +358,7 @@ void do_null_assert(ciKlass* unloaded_klass); // Helper convenience routines. - void do_aaload(ciBytecodeStream* str); + void do_aload(ciBytecodeStream* str); void do_checkcast(ciBytecodeStream* str); void do_getfield(ciBytecodeStream* str); void do_getstatic(ciBytecodeStream* str); @@ -365,6 +367,8 @@ void do_ldc(ciBytecodeStream* str); void do_multianewarray(ciBytecodeStream* str); void do_new(ciBytecodeStream* str); + void do_defaultvalue(ciBytecodeStream* str); + void do_withfield(ciBytecodeStream* str); void do_newarray(ciBytecodeStream* str); void do_putfield(ciBytecodeStream* str); void do_putstatic(ciBytecodeStream* str); @@ -843,6 +847,8 @@ return _block_map[rpo]; } int inc_next_pre_order() { return _next_pre_order++; } + ciType* mark_as_never_null(ciType* type); + private: // A work list used during flow analysis. 
Block* _work_list; --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/ci/ciValueArray.hpp 2019-03-11 14:25:31.974355628 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_CI_CIVALUEARRAY_HPP +#define SHARE_VM_CI_CIVALUEARRAY_HPP + +#include "ci/ciArray.hpp" +#include "ci/ciClassList.hpp" +#include "oops/valueArrayOop.hpp" + +// ciValueArray +// +// This class represents a valueArrayOop in the HotSpot virtual +// machine. +class ciValueArray : public ciArray { + CI_PACKAGE_ACCESS + +protected: + ciValueArray(valueArrayHandle h_o) : ciArray(h_o) {} + + ciValueArray(ciValueKlass* klass, int len) : ciArray(klass, len) {} + + valueArrayOop get_valueArrayOop() { + return (valueArrayOop)get_oop(); + } + + const char* type_string() { return "ciValuejArray"; } + +public: + // What kind of ciObject is this? 
+ bool is_value_array() { return true; } +}; + +#endif // SHARE_VM_CI_CIVALUEARRAY_HPP --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/ci/ciValueArrayKlass.cpp 2019-03-11 14:25:32.426355622 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "ci/ciInstanceKlass.hpp" +#include "ci/ciValueArrayKlass.hpp" +#include "ci/ciValueKlass.hpp" +#include "ci/ciSymbol.hpp" +#include "ci/ciUtilities.hpp" +#include "ci/ciUtilities.inline.hpp" +#include "oops/valueArrayKlass.hpp" + +// ciValueArrayKlass +// +// This class represents a Klass* in the HotSpot virtual machine +// whose Klass part is a ValueArrayKlass. + +// ------------------------------------------------------------------ +// ciValueArrayKlass::ciValueArrayKlass +// +// Constructor for loaded value array klasses. 
+ciValueArrayKlass::ciValueArrayKlass(Klass* h_k) : ciArrayKlass(h_k) { + assert(get_Klass()->is_valueArray_klass(), "wrong type"); + ValueKlass* element_Klass = get_ValueArrayKlass()->element_klass(); + _base_element_klass = CURRENT_ENV->get_klass(element_Klass); + assert(_base_element_klass->is_valuetype(), "bad base klass"); + if (dimension() == 1) { + _element_klass = _base_element_klass; + } else { + _element_klass = NULL; + } + if (!ciObjectFactory::is_initialized()) { + assert(_element_klass->is_java_lang_Object(), "only arrays of object are shared"); + } +} + +ciValueArrayKlass::ciValueArrayKlass(ciSymbol* array_name, + ciValueKlass* base_element_klass, + int dimension) + : ciArrayKlass(array_name, dimension, T_VALUETYPE) { + _base_element_klass = base_element_klass; + _element_klass = base_element_klass; +} + +// ------------------------------------------------------------------ +// ciValueArrayKlass::element_klass +// +// What is the one-level element type of this array? +ciKlass* ciValueArrayKlass::element_klass() { + if (_element_klass == NULL) { + assert(dimension() > 1, "_element_klass should not be NULL"); + // Produce the element klass. + if (is_loaded()) { + VM_ENTRY_MARK; + Klass* element_Klass = get_ValueArrayKlass()->element_klass(); + _element_klass = CURRENT_THREAD_ENV->get_klass(element_Klass); + } else { + // TODO handle this + guarantee(false, "unloaded array klass"); + VM_ENTRY_MARK; + // We are an unloaded array klass. Attempt to fetch our + // element klass by name. + _element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl( + this, + constantPoolHandle(), + construct_array_name(base_element_klass()->name(), + dimension() - 1), + false); + } + } + return _element_klass; +} + +// ------------------------------------------------------------------ +// ciValueArrayKlass::construct_array_name +// +// Build an array name from an element name and a dimension. 
+ciSymbol* ciValueArrayKlass::construct_array_name(ciSymbol* element_name, + int dimension) { + EXCEPTION_CONTEXT; + int element_len = element_name->utf8_length(); + + Symbol* base_name_sym = element_name->get_symbol(); + char* name; + + if (base_name_sym->char_at(0) == '[' || + (base_name_sym->char_at(0) == 'L' && // watch package name 'Lxx' + base_name_sym->char_at(element_len-1) == ';')) { + + int new_len = element_len + dimension + 1; // for the ['s and '\0' + name = CURRENT_THREAD_ENV->name_buffer(new_len); + + int pos = 0; + for ( ; pos < dimension; pos++) { + name[pos] = '['; + } + strncpy(name+pos, (char*)element_name->base(), element_len); + name[new_len-1] = '\0'; + } else { + int new_len = 3 // for L, ;, and '\0' + + dimension // for ['s + + element_len; + + name = CURRENT_THREAD_ENV->name_buffer(new_len); + int pos = 0; + for ( ; pos < dimension; pos++) { + name[pos] = '['; + } + name[pos++] = 'Q'; + strncpy(name+pos, (char*)element_name->base(), element_len); + name[new_len-2] = ';'; + name[new_len-1] = '\0'; + } + return ciSymbol::make(name); +} + +// ------------------------------------------------------------------ +// ciValueArrayKlass::make_impl +// +// Implementation of make. 
+ciValueArrayKlass* ciValueArrayKlass::make_impl(ciKlass* element_klass) { + assert(ValueArrayFlatten, "should only be used for flattened value type arrays"); + assert(element_klass->is_valuetype(), "element type must be value type"); + assert(element_klass->is_loaded(), "unloaded Q klasses are represented by ciInstanceKlass"); + { + EXCEPTION_CONTEXT; + // The element klass is loaded + Klass* array = element_klass->get_Klass()->array_klass(THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + CURRENT_THREAD_ENV->record_out_of_memory_failure(); + // TODO handle this + guarantee(false, "out of memory"); + return NULL; + } + return CURRENT_THREAD_ENV->get_value_array_klass(array); + } +} + +// ------------------------------------------------------------------ +// ciValueArrayKlass::make +// +// Make an array klass corresponding to the specified primitive type. +ciValueArrayKlass* ciValueArrayKlass::make(ciKlass* element_klass) { + GUARDED_VM_ENTRY(return make_impl(element_klass);) +} + +ciKlass* ciValueArrayKlass::exact_klass() { + assert(element_klass()->is_valuetype(), "element type must be value type"); + if (element_klass()->is_loaded()) { + assert(element_klass()->as_value_klass()->exact_klass() != NULL, "must be"); + return this; + } + + // TODO handle this + guarantee(false, "klass not loaded"); + return NULL; +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/ci/ciValueArrayKlass.hpp 2019-03-11 14:25:32.862355616 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_CI_CIVALUEARRAYKLASS_HPP +#define SHARE_VM_CI_CIVALUEARRAYKLASS_HPP + +#include "ci/ciArrayKlass.hpp" + +// ciValueArrayKlass +// +// This class represents a Klass* in the HotSpot virtual machine +// whose Klass part is a ValueArrayKlass. +class ciValueArrayKlass : public ciArrayKlass { + CI_PACKAGE_ACCESS + friend class ciEnv; + +private: + ciKlass* _element_klass; + // TODO remove this?? + ciKlass* _base_element_klass; + +protected: + ciValueArrayKlass(Klass* h_k); + ciValueArrayKlass(ciSymbol* array_name, + ciValueKlass* element_klass, + int dimension); + + ValueArrayKlass* get_ValueArrayKlass() { + return (ValueArrayKlass*)get_Klass(); + } + + static ciValueArrayKlass* make_impl(ciKlass* element_klass); + static ciSymbol* construct_array_name(ciSymbol* element_name, + int dimension); + + const char* type_string() { return "ciValueArrayKlass"; } + + oop loader() { return _base_element_klass->loader(); } + jobject loader_handle() { return _base_element_klass->loader_handle(); } + + oop protection_domain() { return _base_element_klass->protection_domain(); } + jobject protection_domain_handle() { return _base_element_klass->protection_domain_handle(); } + + +public: + // The one-level type of the array elements. 
+ ciKlass* element_klass(); + + // TODO refactor all of this + int log2_element_size() { + return Klass::layout_helper_log2_element_size(layout_helper()); + } + int element_byte_size() { return 1 << log2_element_size(); } + + // The innermost type of the array elements. + ciKlass* base_element_klass() { return _base_element_klass; } + + // What kind of ciObject is this? + bool is_value_array_klass() const { return true; } + + static ciValueArrayKlass* make(ciKlass* element_klass); + + virtual ciKlass* exact_klass(); +}; + + +#endif // SHARE_VM_CI_CIVALUEARRAYKLASS_HPP --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/ci/ciValueKlass.cpp 2019-03-11 14:25:33.318355609 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "ci/ciField.hpp" +#include "ci/ciUtilities.inline.hpp" +#include "ci/ciValueKlass.hpp" +#include "oops/valueKlass.hpp" + +int ciValueKlass::compute_nonstatic_fields() { + int result = ciInstanceKlass::compute_nonstatic_fields(); + assert(super() == NULL || !super()->has_nonstatic_fields(), "a value type must not inherit fields from its superclass"); + + // Compute declared non-static fields (without flattening of value type fields) + GrowableArray* fields = NULL; + GUARDED_VM_ENTRY(fields = compute_nonstatic_fields_impl(NULL, false /* no flattening */);) + Arena* arena = CURRENT_ENV->arena(); + _declared_nonstatic_fields = (fields != NULL) ? fields : new (arena) GrowableArray(arena, 0, 0, 0); + return result; +} + +// Offset of the first field in the value type +int ciValueKlass::first_field_offset() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->first_field_offset();) +} + +// Returns the index of the field with the given offset. If the field at 'offset' +// belongs to a flattened value type field, return the index of the field +// in the flattened value type. +int ciValueKlass::field_index_by_offset(int offset) { + assert(contains_field_offset(offset), "invalid field offset"); + int best_offset = 0; + int best_index = -1; + // Search the field with the given offset + for (int i = 0; i < nof_declared_nonstatic_fields(); ++i) { + int field_offset = _declared_nonstatic_fields->at(i)->offset(); + if (field_offset == offset) { + // Exact match + return i; + } else if (field_offset < offset && field_offset > best_offset) { + // No exact match. Save the index of the field with the closest offset that + // is smaller than the given field offset. This index corresponds to the + // flattened value type field that holds the field we are looking for. 
+ best_offset = field_offset; + best_index = i; + } + } + assert(best_index >= 0, "field not found"); + assert(best_offset == offset || _declared_nonstatic_fields->at(best_index)->type()->is_valuetype(), "offset should match for non-VTs"); + return best_index; +} + +// Are arrays containing this value type flattened? +bool ciValueKlass::flatten_array() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->flatten_array();) +} + +// Can this value type be returned as multiple values? +bool ciValueKlass::can_be_returned_as_fields() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->can_be_returned_as_fields();) +} + +// TODO +bool ciValueKlass::is_scalarizable() const { + return ScalarizeValueTypes; +} + +// When passing a value type's fields as arguments, count the number +// of argument slots that are needed +int ciValueKlass::value_arg_slots() { + int slots = 0; + for (int j = 0; j < nof_nonstatic_fields(); j++) { + ciField* field = nonstatic_field_at(j); + slots += type2size[field->type()->basic_type()]; + } + return slots; +} + +// Offset of the default oop in the mirror +int ciValueKlass::default_value_offset() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->default_value_offset();) +} + +ciInstance* ciValueKlass::default_value_instance() const { + GUARDED_VM_ENTRY( + oop default_value = ValueKlass::cast(get_Klass())->default_value(); + return CURRENT_ENV->get_instance(default_value); + ) +} + +bool ciValueKlass::contains_oops() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->contains_oops();) +} + +Array* ciValueKlass::extended_sig() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->extended_sig();) +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/ci/ciValueKlass.hpp 2019-03-11 14:25:33.790355603 +0100 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_CI_CIVALUEKLASS_HPP +#define SHARE_VM_CI_CIVALUEKLASS_HPP + +#include "ci/ciConstantPoolCache.hpp" +#include "ci/ciEnv.hpp" +#include "ci/ciFlags.hpp" +#include "ci/ciInstanceKlass.hpp" +#include "ci/ciSymbol.hpp" +#include "oops/valueKlass.hpp" + +// ciValueKlass +// +// Specialized ciInstanceKlass for value types. 
+class ciValueKlass : public ciInstanceKlass { + CI_PACKAGE_ACCESS + +private: + // Fields declared in the bytecode (without flattened value type fields) + GrowableArray* _declared_nonstatic_fields; + +protected: + ciValueKlass(Klass* h_k) : ciInstanceKlass(h_k), _declared_nonstatic_fields(NULL) { + assert(is_final(), "ValueKlass must be final"); + }; + + ciValueKlass(ciSymbol* name, jobject loader, jobject protection_domain) : + ciInstanceKlass(name, loader, protection_domain, T_VALUETYPE) {} + + int compute_nonstatic_fields(); + const char* type_string() { return "ciValueKlass"; } + +public: + bool is_valuetype() const { return true; } + + int nof_declared_nonstatic_fields() { + if (_declared_nonstatic_fields == NULL) { + compute_nonstatic_fields(); + } + return _declared_nonstatic_fields->length(); + } + + // ith non-static declared field (presented by ascending address) + ciField* declared_nonstatic_field_at(int i) { + assert(_declared_nonstatic_fields != NULL, "should be initialized"); + return _declared_nonstatic_fields->at(i); + } + + // Value type fields + int first_field_offset() const; + int field_index_by_offset(int offset); + + bool flatten_array() const; + bool can_be_returned_as_fields() const; + bool is_scalarizable() const; + int value_arg_slots(); + int default_value_offset() const; + ciInstance* default_value_instance() const; + bool contains_oops() const; + Array* extended_sig() const; +}; + +#endif // SHARE_VM_CI_CIVALUEKLASS_HPP --- old/src/hotspot/share/ci/compilerInterface.hpp 2019-03-11 14:25:34.446355594 +0100 +++ new/src/hotspot/share/ci/compilerInterface.hpp 2019-03-11 14:25:34.250355596 +0100 @@ -46,6 +46,7 @@ #include "ci/ciSymbol.hpp" #include "ci/ciTypeArray.hpp" #include "ci/ciTypeArrayKlass.hpp" +#include "ci/ciValueArrayKlass.hpp" // This is a dummy file used for including the complete // compiler interface. 
--- old/src/hotspot/share/classfile/bytecodeAssembler.cpp 2019-03-11 14:25:34.850355588 +0100 +++ new/src/hotspot/share/classfile/bytecodeAssembler.cpp 2019-03-11 14:25:34.650355591 +0100 @@ -188,6 +188,7 @@ case T_DOUBLE: dload(index); break; case T_LONG: lload(index); break; case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: aload(index); break; default: ShouldNotReachHere(); @@ -255,6 +256,7 @@ case T_DOUBLE: dreturn(); break; case T_LONG: lreturn(); break; case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: areturn(); break; case T_VOID: _return(); break; default: --- old/src/hotspot/share/classfile/classFileParser.cpp 2019-03-11 14:25:35.262355582 +0100 +++ new/src/hotspot/share/classfile/classFileParser.cpp 2019-03-11 14:25:35.058355585 +0100 @@ -55,6 +55,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" @@ -122,6 +123,8 @@ #define JAVA_13_VERSION 57 +#define CONSTANT_CLASS_DESCRIPTORS 57 + void ClassFileParser::set_class_bad_constant_seen(short bad_constant) { assert((bad_constant == 19 || bad_constant == 20) && _major_version >= JAVA_9_VERSION, "Unexpected bad constant pool entry"); @@ -160,7 +163,7 @@ // so we don't need bounds-check for reading tag. 
const u1 tag = cfs->get_u1_fast(); switch (tag) { - case JVM_CONSTANT_Class : { + case JVM_CONSTANT_Class: { cfs->guarantee_more(3, CHECK); // name_index, tag/access_flags const u2 name_index = cfs->get_u2_fast(); cp->klass_index_at_put(index, name_index); @@ -492,7 +495,14 @@ check_property(valid_symbol_at(class_index), "Invalid constant pool index %u in class file %s", class_index, CHECK); - cp->unresolved_klass_at_put(index, class_index, num_klasses++); + + Symbol* const name = cp->symbol_at(class_index); + const unsigned int name_len = name->utf8_length(); + if (name->is_Q_signature()) { + cp->unresolved_qdescriptor_at_put(index, class_index, num_klasses++); + } else { + cp->unresolved_klass_at_put(index, class_index, num_klasses++); + } break; } case JVM_CONSTANT_StringIndex: { @@ -1461,11 +1471,13 @@ STATIC_SHORT, // shorts STATIC_WORD, // ints STATIC_DOUBLE, // aligned long or double + STATIC_FLATTENABLE, // flattenable field NONSTATIC_OOP, NONSTATIC_BYTE, NONSTATIC_SHORT, NONSTATIC_WORD, NONSTATIC_DOUBLE, + NONSTATIC_FLATTENABLE, MAX_FIELD_ALLOCATION_TYPE, BAD_ALLOCATION_TYPE = -1 }; @@ -1485,12 +1497,13 @@ NONSTATIC_DOUBLE, // T_LONG = 11, NONSTATIC_OOP, // T_OBJECT = 12, NONSTATIC_OOP, // T_ARRAY = 13, - BAD_ALLOCATION_TYPE, // T_VOID = 14, - BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, - BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, - BAD_ALLOCATION_TYPE, // T_METADATA = 17, - BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, - BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, + NONSTATIC_OOP, // T_VALUETYPE = 14, + BAD_ALLOCATION_TYPE, // T_VOID = 15, + BAD_ALLOCATION_TYPE, // T_ADDRESS = 16, + BAD_ALLOCATION_TYPE, // T_NARROWOOP = 17, + BAD_ALLOCATION_TYPE, // T_METADATA = 18, + BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 19, + BAD_ALLOCATION_TYPE, // T_CONFLICT = 20, BAD_ALLOCATION_TYPE, // 0 BAD_ALLOCATION_TYPE, // 1 BAD_ALLOCATION_TYPE, // 2 @@ -1505,18 +1518,22 @@ STATIC_DOUBLE, // T_LONG = 11, STATIC_OOP, // T_OBJECT = 12, STATIC_OOP, // T_ARRAY = 13, - BAD_ALLOCATION_TYPE, 
// T_VOID = 14, - BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, - BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, - BAD_ALLOCATION_TYPE, // T_METADATA = 17, - BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, - BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, + STATIC_OOP, // T_VALUETYPE = 14, + BAD_ALLOCATION_TYPE, // T_VOID = 15, + BAD_ALLOCATION_TYPE, // T_ADDRESS = 16, + BAD_ALLOCATION_TYPE, // T_NARROWOOP = 17, + BAD_ALLOCATION_TYPE, // T_METADATA = 18, + BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 19, + BAD_ALLOCATION_TYPE, // T_CONFLICT = 20 }; -static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) { +static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type, bool is_flattenable) { assert(type >= T_BOOLEAN && type < T_VOID, "only allowable values"); FieldAllocationType result = _basic_type_to_atype[type + (is_static ? (T_CONFLICT + 1) : 0)]; assert(result != BAD_ALLOCATION_TYPE, "bad type"); + if (is_flattenable) { + result = is_static ? STATIC_FLATTENABLE : NONSTATIC_FLATTENABLE; + } return result; } @@ -1530,8 +1547,8 @@ } } - FieldAllocationType update(bool is_static, BasicType type) { - FieldAllocationType atype = basic_type_to_atype(is_static, type); + FieldAllocationType update(bool is_static, BasicType type, bool is_flattenable) { + FieldAllocationType atype = basic_type_to_atype(is_static, type, is_flattenable); if (atype != BAD_ALLOCATION_TYPE) { // Make sure there is no overflow with injected fields. 
assert(count[atype] < 0xFFFF, "More than 65535 fields"); @@ -1545,6 +1562,7 @@ // _fields_type_annotations fields void ClassFileParser::parse_fields(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, FieldAllocationCount* const fac, ConstantPool* cp, const int cp_size, @@ -1567,7 +1585,8 @@ int num_injected = 0; const InjectedField* const injected = JavaClasses::get_injected(_class_name, &num_injected); - const int total_fields = length + num_injected; + + const int total_fields = length + num_injected + (is_value_type ? 1 : 0); // The field array starts with tuples of shorts // [access, name index, sig index, initial value index, byte offset]. @@ -1601,9 +1620,11 @@ // access_flags, name_index, descriptor_index, attributes_count cfs->guarantee_more(8, CHECK); + jint recognized_modifiers = JVM_RECOGNIZED_FIELD_MODIFIERS; + + const jint flags = cfs->get_u2_fast() & recognized_modifiers; + verify_legal_field_modifiers(flags, is_interface, is_value_type, CHECK); AccessFlags access_flags; - const jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS; - verify_legal_field_modifiers(flags, is_interface, CHECK); access_flags.set_flags(flags); const u2 name_index = cfs->get_u2_fast(); @@ -1619,6 +1640,22 @@ signature_index, CHECK); const Symbol* const sig = cp->symbol_at(signature_index); verify_legal_field_signature(name, sig, CHECK); + assert(!access_flags.is_flattenable(), "ACC_FLATTENABLE should have been filtered out"); + if (sig->is_Q_signature()) { + // assert(_major_version >= CONSTANT_CLASS_DESCRIPTORS, "Q-descriptors are only supported in recent classfiles"); + access_flags.set_is_flattenable(); + } + if (access_flags.is_flattenable()) { + // Array flattenability cannot be specified. Arrays of value classes + // are always flattenable. Arrays of other classes are not flattenable.
+ if (sig->utf8_length() > 1 && sig->char_at(0) == '[') { + classfile_parse_error( + "Field \"%s\" with signature \"%s\" in class file %s is invalid." + " ACC_FLATTENABLE cannot be specified for an array", + name->as_C_string(), sig->as_klass_external_name(), CHECK); + } + _has_flattenable_fields = true; + } u2 constantvalue_index = 0; bool is_synthetic = false; @@ -1678,7 +1715,7 @@ const BasicType type = cp->basic_type_for_signature_at(signature_index); // Remember how many oops we encountered and compute allocation type - const FieldAllocationType atype = fac->update(is_static, type); + const FieldAllocationType atype = fac->update(is_static, type, access_flags.is_flattenable()); field->set_allocation_type(atype); // After field is initialized with type, we can augment it with aux info @@ -1719,12 +1756,25 @@ const BasicType type = FieldType::basic_type(injected[n].signature()); // Remember how many oops we encountered and compute allocation type - const FieldAllocationType atype = fac->update(false, type); + const FieldAllocationType atype = fac->update(false, type, false); field->set_allocation_type(atype); index++; } } + if (is_value_type) { + index = length + num_injected; + FieldInfo* const field = FieldInfo::from_field_array(fa, index); + field->initialize(JVM_ACC_FIELD_INTERNAL | JVM_ACC_STATIC, + vmSymbols::default_value_name_enum, + vmSymbols::java_lang_Object_enum, + 0); + const BasicType type = FieldType::basic_type(vmSymbols::object_signature()); + const FieldAllocationType atype = fac->update(true, type, false); + field->set_allocation_type(atype); + index++; + } + assert(NULL == _fields, "invariant"); _fields = @@ -2348,6 +2398,7 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, const ConstantPool* cp, AccessFlags* const promoted_flags, TRAPS) { @@ -2388,11 +2439,17 @@ classfile_parse_error("Method is not static in class file %s", CHECK_NULL); } } else { - 
verify_legal_method_modifiers(flags, is_interface, name, CHECK_NULL); + verify_legal_method_modifiers(flags, is_interface, is_value_type, name, CHECK_NULL); } - if (name == vmSymbols::object_initializer_name() && is_interface) { - classfile_parse_error("Interface cannot have a method named , class file %s", CHECK_NULL); + if (name == vmSymbols::object_initializer_name()) { + if (is_interface) { + classfile_parse_error("Interface cannot have a method named , class file %s", CHECK_NULL); +/* TBD: uncomment when javac stops generating () for value types. + } else if (is_value_type) { + classfile_parse_error("Value Type cannot have a method named , class file %s", CHECK_NULL); +*/ + } } int args_size = -1; // only used when _need_verify is true @@ -2963,6 +3020,7 @@ // Side-effects: populates the _methods field in the parser void ClassFileParser::parse_methods(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, AccessFlags* promoted_flags, bool* has_final_method, bool* declares_nonstatic_concrete_methods, @@ -2987,6 +3045,7 @@ for (int index = 0; index < length; index++) { Method* method = parse_method(cfs, is_interface, + is_value_type, _cp, promoted_flags, CHECK); @@ -3179,14 +3238,20 @@ guarantee_property(inner_class_info_index != outer_class_info_index, "Class is both outer and inner class in class file %s", CHECK_0); } - // Access flags - jint flags; + + jint recognized_modifiers = RECOGNIZED_INNER_CLASS_MODIFIERS; // JVM_ACC_MODULE is defined in JDK-9 and later. 
if (_major_version >= JAVA_9_VERSION) { - flags = cfs->get_u2_fast() & (RECOGNIZED_INNER_CLASS_MODIFIERS | JVM_ACC_MODULE); - } else { - flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS; + recognized_modifiers |= JVM_ACC_MODULE; + } + // JVM_ACC_VALUE is defined for class file version 55 and later + if (supports_value_types()) { + recognized_modifiers |= JVM_ACC_VALUE; } + + // Access flags + jint flags = cfs->get_u2_fast() & recognized_modifiers; + if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { // Set abstract bit for old class files for backward compatibility flags |= JVM_ACC_ABSTRACT; @@ -3389,6 +3454,8 @@ bool parsed_source_debug_ext_annotations_exist = false; const u1* inner_classes_attribute_start = NULL; u4 inner_classes_attribute_length = 0; + const u1* value_types_attribute_start = NULL; + u4 value_types_attribute_length = 0; u2 enclosing_method_class_index = 0; u2 enclosing_method_method_index = 0; const u1* nest_members_attribute_start = NULL; @@ -3738,7 +3805,8 @@ const InstanceKlass* super_klass = NULL; if (super_class_index == 0) { - check_property(_class_name == vmSymbols::java_lang_Object(), + check_property(_class_name == vmSymbols::java_lang_Object() + || (_access_flags.get_flags() & JVM_ACC_VALUE), "Invalid superclass index %u in class file %s", super_class_index, CHECK_NULL); @@ -3765,39 +3833,6 @@ return super_klass; } -static unsigned int compute_oop_map_count(const InstanceKlass* super, - unsigned int nonstatic_oop_map_count, - int first_nonstatic_oop_offset) { - - unsigned int map_count = - NULL == super ? 
0 : super->nonstatic_oop_map_count(); - if (nonstatic_oop_map_count > 0) { - // We have oops to add to map - if (map_count == 0) { - map_count = nonstatic_oop_map_count; - } - else { - // Check whether we should add a new map block or whether the last one can - // be extended - const OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps(); - const OopMapBlock* const last_map = first_map + map_count - 1; - - const int next_offset = last_map->offset() + last_map->count() * heapOopSize; - if (next_offset == first_nonstatic_oop_offset) { - // There is no gap bettwen superklass's last oop field and first - // local oop field, merge maps. - nonstatic_oop_map_count -= 1; - } - else { - // Superklass didn't end with a oop field, add extra maps - assert(next_offset < first_nonstatic_oop_offset, "just checking"); - } - map_count += nonstatic_oop_map_count; - } - } - return map_count; -} - #ifndef PRODUCT static void print_field_layout(const Symbol* name, Array* fields, @@ -3838,16 +3873,158 @@ // Values needed for oopmap and InstanceKlass creation class ClassFileParser::FieldLayoutInfo : public ResourceObj { public: - int* nonstatic_oop_offsets; - unsigned int* nonstatic_oop_counts; - unsigned int nonstatic_oop_map_count; - unsigned int total_oop_map_count; + OopMapBlocksBuilder* oop_map_blocks; int instance_size; int nonstatic_field_size; int static_field_size; bool has_nonstatic_fields; }; +// Utility to collect and compact oop maps during layout +class ClassFileParser::OopMapBlocksBuilder : public ResourceObj { + public: + OopMapBlock* nonstatic_oop_maps; + unsigned int nonstatic_oop_map_count; + unsigned int max_nonstatic_oop_maps; + + public: + OopMapBlocksBuilder(unsigned int max_blocks, TRAPS) { + max_nonstatic_oop_maps = max_blocks; + nonstatic_oop_map_count = 0; + if (max_blocks == 0) { + nonstatic_oop_maps = NULL; + } else { + nonstatic_oop_maps = NEW_RESOURCE_ARRAY_IN_THREAD( + THREAD, OopMapBlock, max_nonstatic_oop_maps); + memset(nonstatic_oop_maps, 
0, sizeof(OopMapBlock) * max_blocks); + } + } + + OopMapBlock* last_oop_map() const { + assert(nonstatic_oop_map_count > 0, "Has no oop maps"); + return nonstatic_oop_maps + (nonstatic_oop_map_count - 1); + } + + // addition of super oop maps + void initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) { + assert(nof_blocks && nonstatic_oop_map_count == 0 && + nof_blocks <= max_nonstatic_oop_maps, "invariant"); + + memcpy(nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks); + nonstatic_oop_map_count += nof_blocks; + } + + // collection of oops + void add(int offset, int count) { + if (nonstatic_oop_map_count == 0) { + nonstatic_oop_map_count++; + } + OopMapBlock* nonstatic_oop_map = last_oop_map(); + if (nonstatic_oop_map->count() == 0) { // Unused map, set it up + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); + } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add + nonstatic_oop_map->increment_count(count); + } else { // Need a new one... + nonstatic_oop_map_count++; + assert(nonstatic_oop_map_count <= max_nonstatic_oop_maps, "range check"); + nonstatic_oop_map = last_oop_map(); + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); + } + } + + // general purpose copy, e.g. into allocated instanceKlass + void copy(OopMapBlock* dst) { + if (nonstatic_oop_map_count != 0) { + memcpy(dst, nonstatic_oop_maps, sizeof(OopMapBlock) * nonstatic_oop_map_count); + } + } + + // Sort and compact adjacent blocks + void compact(TRAPS) { + if (nonstatic_oop_map_count <= 1) { + return; + } + /* + * Since field layout sneaks in oops before values, we will be able to condense + * blocks. There is potential to compact between super, own refs and values + * containing refs. + * + * Currently compaction is slightly limited due to values being 8 byte aligned. + * This may well change: FixMe: if it doesn't, the code below is fairly general purpose + * and maybe it doesn't need to be.
+ */ + qsort(nonstatic_oop_maps, nonstatic_oop_map_count, sizeof(OopMapBlock), + (_sort_Fn)OopMapBlock::compare_offset); + if (nonstatic_oop_map_count < 2) { + return; + } + + //Make a temp copy, and iterate through and copy back into the orig + ResourceMark rm(THREAD); + OopMapBlock* oop_maps_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, OopMapBlock, + nonstatic_oop_map_count); + OopMapBlock* oop_maps_copy_end = oop_maps_copy + nonstatic_oop_map_count; + copy(oop_maps_copy); + OopMapBlock* nonstatic_oop_map = nonstatic_oop_maps; + unsigned int new_count = 1; + oop_maps_copy++; + while(oop_maps_copy < oop_maps_copy_end) { + assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant"); + if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) { + nonstatic_oop_map->increment_count(oop_maps_copy->count()); + } else { + nonstatic_oop_map++; + new_count++; + nonstatic_oop_map->set_offset(oop_maps_copy->offset()); + nonstatic_oop_map->set_count(oop_maps_copy->count()); + } + oop_maps_copy++; + } + assert(new_count <= nonstatic_oop_map_count, "end up with more maps after compact() ?"); + nonstatic_oop_map_count = new_count; + } + + void print_on(outputStream* st) const { + st->print_cr(" OopMapBlocks: %3d /%3d", nonstatic_oop_map_count, max_nonstatic_oop_maps); + if (nonstatic_oop_map_count > 0) { + OopMapBlock* map = nonstatic_oop_maps; + OopMapBlock* last_map = last_oop_map(); + assert(map <= last_map, "Last less than first"); + while (map <= last_map) { + st->print_cr(" Offset: %3d -%3d Count: %3d", map->offset(), + map->offset() + map->offset_span() - heapOopSize, map->count()); + map++; + } + } + } + + void print_value_on(outputStream* st) const { + print_on(st); + } + +}; + +void ClassFileParser::throwValueTypeLimitation(THREAD_AND_LOCATION_DECL, + const char* msg, + const Symbol* name, + const Symbol* sig) const { + + ResourceMark rm(THREAD); + if (name == NULL || sig == NULL) { + Exceptions::fthrow(THREAD_AND_LOCATION_ARGS, + 
vmSymbols::java_lang_ClassFormatError(), + "class: %s - %s", _class_name->as_C_string(), msg); + } + else { + Exceptions::fthrow(THREAD_AND_LOCATION_ARGS, + vmSymbols::java_lang_ClassFormatError(), + "\"%s\" sig: \"%s\" class: %s - %s", name->as_C_string(), sig->as_C_string(), + _class_name->as_C_string(), msg); + } +} + // Layout fields and fill in FieldLayoutInfo. Could use more refactoring! void ClassFileParser::layout_fields(ConstantPool* cp, const FieldAllocationCount* fac, @@ -3860,6 +4037,12 @@ // Field size and offset computation int nonstatic_field_size = _super_klass == NULL ? 0 : _super_klass->nonstatic_field_size(); + int next_nonstatic_valuetype_offset = 0; + int first_nonstatic_valuetype_offset = 0; + + // Fields that are value types are handled differently depending if they are static or not: + // - static fields are oops + // - non-static fields are embedded // Count the contended fields by type. // @@ -3880,8 +4063,9 @@ // Calculate the starting byte offsets int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields(); + // Value types in static fields are not embedded, they are handled with oops int next_static_double_offset = next_static_oop_offset + - ((fac->count[STATIC_OOP]) * heapOopSize); + ((fac->count[STATIC_OOP] + fac->count[STATIC_FLATTENABLE]) * heapOopSize); if (fac->count[STATIC_DOUBLE]) { next_static_double_offset = align_up(next_static_double_offset, BytesPerLong); } @@ -3896,6 +4080,16 @@ int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size * heapOopSize; + // First field of value types is aligned on a long boundary in order to ease + // in-lining of value types (with header removal) in packed arrays and + // flatten value types + int initial_value_type_padding = 0; + if (is_value_type()) { + int old = nonstatic_fields_start; + nonstatic_fields_start = align_up(nonstatic_fields_start, BytesPerLong); + initial_value_type_padding = nonstatic_fields_start - old; + } + int 
next_nonstatic_field_offset = nonstatic_fields_start; const bool is_contended_class = parsed_annotations->is_contended(); @@ -3905,6 +4099,14 @@ next_nonstatic_field_offset += ContendedPaddingWidth; } + // Temporary value types restrictions + if (is_value_type()) { + if (is_contended_class) { + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support @Contended annotation yet"); + return; + } + } + // Compute the non-contended fields count. // The packing code below relies on these counts to determine if some field // can be squeezed into the alignment gap. Contended fields are obviously @@ -3915,16 +4117,96 @@ unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE]; unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP]; + int static_value_type_count = 0; + int nonstatic_value_type_count = 0; + int* nonstatic_value_type_indexes = NULL; + Klass** nonstatic_value_type_klasses = NULL; + unsigned int value_type_oop_map_count = 0; + int not_flattened_value_types = 0; + + int max_nonstatic_value_type = fac->count[NONSTATIC_FLATTENABLE] + 1; + + nonstatic_value_type_indexes = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, int, + max_nonstatic_value_type); + for (int i = 0; i < max_nonstatic_value_type; i++) { + nonstatic_value_type_indexes[i] = -1; + } + nonstatic_value_type_klasses = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, Klass*, + max_nonstatic_value_type); + + for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) { + if (fs.allocation_type() == STATIC_FLATTENABLE) { + // Pre-resolve the flattenable field and check for value type circularity + // issues. Note that super-class circularity checks are not needed here + // because flattenable fields can only be in value types and value types + // only have java.lang.Object as their super class. + // Also, note that super-interface circularity checks are not needed + // because interfaces cannot be value types. 
+ ResourceMark rm; + if (!fs.signature()->is_Q_signature()) { + THROW(vmSymbols::java_lang_ClassFormatError()); + } + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + if (!klass->access_flags().is_value_type()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } + static_value_type_count++; + } else if (fs.allocation_type() == NONSTATIC_FLATTENABLE) { + // Pre-resolve the flattenable field and check for value type circularity issues. + ResourceMark rm; + if (!fs.signature()->is_Q_signature()) { + THROW(vmSymbols::java_lang_ClassFormatError()); + } + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + if (!klass->access_flags().is_value_type()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } + ValueKlass* vk = ValueKlass::cast(klass); + // Conditions to apply flattening or not should be defined in a single place + if ((ValueFieldMaxFlatSize < 0) || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize) { + nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index(); + nonstatic_value_type_klasses[nonstatic_value_type_count] = klass; + nonstatic_value_type_count++; + + ValueKlass* vklass = ValueKlass::cast(klass); + if (vklass->contains_oops()) { + value_type_oop_map_count += vklass->nonstatic_oop_map_count(); + } + fs.set_flattened(true); + } else { + not_flattened_value_types++; + fs.set_flattened(false); + } + } + } + + // Adjusting non_static_oop_count to take into account not flattened value types; + nonstatic_oop_count += not_flattened_value_types; + // Total non-static fields count, including every contended field unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] + 
fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] + - fac->count[NONSTATIC_OOP]; + fac->count[NONSTATIC_OOP] + fac->count[NONSTATIC_FLATTENABLE]; const bool super_has_nonstatic_fields = (_super_klass != NULL && _super_klass->has_nonstatic_fields()); const bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0); + const bool has_nonstatic_value_fields = nonstatic_value_type_count > 0; + if (is_value_type() && (!has_nonstatic_fields)) { + // There are a number of fixes required throughout the type system and JIT + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet"); + return; + } // Prepare list of oops for oop map generation. // @@ -3934,15 +4216,18 @@ // we pessimistically allocate the maps to fit all the oops into the // distinct regions. // - // TODO: We add +1 to always allocate non-zero resource arrays; we need - // to figure out if we still need to do this. - unsigned int nonstatic_oop_map_count = 0; - unsigned int max_nonstatic_oop_maps = fac->count[NONSTATIC_OOP] + 1; - - int* nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, int, max_nonstatic_oop_maps); - unsigned int* const nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, unsigned int, max_nonstatic_oop_maps); + int super_oop_map_count = (_super_klass == NULL) ? 
0 :_super_klass->nonstatic_oop_map_count(); + int max_oop_map_count = + super_oop_map_count + + fac->count[NONSTATIC_OOP] + + value_type_oop_map_count + + not_flattened_value_types; + + OopMapBlocksBuilder* nonstatic_oop_maps = new OopMapBlocksBuilder(max_oop_map_count, THREAD); + if (super_oop_map_count > 0) { + nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(), + _super_klass->nonstatic_oop_map_count()); + } int first_nonstatic_oop_offset = 0; // will be set for first oop field @@ -3991,13 +4276,8 @@ next_nonstatic_double_offset = next_nonstatic_field_offset; } else if( allocation_style == 2 ) { // Fields allocation: oops fields in super and sub classes are together. - if( nonstatic_field_size > 0 && _super_klass != NULL && - _super_klass->nonstatic_oop_map_size() > 0 ) { - const unsigned int map_count = _super_klass->nonstatic_oop_map_count(); - const OopMapBlock* const first_map = _super_klass->start_of_nonstatic_oop_maps(); - const OopMapBlock* const last_map = first_map + map_count - 1; - const int next_offset = last_map->offset() + (last_map->count() * heapOopSize); - if (next_offset == next_nonstatic_field_offset) { + if( nonstatic_field_size > 0 && super_oop_map_count > 0 ) { + if (next_nonstatic_field_offset == nonstatic_oop_maps->last_oop_map()->end_offset()) { allocation_style = 0; // allocate oops first next_nonstatic_oop_offset = next_nonstatic_field_offset; next_nonstatic_double_offset = next_nonstatic_oop_offset + @@ -4080,6 +4360,16 @@ next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize); } + // Aligning embedded value types + // bug below, the current algorithm to layout embedded value types always put them at the + // end of the layout, which doesn't match the different allocation policies the VM is + // supposed to provide => FixMe + // Note also that the current alignment policy is to make each value type starting on a + // 64 bits boundary. 
This could be optimized later. For instance, it could be nice to + // align value types according to their most constrained internal type. + next_nonstatic_valuetype_offset = align_up(next_nonstatic_padded_offset, BytesPerLong); + int next_value_type_index = 0; + // Iterate over fields again and compute correct offsets. // The field allocation type was temporarily stored in the offset slot. // oop fields are located before non-oop fields (static and non-static). @@ -4096,6 +4386,8 @@ // pack the rest of the fields switch (atype) { + // Value types in static fields are handled with oops + case STATIC_FLATTENABLE: // Fallthrough case STATIC_OOP: real_offset = next_static_oop_offset; next_static_oop_offset += heapOopSize; @@ -4116,6 +4408,31 @@ real_offset = next_static_double_offset; next_static_double_offset += BytesPerLong; break; + case NONSTATIC_FLATTENABLE: + if (fs.is_flattened()) { + Klass* klass = nonstatic_value_type_klasses[next_value_type_index]; + assert(klass != NULL, "Klass should have been loaded and resolved earlier"); + assert(klass->access_flags().is_value_type(),"Must be a value type"); + ValueKlass* vklass = ValueKlass::cast(klass); + real_offset = next_nonstatic_valuetype_offset; + next_nonstatic_valuetype_offset += (vklass->size_helper()) * wordSize - vklass->first_field_offset(); + // aligning next value type on a 64 bits boundary + next_nonstatic_valuetype_offset = align_up(next_nonstatic_valuetype_offset, BytesPerLong); + next_value_type_index += 1; + + if (vklass->contains_oops()) { // add flatten oop maps + int diff = real_offset - vklass->first_field_offset(); + const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps(); + const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count(); + while (map < last_map) { + nonstatic_oop_maps->add(map->offset() + diff, map->count()); + map++; + } + } + break; + } else { + // Fall through + } case NONSTATIC_OOP: if( nonstatic_oop_space_count > 0 ) { real_offset = 
nonstatic_oop_space_offset; @@ -4125,26 +4442,7 @@ real_offset = next_nonstatic_oop_offset; next_nonstatic_oop_offset += heapOopSize; } - - // Record this oop in the oop maps - if( nonstatic_oop_map_count > 0 && - nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == - real_offset - - int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * - heapOopSize ) { - // This oop is adjacent to the previous one, add to current oop map - assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; - } else { - // This oop is not adjacent to the previous one, create new oop map - assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; - nonstatic_oop_counts [nonstatic_oop_map_count] = 1; - nonstatic_oop_map_count += 1; - if( first_nonstatic_oop_offset == 0 ) { // Undefined - first_nonstatic_oop_offset = real_offset; - } - } + nonstatic_oop_maps->add(real_offset, 1); break; case NONSTATIC_BYTE: if( nonstatic_byte_space_count > 0 ) { @@ -4253,30 +4551,17 @@ next_nonstatic_padded_offset += BytesPerLong; break; + // Value types in static fields are handled with oops + case NONSTATIC_FLATTENABLE: + throwValueTypeLimitation(THREAD_AND_LOCATION, + "@Contended annotation not supported for value types yet", fs.name(), fs.signature()); + return; + case NONSTATIC_OOP: next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize); real_offset = next_nonstatic_padded_offset; next_nonstatic_padded_offset += heapOopSize; - - // Record this oop in the oop maps - if( nonstatic_oop_map_count > 0 && - nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == - real_offset - - int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * - heapOopSize ) { - // This oop is adjacent to the previous one, add to current oop map - assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); - 
nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; - } else { - // This oop is not adjacent to the previous one, create new oop map - assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; - nonstatic_oop_counts [nonstatic_oop_map_count] = 1; - nonstatic_oop_map_count += 1; - if( first_nonstatic_oop_offset == 0 ) { // Undefined - first_nonstatic_oop_offset = real_offset; - } - } + nonstatic_oop_maps->add(real_offset, 1); break; default: @@ -4311,12 +4596,24 @@ // This helps to alleviate memory contention effects for subclass fields // and/or adjacent object. if (is_contended_class) { + assert(!is_value_type(), "@Contended not supported for value types yet"); next_nonstatic_padded_offset += ContendedPaddingWidth; } - int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; + int notaligned_nonstatic_fields_end; + if (nonstatic_value_type_count != 0) { + notaligned_nonstatic_fields_end = next_nonstatic_valuetype_offset; + } else { + notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; + } - int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, heapOopSize); + int nonstatic_field_sz_align = heapOopSize; + if (is_value_type()) { + if ((notaligned_nonstatic_fields_end - nonstatic_fields_start) > heapOopSize) { + nonstatic_field_sz_align = BytesPerLong; // value copy of fields only uses jlong copy + } + } + int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, nonstatic_field_sz_align); int instance_end = align_up(notaligned_nonstatic_fields_end, wordSize); int static_fields_end = align_up(next_static_byte_offset, wordSize); @@ -4328,8 +4625,9 @@ int instance_size = align_object_size(instance_end / wordSize); assert(instance_size == align_object_size(align_up( - (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), - wordSize) / wordSize), "consistent layout helper value"); + 
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize) + + initial_value_type_padding, wordSize) / wordSize), "consistent layout helper value"); + // Invariant: nonstatic_field end/start should only change if there are // nonstatic fields in the class, or if the class is contended. We compare @@ -4340,12 +4638,11 @@ (nonstatic_fields_count > 0), "double-check nonstatic start/end"); // Number of non-static oop map blocks allocated at end of klass. - const unsigned int total_oop_map_count = - compute_oop_map_count(_super_klass, nonstatic_oop_map_count, - first_nonstatic_oop_offset); + nonstatic_oop_maps->compact(THREAD); #ifndef PRODUCT - if (PrintFieldLayout) { + if ((PrintFieldLayout && !is_value_type()) || + (PrintValueLayout && (is_value_type() || has_nonstatic_value_fields))) { print_field_layout(_class_name, _fields, cp, @@ -4353,63 +4650,20 @@ nonstatic_fields_start, nonstatic_fields_end, static_fields_end); + nonstatic_oop_maps->print_on(tty); + tty->print("\n"); } #endif // Pass back information needed for InstanceKlass creation - info->nonstatic_oop_offsets = nonstatic_oop_offsets; - info->nonstatic_oop_counts = nonstatic_oop_counts; - info->nonstatic_oop_map_count = nonstatic_oop_map_count; - info->total_oop_map_count = total_oop_map_count; + info->oop_map_blocks = nonstatic_oop_maps; info->instance_size = instance_size; info->static_field_size = static_field_size; info->nonstatic_field_size = nonstatic_field_size; info->has_nonstatic_fields = has_nonstatic_fields; } -static void fill_oop_maps(const InstanceKlass* k, - unsigned int nonstatic_oop_map_count, - const int* nonstatic_oop_offsets, - const unsigned int* nonstatic_oop_counts) { - - assert(k != NULL, "invariant"); - - OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps(); - const InstanceKlass* const super = k->superklass(); - const unsigned int super_count = super ? 
super->nonstatic_oop_map_count() : 0; - if (super_count > 0) { - // Copy maps from superklass - OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps(); - for (unsigned int i = 0; i < super_count; ++i) { - *this_oop_map++ = *super_oop_map++; - } - } - - if (nonstatic_oop_map_count > 0) { - if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) { - // The counts differ because there is no gap between superklass's last oop - // field and the first local oop field. Extend the last oop map copied - // from the superklass instead of creating new one. - nonstatic_oop_map_count--; - nonstatic_oop_offsets++; - this_oop_map--; - this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++); - this_oop_map++; - } - - // Add new map blocks, fill them - while (nonstatic_oop_map_count-- > 0) { - this_oop_map->set_offset(*nonstatic_oop_offsets++); - this_oop_map->set_count(*nonstatic_oop_counts++); - this_oop_map++; - } - assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() == - this_oop_map, "sanity"); - } -} - - -void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) { +void ClassFileParser::set_precomputed_flags(InstanceKlass* ik, TRAPS) { assert(ik != NULL, "invariant"); const Klass* const super = ik->super(); @@ -4442,6 +4696,10 @@ // Check if this klass supports the java.lang.Cloneable interface if (SystemDictionary::Cloneable_klass_loaded()) { if (ik->is_subtype_of(SystemDictionary::Cloneable_klass())) { + if (ik->is_value()) { + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support Cloneable"); + return; + } ik->set_is_cloneable(); } } @@ -4482,6 +4740,11 @@ } } +bool ClassFileParser::supports_value_types() const { + // Value types are only supported by class file version 55 and later + return _major_version >= JAVA_11_VERSION; +} + // utility methods for appending an array with check for duplicates static void append_interfaces(GrowableArray* result, @@ -4744,7 +5007,9 @@ void 
ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const { const bool is_module = (flags & JVM_ACC_MODULE) != 0; + const bool is_value_type = (flags & JVM_ACC_VALUE) != 0; assert(_major_version >= JAVA_9_VERSION || !is_module, "JVM_ACC_MODULE should not be set"); + assert(supports_value_types() || !is_value_type, "JVM_ACC_VALUE should not be set"); if (is_module) { ResourceMark rm(THREAD); Exceptions::fthrow( @@ -4755,6 +5020,16 @@ return; } + if (is_value_type && !EnableValhalla) { + ResourceMark rm(THREAD); + Exceptions::fthrow( + THREAD_AND_LOCATION, + vmSymbols::java_lang_ClassFormatError(), + "Class modifier ACC_VALUE in class %s requires option -XX:+EnableValhalla", + _class_name->as_C_string() + ); + } + if (!_need_verify) { return; } const bool is_interface = (flags & JVM_ACC_INTERFACE) != 0; @@ -4768,7 +5043,8 @@ if ((is_abstract && is_final) || (is_interface && !is_abstract) || (is_interface && major_gte_15 && (is_super || is_enum)) || - (!is_interface && major_gte_15 && is_annotation)) { + (!is_interface && major_gte_15 && is_annotation) || + (is_value_type && (is_interface || is_abstract || is_enum || !is_final))) { ResourceMark rm(THREAD); Exceptions::fthrow( THREAD_AND_LOCATION, @@ -4851,6 +5127,7 @@ void ClassFileParser::verify_legal_field_modifiers(jint flags, bool is_interface, + bool is_value_type, TRAPS) const { if (!_need_verify) { return; } @@ -4875,6 +5152,10 @@ } else { // not interface if (has_illegal_visibility(flags) || (is_final && is_volatile)) { is_illegal = true; + } else { + if (is_value_type && !is_static && !is_final) { + is_illegal = true; + } } } @@ -4891,6 +5172,7 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags, bool is_interface, + bool is_value_type, const Symbol* name, TRAPS) const { if (!_need_verify) { return; } @@ -4950,10 +5232,14 @@ is_illegal = true; } } else { // not initializer - if (is_abstract) { - if ((is_final || is_native || is_private || is_static || - (major_gte_15 && 
(is_synchronized || is_strict)))) { - is_illegal = true; + if (is_value_type && is_synchronized && !is_static) { + is_illegal = true; + } else { + if (is_abstract) { + if ((is_final || is_native || is_private || is_static || + (major_gte_15 && (is_synchronized || is_strict)))) { + is_illegal = true; + } } } } @@ -5122,7 +5408,16 @@ case JVM_SIGNATURE_LONG: case JVM_SIGNATURE_DOUBLE: return signature + 1; - case JVM_SIGNATURE_CLASS: { + case JVM_SIGNATURE_VALUETYPE: + // Can't enable this check until JDK upgrades the bytecode generators + // if (_major_version < CONSTANT_CLASS_DESCRIPTORS ) { + // classfile_parse_error("Class name contains illegal Q-signature " + // "in descriptor in class file %s", + // CHECK_0); + // } + // fall through + case JVM_SIGNATURE_CLASS: + { if (_major_version < JAVA_1_5_VERSION) { // Skip over the class name if one is there const char* const p = skip_over_field_name(signature + 1, true, --length); @@ -5133,7 +5428,7 @@ } } else { - // Skip leading 'L' and ignore first appearance of ';' + // Skip leading 'L' or 'Q' and ignore first appearance of ';' signature++; const char* c = (const char*) memchr(signature, ';', length - 1); // Format check signature @@ -5188,6 +5483,9 @@ p = skip_over_field_name(bytes, true, length); legal = (p != NULL) && ((p - bytes) == (int)length); } + } else if (_major_version >= CONSTANT_CLASS_DESCRIPTORS && bytes[length - 1] == ';' ) { + // Support for L...; and Q...; descriptors + legal = verify_unqualified_name(bytes + 1, length - 2, LegalClass); } else { // 4900761: relax the constraints based on JSR202 spec // Class names may be drawn from the entire Unicode character set. 
@@ -5362,7 +5660,7 @@ int ClassFileParser::total_oop_map_count() const { assert(_field_info != NULL, "invariant"); - return _field_info->total_oop_map_count; + return _field_info->oop_map_blocks->nonstatic_oop_map_count; } jint ClassFileParser::layout_size() const { @@ -5492,6 +5790,12 @@ } } + if (ik->is_value()) { + ValueKlass* vk = ValueKlass::cast(ik); + oop val = ik->allocate_instance(CHECK_NULL); + vk->set_default_value(val); + } + return ik; } @@ -5512,7 +5816,7 @@ assert(_field_info != NULL, "invariant"); assert(ik->static_field_size() == _field_info->static_field_size, "sanity"); - assert(ik->nonstatic_oop_map_count() == _field_info->total_oop_map_count, + assert(ik->nonstatic_oop_map_count() == _field_info->oop_map_blocks->nonstatic_oop_map_count, "sanity"); assert(ik->is_instance_klass(), "sanity"); @@ -5525,7 +5829,7 @@ ik->set_nonstatic_field_size(_field_info->nonstatic_field_size); ik->set_has_nonstatic_fields(_field_info->has_nonstatic_fields); assert(_fac != NULL, "invariant"); - ik->set_static_oop_field_count(_fac->count[STATIC_OOP]); + ik->set_static_oop_field_count(_fac->count[STATIC_OOP] + _fac->count[STATIC_FLATTENABLE]); // this transfers ownership of a lot of arrays from // the parser onto the InstanceKlass* @@ -5613,13 +5917,13 @@ // Compute transitive closure of interfaces this class implements // Do final class setup - fill_oop_maps(ik, - _field_info->nonstatic_oop_map_count, - _field_info->nonstatic_oop_offsets, - _field_info->nonstatic_oop_counts); + OopMapBlocksBuilder* oop_map_blocks = _field_info->oop_map_blocks; + if (oop_map_blocks->nonstatic_oop_map_count > 0) { + oop_map_blocks->copy(ik->start_of_nonstatic_oop_maps()); + } // Fill in has_finalizer, has_vanilla_constructor, and layout_helper - set_precomputed_flags(ik); + set_precomputed_flags(ik, CHECK); // check if this class can access its super class check_super_class_access(ik, CHECK); @@ -5669,6 +5973,29 @@ } } + int nfields = ik->java_fields_count(); + if (ik->is_value()) 
nfields++; + for (int i = 0; i < nfields; i++) { + if (ik->field_access_flags(i) & JVM_ACC_FLATTENABLE) { + Symbol* klass_name = ik->field_signature(i)->fundamental_name(CHECK); + // Value classes must have been pre-loaded + Klass* klass = SystemDictionary::find(klass_name, + Handle(THREAD, ik->class_loader()), + Handle(THREAD, ik->protection_domain()), CHECK); + assert(klass != NULL, "Sanity check"); + assert(klass->access_flags().is_value_type(), "Value type expected"); + ik->set_value_field_klass(i, klass); + klass_name->decrement_refcount(); + } else if (is_value_type() && ((ik->field_access_flags(i) & JVM_ACC_FIELD_INTERNAL) != 0) + && ((ik->field_access_flags(i) & JVM_ACC_STATIC) != 0)) { + ValueKlass::cast(ik)->set_default_value_offset(ik->field_offset(i)); + } + } + + if (is_value_type()) { + ValueKlass::cast(ik)->initialize_calling_convention(CHECK); + } + ClassLoadingService::notify_class_loaded(ik, false /* not shared class */); if (!is_internal()) { @@ -5858,6 +6185,7 @@ _has_nonstatic_concrete_methods(false), _declares_nonstatic_concrete_methods(false), _has_final_method(false), + _has_flattenable_fields(false), _has_finalizer(false), _has_empty_finalizer(false), _has_vanilla_constructor(false), @@ -6053,15 +6381,19 @@ // ACCESS FLAGS stream->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len - // Access flags - jint flags; + jint recognized_modifiers = JVM_RECOGNIZED_CLASS_MODIFIERS; // JVM_ACC_MODULE is defined in JDK-9 and later. 
if (_major_version >= JAVA_9_VERSION) { - flags = stream->get_u2_fast() & (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_MODULE); - } else { - flags = stream->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS; + recognized_modifiers |= JVM_ACC_MODULE; + } + // JVM_ACC_VALUE is defined for class file version 55 and later + if (supports_value_types()) { + recognized_modifiers |= JVM_ACC_VALUE; } + // Access flags + jint flags = stream->get_u2_fast() & recognized_modifiers; + if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { // Set abstract bit for old class files for backward compatibility flags |= JVM_ACC_ABSTRACT; @@ -6196,6 +6528,7 @@ _fac = new FieldAllocationCount(); parse_fields(stream, _access_flags.is_interface(), + _access_flags.is_value_type(), _fac, cp, cp_size, @@ -6208,6 +6541,7 @@ AccessFlags promoted_flags; parse_methods(stream, _access_flags.is_interface(), + _access_flags.is_value_type(), &promoted_flags, &_has_final_method, &_declares_nonstatic_concrete_methods, @@ -6289,6 +6623,14 @@ ); return; } + + // For a value class, only java/lang/Object is an acceptable super class + if (_access_flags.get_flags() & JVM_ACC_VALUE) { + guarantee_property(_super_klass->name() == vmSymbols::java_lang_Object(), + "Value type must have java.lang.Object as superclass in class file %s", + CHECK); + } + // Make sure super class is not final if (_super_klass->is_final()) { THROW_MSG(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class"); @@ -6329,6 +6671,19 @@ assert(_fac != NULL, "invariant"); assert(_parsed_annotations != NULL, "invariant"); + + for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) { + if (fs.is_flattenable()) { + // Pre-load value class + Klass* klass = SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + assert(klass->access_flags().is_value_type(), "Value type expected"); + 
_has_flattenable_fields = true; + } + } + _field_info = new FieldLayoutInfo(); layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK); @@ -6366,6 +6721,7 @@ return _stream->clone(); } + // ---------------------------------------------------------------------------- // debugging --- old/src/hotspot/share/classfile/classFileParser.hpp 2019-03-11 14:25:35.726355576 +0100 +++ new/src/hotspot/share/classfile/classFileParser.hpp 2019-03-11 14:25:35.526355579 +0100 @@ -28,6 +28,7 @@ #include "memory/referenceType.hpp" #include "oops/annotations.hpp" #include "oops/constantPool.hpp" +#include "oops/instanceKlass.hpp" #include "oops/typeArrayOop.hpp" #include "utilities/accessFlags.hpp" @@ -55,6 +56,7 @@ class FieldAllocationCount; class FieldAnnotationCollector; class FieldLayoutInfo; + class OopMapBlocksBuilder; public: // The ClassFileParser has an associated "publicity" level @@ -160,6 +162,7 @@ bool _has_nonstatic_concrete_methods; bool _declares_nonstatic_concrete_methods; bool _has_final_method; + bool _has_flattenable_fields; // precomputed flags bool _has_finalizer; @@ -227,6 +230,7 @@ void parse_fields(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, FieldAllocationCount* const fac, ConstantPool* cp, const int cp_size, @@ -236,12 +240,14 @@ // Method parsing Method* parse_method(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, const ConstantPool* cp, AccessFlags* const promoted_flags, TRAPS); void parse_methods(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, AccessFlags* const promoted_flags, bool* const has_final_method, bool* const declares_nonstatic_concrete_methods, @@ -315,7 +321,7 @@ int runtime_invisible_annotations_length, TRAPS); - void set_precomputed_flags(InstanceKlass* k); + void set_precomputed_flags(InstanceKlass* k, TRAPS); // Format checker methods void classfile_parse_error(const char* msg, TRAPS) const; @@ -399,6 +405,11 @@ const Symbol* sig, TRAPS) const; + 
void throwValueTypeLimitation(THREAD_AND_LOCATION_DECL, + const char* msg, + const Symbol* name = NULL, + const Symbol* sig = NULL) const; + void verify_constantvalue(const ConstantPool* const cp, int constantvalue_index, int signature_index, @@ -417,9 +428,13 @@ TRAPS) const; void verify_legal_class_modifiers(jint flags, TRAPS) const; - void verify_legal_field_modifiers(jint flags, bool is_interface, TRAPS) const; + void verify_legal_field_modifiers(jint flags, + bool is_interface, + bool is_value_type, + TRAPS) const; void verify_legal_method_modifiers(jint flags, bool is_interface, + bool is_value_type, const Symbol* name, TRAPS) const; @@ -498,6 +513,9 @@ void update_class_name(Symbol* new_name); + // Check if the class file supports value types + bool supports_value_types() const; + public: ClassFileParser(ClassFileStream* stream, Symbol* name, @@ -528,6 +546,11 @@ bool is_unsafe_anonymous() const { return _unsafe_anonymous_host != NULL; } bool is_interface() const { return _access_flags.is_interface(); } + bool is_value_type() const { return _access_flags.is_value_type(); } + bool is_value_capable_class() const; + bool has_flattenable_fields() const { return _has_flattenable_fields; } + + u2 java_fields_count() const { return _java_fields_count; } const InstanceKlass* unsafe_anonymous_host() const { return _unsafe_anonymous_host; } const GrowableArray* cp_patches() const { return _cp_patches; } --- old/src/hotspot/share/classfile/classLoader.cpp 2019-03-11 14:25:36.138355570 +0100 +++ new/src/hotspot/share/classfile/classLoader.cpp 2019-03-11 14:25:35.938355573 +0100 @@ -206,7 +206,7 @@ // Set bad_class_name to true to indicate that the package name // could not be obtained due to an error condition. // In this situation, is_same_class_package returns false. 
- if (*class_name_ptr == 'L') { + if (*class_name_ptr == 'L' || *class_name_ptr == 'Q') { if (bad_class_name != NULL) { *bad_class_name = true; } --- old/src/hotspot/share/classfile/classLoaderData.cpp 2019-03-11 14:25:36.550355565 +0100 +++ new/src/hotspot/share/classfile/classLoaderData.cpp 2019-03-11 14:25:36.350355567 +0100 @@ -63,6 +63,7 @@ #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" +#include "oops/valueKlass.hpp" #include "oops/weakHandle.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" @@ -363,6 +364,16 @@ } } +void ClassLoaderData::value_classes_do(void f(ValueKlass*)) { + // Lock-free access requires load_acquire + for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + if (k->is_value()) { + f(ValueKlass::cast(k)); + } + assert(k != k->next_link(), "no loops!"); + } +} + void ClassLoaderData::modules_do(void f(ModuleEntry*)) { assert_locked_or_safepoint(Module_lock); if (_unnamed_module != NULL) { @@ -529,6 +540,8 @@ // if they are not already on the _klasses list. free_deallocate_list_C_heap_structures(); + value_classes_do(ValueKlass::cleanup); + // Clean up class dependencies and tell serviceability tools // these classes are unloading. Must be called // after erroneous classes are released. 
@@ -823,7 +836,11 @@ } else if (m->is_constantPool()) { MetadataFactory::free_metadata(this, (ConstantPool*)m); } else if (m->is_klass()) { - MetadataFactory::free_metadata(this, (InstanceKlass*)m); + if (!((Klass*)m)->is_value()) { + MetadataFactory::free_metadata(this, (InstanceKlass*)m); + } else { + MetadataFactory::free_metadata(this, (ValueKlass*)m); + } } else { ShouldNotReachHere(); } --- old/src/hotspot/share/classfile/classLoaderData.hpp 2019-03-11 14:25:36.962355559 +0100 +++ new/src/hotspot/share/classfile/classLoaderData.hpp 2019-03-11 14:25:36.758355562 +0100 @@ -185,6 +185,7 @@ void classes_do(void f(Klass* const)); void loaded_classes_do(KlassClosure* klass_closure); void classes_do(void f(InstanceKlass*)); + void value_classes_do(void f(ValueKlass*)); void methods_do(void f(Method*)); void modules_do(void f(ModuleEntry*)); void packages_do(void f(PackageEntry*)); --- old/src/hotspot/share/classfile/javaClasses.cpp 2019-03-11 14:25:37.370355553 +0100 +++ new/src/hotspot/share/classfile/javaClasses.cpp 2019-03-11 14:25:37.170355556 +0100 @@ -50,6 +50,7 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueArrayKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/resolvedMethodTable.hpp" #include "runtime/fieldDescriptor.inline.hpp" @@ -908,14 +909,28 @@ // It might also have a component mirror. This mirror must already exist. 
if (k->is_array_klass()) { - if (k->is_typeArray_klass()) { + if (k->is_valueArray_klass()) { + Klass* element_klass = (Klass*) ValueArrayKlass::cast(k)->element_klass(); + if (element_klass->is_value()) { + ValueKlass* vk = ValueKlass::cast(InstanceKlass::cast(element_klass)); + comp_mirror = Handle(THREAD, vk->value_mirror()); + } else { + comp_mirror = Handle(THREAD, element_klass->java_mirror()); + } + } + else if (k->is_typeArray_klass()) { BasicType type = TypeArrayKlass::cast(k)->element_type(); comp_mirror = Handle(THREAD, Universe::java_mirror(type)); } else { assert(k->is_objArray_klass(), "Must be"); Klass* element_klass = ObjArrayKlass::cast(k)->element_klass(); assert(element_klass != NULL, "Must have an element klass"); - comp_mirror = Handle(THREAD, element_klass->java_mirror()); + if (element_klass->is_value()) { + ValueKlass* vk = ValueKlass::cast(InstanceKlass::cast(element_klass)); + comp_mirror = Handle(THREAD, vk->value_mirror()); + } else { + comp_mirror = Handle(THREAD, element_klass->java_mirror()); + } } assert(comp_mirror() != NULL, "must have a mirror"); @@ -955,12 +970,46 @@ // concurrently doesn't expect a k to have a null java_mirror. release_set_array_klass(comp_mirror(), k); } + + if (k->is_value()) { + // create the secondary mirror for value class + oop value_mirror_oop = create_value_mirror(k, mirror, CHECK); + set_box_mirror(mirror(), mirror()); + set_value_mirror(mirror(), value_mirror_oop); + } } else { assert(fixup_mirror_list() != NULL, "fixup_mirror_list not initialized"); fixup_mirror_list()->push(k); } } +// Create the secondary mirror for value type. Sets all the fields of this java.lang.Class +// instance with the same value as the primary mirror except signers. +// Class::setSigners and getSigners will use the primary mirror when passed to the JVM. 
+oop java_lang_Class::create_value_mirror(Klass* k, Handle mirror, TRAPS) { + // Allocate mirror (java.lang.Class instance) + oop mirror_oop = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0); + Handle value_mirror(THREAD, mirror_oop); + + java_lang_Class::set_klass(value_mirror(), k); + java_lang_Class::set_static_oop_field_count(value_mirror(), static_oop_field_count(mirror())); + // ## do we need to set init lock? + java_lang_Class::set_init_lock(value_mirror(), init_lock(mirror())); + + if (k->is_array_klass()) { + assert(component_mirror(mirror()) != NULL, "must have a mirror"); + set_component_mirror(value_mirror(), component_mirror(mirror())); + } + + set_protection_domain(value_mirror(), protection_domain(mirror())); + set_class_loader(value_mirror(), class_loader(mirror())); + // ## handle if java.base is not yet defined + set_module(value_mirror(), module(mirror())); + set_box_mirror(value_mirror(), mirror()); + set_value_mirror(value_mirror(), value_mirror()); + return value_mirror(); +} + #if INCLUDE_CDS_JAVA_HEAP // Clears mirror fields. Static final fields with initial values are reloaded // from constant pool. 
The object identity hash is in the object header and is @@ -1368,6 +1417,26 @@ java_class->obj_field_put(_source_file_offset, source_file); } +oop java_lang_Class::value_mirror(oop java_class) { + assert(_value_mirror_offset != 0, "must be set"); + return java_class->obj_field(_value_mirror_offset); +} + +void java_lang_Class::set_value_mirror(oop java_class, oop mirror) { + assert(_value_mirror_offset != 0, "must be set"); + java_class->obj_field_put(_value_mirror_offset, mirror); +} + +oop java_lang_Class::box_mirror(oop java_class) { + assert(_box_mirror_offset != 0, "must be set"); + return java_class->obj_field(_box_mirror_offset); +} + +void java_lang_Class::set_box_mirror(oop java_class, oop mirror) { + assert(_box_mirror_offset != 0, "must be set"); + java_class->obj_field_put(_box_mirror_offset, mirror); +} + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) @@ -1412,18 +1481,26 @@ assert(java_lang_Class::is_instance(java_class), "must be a Class object"); Symbol* name = NULL; bool is_instance = false; + bool is_value = false; if (is_primitive(java_class)) { name = vmSymbols::type_signature(primitive_type(java_class)); } else { Klass* k = as_Klass(java_class); is_instance = k->is_instance_klass(); + is_value = k->is_value(); name = k->name(); } if (name == NULL) { st->print(""); return; } - if (is_instance) st->print("L"); + if (is_instance) { + if (is_value && (java_class == value_mirror(java_class))) { + st->print("Q"); + } else { + st->print("L"); + } + } st->write((char*) name->base(), (int) name->utf8_length()); if (is_instance) st->print(";"); } @@ -1444,8 +1521,14 @@ name->increment_refcount(); } else { ResourceMark rm; - const char* sigstr = k->signature_name(); - int siglen = (int) strlen(sigstr); + const char* sigstr; + if (k->is_value()) { + char c = (java_class == 
value_mirror(java_class)) ? 'Q' : 'L'; + sigstr = InstanceKlass::cast(k)->signature_name_of(c); + } else { + sigstr = k->signature_name(); + } + int siglen = (int) strlen(sigstr); if (!intern_if_not_found) { name = SymbolTable::probe(sigstr, siglen); } else { @@ -1530,6 +1613,8 @@ macro(_component_mirror_offset, k, "componentType", class_signature, false); \ macro(_module_offset, k, "module", module_signature, false); \ macro(_name_offset, k, "name", string_signature, false); \ + macro(_box_mirror_offset, k, "boxType", class_signature, false); \ + macro(_value_mirror_offset, k, "valueType", class_signature, false); \ void java_lang_Class::compute_offsets() { if (offsets_computed) { @@ -4007,6 +4092,8 @@ int java_lang_Class::_module_offset; int java_lang_Class::_protection_domain_offset; int java_lang_Class::_component_mirror_offset; +int java_lang_Class::_box_mirror_offset; +int java_lang_Class::_value_mirror_offset; int java_lang_Class::_init_lock_offset; int java_lang_Class::_signers_offset; int java_lang_Class::_name_offset; --- old/src/hotspot/share/classfile/javaClasses.hpp 2019-03-11 14:25:37.802355547 +0100 +++ new/src/hotspot/share/classfile/javaClasses.hpp 2019-03-11 14:25:37.602355550 +0100 @@ -246,6 +246,8 @@ static int _component_mirror_offset; static int _name_offset; static int _source_file_offset; + static int _box_mirror_offset; + static int _value_mirror_offset; static bool offsets_computed; static int classRedefinedCount_offset; @@ -268,6 +270,7 @@ Handle protection_domain, TRAPS); static void fixup_mirror(Klass* k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); + static oop create_value_mirror(Klass* k, Handle mirror, TRAPS); // Archiving static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; @@ -316,6 +319,11 @@ static void set_module(oop java_class, oop module); static oop module(oop java_class); + static void set_box_mirror(oop java_class, oop mirror); + static oop box_mirror(oop 
java_class); + static void set_value_mirror(oop java_class, oop mirror); + static oop value_mirror(oop java_class); + static oop name(Handle java_class, TRAPS); static oop source_file(oop java_class); --- old/src/hotspot/share/classfile/placeholders.cpp 2019-03-11 14:25:38.218355542 +0100 +++ new/src/hotspot/share/classfile/placeholders.cpp 2019-03-11 14:25:38.018355544 +0100 @@ -45,6 +45,7 @@ entry->set_superThreadQ(NULL); entry->set_loadInstanceThreadQ(NULL); entry->set_defineThreadQ(NULL); + entry->set_flattenableFieldQ(NULL); entry->set_definer(NULL); entry->set_instance_klass(NULL); return entry; @@ -166,7 +167,8 @@ probe->remove_seen_thread(thread, action); // If no other threads using this entry, and this thread is not using this entry for other states if ((probe->superThreadQ() == NULL) && (probe->loadInstanceThreadQ() == NULL) - && (probe->defineThreadQ() == NULL) && (probe->definer() == NULL)) { + && (probe->defineThreadQ() == NULL) && (probe->definer() == NULL) + && (probe->flattenableFieldQ() == NULL)) { remove_entry(index, hash, name, loader_data); } } @@ -221,6 +223,9 @@ st->print("defineThreadQ threads:"); defineThreadQ()->print_action_queue(st); st->cr(); + st->print("flattenableFieldQ threads:"); + flattenableFieldQ()->print_action_queue(st); + st->cr(); } void PlaceholderTable::print_on(outputStream* st) const { --- old/src/hotspot/share/classfile/placeholders.hpp 2019-03-11 14:25:38.626355536 +0100 +++ new/src/hotspot/share/classfile/placeholders.hpp 2019-03-11 14:25:38.422355539 +0100 @@ -74,10 +74,12 @@ // on a class/classloader basis // so the head of that queue owns the token // and the rest of the threads return the result the first thread gets +// FLATTENABLE_FIELD: needed to check for value type flattenable fields circularity enum classloadAction { LOAD_INSTANCE = 1, // calling load_instance_class LOAD_SUPER = 2, // loading superclass for this class - DEFINE_CLASS = 3 // find_or_define class + DEFINE_CLASS = 3, // find_or_define class + 
FLATTENABLE_FIELD = 4 // flattenable value type fields }; // find_and_add returns probe pointer - old or new @@ -109,6 +111,8 @@ // For DEFINE_CLASS, the head of the queue owns the // define token and the rest of the threads wait to return the // result the first thread gets. +// For FLATTENABLE_FIELD, set when loading value type fields for +// class circularity checking. class SeenThread: public CHeapObj { private: Thread *_thread; @@ -160,6 +164,7 @@ // including _definer // _definer owns token // queue waits for and returns results from _definer + SeenThread* _flattenableFieldQ; // queue of value types for circularity checking public: // Simple accessors, used only by SystemDictionary @@ -192,6 +197,9 @@ SeenThread* defineThreadQ() const { return _defineThreadQ; } void set_defineThreadQ(SeenThread* SeenThread) { _defineThreadQ = SeenThread; } + SeenThread* flattenableFieldQ() const { return _flattenableFieldQ; } + void set_flattenableFieldQ(SeenThread* SeenThread) { _flattenableFieldQ = SeenThread; } + PlaceholderEntry* next() const { return (PlaceholderEntry*)HashtableEntry::next(); } @@ -216,7 +224,10 @@ queuehead = _superThreadQ; break; case PlaceholderTable::DEFINE_CLASS: - queuehead = _defineThreadQ; + queuehead = _defineThreadQ; + break; + case PlaceholderTable::FLATTENABLE_FIELD: + queuehead = _flattenableFieldQ; break; default: Unimplemented(); } @@ -234,6 +245,9 @@ case PlaceholderTable::DEFINE_CLASS: _defineThreadQ = seenthread; break; + case PlaceholderTable::FLATTENABLE_FIELD: + _flattenableFieldQ = seenthread; + break; default: Unimplemented(); } return; @@ -251,6 +265,10 @@ return (_defineThreadQ != NULL); } + bool flattenable_field_in_progress() { + return (_flattenableFieldQ != NULL); + } + // Doubly-linked list of Threads per action for class/classloader pair // Class circularity support: links in thread before loading superclass // bootstrapsearchpath support: links in a thread before load_instance_class --- 
old/src/hotspot/share/classfile/stackMapFrame.cpp 2019-03-11 14:25:39.038355530 +0100 +++ new/src/hotspot/share/classfile/stackMapFrame.cpp 2019-03-11 14:25:38.830355533 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,6 +101,7 @@ switch (ss.type()) { case T_OBJECT: case T_ARRAY: + case T_VALUETYPE: { Symbol* sig = ss.as_symbol(CHECK_(VerificationType::bogus_type())); // Create another symbol to save as signature stream unreferences @@ -109,6 +110,9 @@ verifier()->create_temporary_symbol(sig, 0, sig->utf8_length(), CHECK_(VerificationType::bogus_type())); assert(sig_copy == sig, "symbols don't match"); + if (ss.type() == T_VALUETYPE) { + return VerificationType::valuetype_type(sig_copy); + } return VerificationType::reference_type(sig_copy); } case T_INT: return VerificationType::integer_type(); --- old/src/hotspot/share/classfile/stackMapTable.cpp 2019-03-11 14:25:39.446355525 +0100 +++ new/src/hotspot/share/classfile/stackMapTable.cpp 2019-03-11 14:25:39.246355527 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -188,7 +188,17 @@ _stream->stackmap_format_error("bad class index", THREAD); return VerificationType::bogus_type(); } - return VerificationType::reference_type(_cp->klass_name_at(class_index)); + Symbol* klass_name = _cp->klass_name_at(class_index); + if (klass_name->is_Q_signature()) { + Symbol* fund_name = klass_name->fundamental_name(THREAD); + if (fund_name == NULL) { + _stream->stackmap_format_error("TBD something bad happened", THREAD); + return VerificationType::bogus_type(); + } + return VerificationType::valuetype_type(fund_name); + } else { + return VerificationType::reference_type(klass_name); + } } if (tag == ITEM_UninitializedThis) { if (flags != NULL) { --- old/src/hotspot/share/classfile/systemDictionary.cpp 2019-03-11 14:25:39.858355519 +0100 +++ new/src/hotspot/share/classfile/systemDictionary.cpp 2019-03-11 14:25:39.654355522 +0100 @@ -67,6 +67,7 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayKlass.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/resolvedMethodTable.hpp" #include "prims/methodHandles.hpp" @@ -78,6 +79,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" +#include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "services/classLoadingService.hpp" @@ -261,9 +263,9 @@ Handle protection_domain, TRAPS) { assert(class_name != NULL && !FieldType::is_array(class_name), "must be"); - if (FieldType::is_obj(class_name)) { + if (FieldType::is_obj(class_name) || FieldType::is_valuetype(class_name)) { ResourceMark rm(THREAD); - // Ignore wrapping L and ;. + // Ignore wrapping L and ;. 
(and Q and ; for value types); TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1, class_name->utf8_length() - 2, CHECK_NULL); return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD); @@ -288,7 +290,7 @@ // dimension and object_key in FieldArrayInfo are assigned as a side-effect // of this call BasicType t = FieldType::get_array_info(class_name, fd, CHECK_NULL); - if (t == T_OBJECT) { + if (t == T_OBJECT || t == T_VALUETYPE) { // naked oop "k" is OK here -- we assign back into it k = SystemDictionary::resolve_instance_class_or_null(fd.object_key(), class_loader, @@ -304,7 +306,6 @@ return k; } - // Must be called for any super-class or super-interface resolution // during class definition to allow class circularity checking // super-interface callers: @@ -448,6 +449,51 @@ return superk; } +Klass* SystemDictionary::resolve_flattenable_field_or_fail(AllFieldStream* fs, + Handle class_loader, + Handle protection_domain, + bool throw_error, + TRAPS) { + Symbol* class_name = fs->signature()->fundamental_name(THREAD); + class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); + ClassLoaderData* loader_data = class_loader_data(class_loader); + unsigned int p_hash = placeholders()->compute_hash(class_name); + int p_index = placeholders()->hash_to_index(p_hash); + bool throw_circularity_error = false; + PlaceholderEntry* oldprobe; + + { + MutexLocker mu(SystemDictionary_lock, THREAD); + oldprobe = placeholders()->get_entry(p_index, p_hash, class_name, loader_data); + if (oldprobe != NULL && + oldprobe->check_seen_thread(THREAD, PlaceholderTable::FLATTENABLE_FIELD)) { + throw_circularity_error = true; + + } else { + placeholders()->find_and_add(p_index, p_hash, class_name, loader_data, + PlaceholderTable::FLATTENABLE_FIELD, NULL, THREAD); + } + } + + Klass* klass = NULL; + if (!throw_circularity_error) { + klass = SystemDictionary::resolve_or_fail(class_name, class_loader, + 
protection_domain, true, THREAD); + } else { + ResourceMark rm(THREAD); + THROW_MSG_NULL(vmSymbols::java_lang_ClassCircularityError(), class_name->as_C_string()); + } + + { + MutexLocker mu(SystemDictionary_lock, THREAD); + placeholders()->find_and_remove(p_index, p_hash, class_name, loader_data, + PlaceholderTable::FLATTENABLE_FIELD, THREAD); + } + + class_name->decrement_refcount(); + return klass; +} + void SystemDictionary::validate_protection_domain(InstanceKlass* klass, Handle class_loader, Handle protection_domain, @@ -665,7 +711,7 @@ Handle protection_domain, TRAPS) { assert(name != NULL && !FieldType::is_array(name) && - !FieldType::is_obj(name), "invalid class name"); + !FieldType::is_obj(name) && !FieldType::is_valuetype(name), "invalid class name"); EventClassLoad class_load_start_event; @@ -976,7 +1022,7 @@ // side-effect of this call FieldArrayInfo fd; BasicType t = FieldType::get_array_info(class_name, fd, CHECK_(NULL)); - if (t != T_OBJECT) { + if (t != T_OBJECT && t != T_VALUETYPE) { k = Universe::typeArrayKlassObj(t); } else { k = SystemDictionary::find(fd.object_key(), class_loader, protection_domain, THREAD); @@ -2165,7 +2211,7 @@ // cleared if revocation occurs too often for this type // NOTE that we must only do this when the class is initally // defined, not each time it is referenced from a new class loader - if (oopDesc::equals(k->class_loader(), class_loader())) { + if (oopDesc::equals(k->class_loader(), class_loader()) && !k->is_value()) { k->set_prototype_header(markOopDesc::biased_locking_prototype()); } } @@ -2211,7 +2257,7 @@ // constraint table. The element Klass*s are. 
FieldArrayInfo fd; BasicType t = FieldType::get_array_info(class_name, fd, CHECK_(NULL)); - if (t != T_OBJECT) { + if (t != T_OBJECT && t != T_VALUETYPE) { klass = Universe::typeArrayKlassObj(t); } else { MutexLocker mu(SystemDictionary_lock, THREAD); @@ -2551,7 +2597,7 @@ assert(is_java_primitive(char2type(ch)) || ch == 'V', ""); return Handle(THREAD, find_java_mirror_for_type(ch)); - } else if (FieldType::is_obj(type) || FieldType::is_array(type)) { + } else if (FieldType::is_obj(type) || FieldType::is_valuetype(type) || FieldType::is_array(type)) { // It's a reference type. if (accessing_klass != NULL) { --- old/src/hotspot/share/classfile/systemDictionary.hpp 2019-03-11 14:25:40.286355513 +0100 +++ new/src/hotspot/share/classfile/systemDictionary.hpp 2019-03-11 14:25:40.086355516 +0100 @@ -27,6 +27,7 @@ #include "classfile/classLoader.hpp" #include "jvmci/systemDictionary_jvmci.hpp" +#include "oops/fieldStreams.hpp" #include "oops/objArrayOop.hpp" #include "oops/symbol.hpp" #include "runtime/java.hpp" @@ -170,6 +171,7 @@ do_klass(Context_klass, java_lang_invoke_MethodHandleNatives_CallSiteContext ) \ do_klass(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite ) \ do_klass(MutableCallSite_klass, java_lang_invoke_MutableCallSite ) \ + do_klass(ValueBootstrapMethods_klass, java_lang_invoke_ValueBootstrapMethods ) \ do_klass(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite ) \ /* Note: MethodHandle must be first, and VolatileCallSite last in group */ \ \ @@ -212,6 +214,7 @@ do_klass(Integer_klass, java_lang_Integer ) \ do_klass(Long_klass, java_lang_Long ) \ \ + \ /* JVMCI classes. These are loaded on-demand. */ \ JVMCI_WK_KLASSES_DO(do_klass) \ \ @@ -271,6 +274,12 @@ bool is_superclass, TRAPS); + static Klass* resolve_flattenable_field_or_fail(AllFieldStream* fs, + Handle class_loader, + Handle protection_domain, + bool throw_error, + TRAPS); + // Parse new stream. This won't update the dictionary or // class hierarchy, simply parse the stream. 
Used by JVMTI RedefineClasses. // Also used by Unsafe_DefineAnonymousClass @@ -385,6 +394,7 @@ } static bool resolve_wk_klass(WKID id, TRAPS); + static InstanceKlass* check_klass_ValhallaClasses(InstanceKlass* k) { return k; } static void resolve_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS); static void resolve_wk_klasses_through(WKID end_id, WKID &start_id, TRAPS) { int limit = (int)end_id + 1; --- old/src/hotspot/share/classfile/verificationType.cpp 2019-03-11 14:25:40.702355507 +0100 +++ new/src/hotspot/share/classfile/verificationType.cpp 2019-03-11 14:25:40.494355510 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,7 @@ Verifier::trace_class_resolution(this_class, klass); } + if (this_class->access_flags().is_value_type()) return false; if (this_class->is_interface() && (!from_field_is_protected || from_name != vmSymbols::java_lang_Object())) { // If we are not trying to access a protected field or method in @@ -107,6 +108,35 @@ } else if (is_array() && from.is_array()) { VerificationType comp_this = get_component(context, CHECK_false); VerificationType comp_from = from.get_component(context, CHECK_false); + +/* + // This code implements non-covariance between value type arrays and both + // arrays of objects and arrays of interface types. If covariance is + // supported for value type arrays then this code should be removed. + if (comp_from.is_valuetype() && !comp_this.is_null() && comp_this.is_reference()) { + // An array of value types is not assignable to an array of java.lang.Objects. + if (comp_this.name() == vmSymbols::java_lang_Object()) { + return false; + } + + // Need to load 'comp_this' to see if it is an interface. 
+ InstanceKlass* klass = context->current_class(); + { + HandleMark hm(THREAD); + Klass* comp_this_class = SystemDictionary::resolve_or_fail( + comp_this.name(), Handle(THREAD, klass->class_loader()), + Handle(THREAD, klass->protection_domain()), true, CHECK_false); + klass->class_loader_data()->record_dependency(comp_this_class); + if (log_is_enabled(Debug, class, resolve)) { + Verifier::trace_class_resolution(comp_this_class, klass); + } + // An array of value types is not assignable to an array of interface types. + if (comp_this_class->is_interface()) { + return false; + } + } + } +*/ if (!comp_this.is_bogus() && !comp_from.is_bogus()) { return comp_this.is_component_assignable_from(comp_from, context, from_field_is_protected, THREAD); @@ -115,6 +145,36 @@ return false; } +bool VerificationType::is_valuetype_assignable_from(const VerificationType& from) const { + // Check that 'from' is not null, is a value type, and is the same value type. + assert(is_valuetype(), "called with a non-valuetype type"); + assert(!is_null(), "valuetype is not null"); + assert(name() != vmSymbols::java_lang_Object(), "java.lang.Object is a value type?"); + return (!from.is_null() && from.is_valuetype() && name() == from.name()); +} + +bool VerificationType::is_ref_assignable_from_value_type(const VerificationType& from, ClassVerifier* context, TRAPS) const { + assert(!from.is_null(), "Value type should not be null"); + if (!is_null() && (name()->is_same_fundamental_type(from.name()) || + name() == vmSymbols::java_lang_Object())) { + return true; + } + + // Need to load 'this' to see if it is an interface. 
+ InstanceKlass* klass = context->current_class(); + { + HandleMark hm(THREAD); + Klass* this_class = SystemDictionary::resolve_or_fail( + name(), Handle(THREAD, klass->class_loader()), + Handle(THREAD, klass->protection_domain()), true, CHECK_false); + klass->class_loader_data()->record_dependency(this_class); + if (log_is_enabled(Debug, class, resolve)) { + Verifier::trace_class_resolution(this_class, klass); + } + return (this_class->is_interface()); + } +} + VerificationType VerificationType::get_component(ClassVerifier *context, TRAPS) const { assert(is_array() && name()->utf8_length() >= 2, "Must be a valid array"); Symbol* component; @@ -137,6 +197,11 @@ name(), 2, name()->utf8_length() - 1, CHECK_(VerificationType::bogus_type())); return VerificationType::reference_type(component); + case 'Q': + component = context->create_temporary_symbol( + name(), 2, name()->utf8_length() - 1, + CHECK_(VerificationType::bogus_type())); + return VerificationType::valuetype_type(component); default: // Met an invalid type signature, e.g. 
[X return VerificationType::bogus_type(); @@ -161,6 +226,8 @@ case Double_2nd: st->print("double_2nd"); break; case Null: st->print("null"); break; case ReferenceQuery: st->print("reference type"); break; + case ValueTypeQuery: st->print("value type"); break; + case NonScalarQuery: st->print("reference or value type"); break; case Category1Query: st->print("category1 type"); break; case Category2Query: st->print("category2 type"); break; case Category2_2ndQuery: st->print("category2_2nd type"); break; @@ -169,6 +236,8 @@ st->print("uninitializedThis"); } else if (is_uninitialized()) { st->print("uninitialized %d", bci()); + } else if (is_valuetype()) { + name()->print_Qvalue_on(st); } else { if (name() != NULL) { name()->print_value_on(st); --- old/src/hotspot/share/classfile/verificationType.hpp 2019-03-11 14:25:41.118355501 +0100 +++ new/src/hotspot/share/classfile/verificationType.hpp 2019-03-11 14:25:40.910355504 +0100 @@ -69,21 +69,24 @@ // Enum for the _data field enum { - // Bottom two bits determine if the type is a reference, primitive, - // uninitialized or a query-type. - TypeMask = 0x00000003, + // Bottom three bits determine if the type is a reference, value type, + // primitive, uninitialized or a query-type. 
+ TypeMask = 0x00000007, // Topmost types encoding - Reference = 0x0, // _sym contains the name + Reference = 0x0, // _sym contains the name of an object Primitive = 0x1, // see below for primitive list Uninitialized = 0x2, // 0x00ffff00 contains bci TypeQuery = 0x3, // Meta-types used for category testing + ValueType = 0x4, // _sym contains the name of a value type // Utility flags ReferenceFlag = 0x00, // For reference query types Category1Flag = 0x01, // One-word values Category2Flag = 0x02, // First word of a two-word value Category2_2ndFlag = 0x04, // Second word of a two-word value + ValueTypeFlag = 0x08, // For value type query types + NonScalarFlag = 0x10, // For either value type or reference queries // special reference values Null = 0x00000000, // A reference with a 0 sym is null @@ -115,7 +118,9 @@ ReferenceQuery = (ReferenceFlag << 1 * BitsPerByte) | TypeQuery, Category1Query = (Category1Flag << 1 * BitsPerByte) | TypeQuery, Category2Query = (Category2Flag << 1 * BitsPerByte) | TypeQuery, - Category2_2ndQuery = (Category2_2ndFlag << 1 * BitsPerByte) | TypeQuery + Category2_2ndQuery = (Category2_2ndFlag << 1 * BitsPerByte) | TypeQuery, + ValueTypeQuery = (ValueTypeFlag << 1 * BitsPerByte) | TypeQuery, + NonScalarQuery = (NonScalarFlag << 1 * BitsPerByte) | TypeQuery }; VerificationType(uintptr_t raw_data) { @@ -148,16 +153,20 @@ // any reference is assignable to reference_check. 
static VerificationType reference_check() { return VerificationType(ReferenceQuery); } + static VerificationType valuetype_check() + { return VerificationType(ValueTypeQuery); } static VerificationType category1_check() { return VerificationType(Category1Query); } static VerificationType category2_check() { return VerificationType(Category2Query); } static VerificationType category2_2nd_check() { return VerificationType(Category2_2ndQuery); } + static VerificationType nonscalar_check() + { return VerificationType(NonScalarQuery); } // For reference types, store the actual Symbol static VerificationType reference_type(Symbol* sh) { - assert(((uintptr_t)sh & 0x3) == 0, "Symbols must be aligned"); + assert(((uintptr_t)sh & TypeMask) == 0, "Symbols must be aligned"); // If the above assert fails in the future because oop* isn't aligned, // then this type encoding system will have to change to have a tag value // to descriminate between oops and primitives. @@ -168,6 +177,17 @@ static VerificationType uninitialized_this_type() { return uninitialized_type(BciForThis); } + // For value types, store the actual Symbol* and set the 3rd bit. + // Provides a way for a value type to be distinguished from a reference type. + static VerificationType valuetype_type(Symbol* sh) { + assert(((uintptr_t)sh & TypeMask) == 0, "Symbols must be aligned"); + assert((uintptr_t)sh != 0, "Null is not a valid value type"); + // If the above assert fails in the future because oop* isn't aligned, + // then this type encoding system will have to change to have a tag value + // to descriminate between oops and primitives. 
+ return VerificationType((uintptr_t)sh | ValueType); + } + // Create based on u1 read from classfile static VerificationType from_tag(u1 tag); @@ -183,11 +203,12 @@ bool is_double() const { return (_u._data == Double); } bool is_long2() const { return (_u._data == Long_2nd); } bool is_double2() const { return (_u._data == Double_2nd); } - bool is_reference() const { return ((_u._data & TypeMask) == Reference); } + bool is_reference() const { return (((_u._data & TypeMask) == Reference) && !is_valuetype_check()); } + bool is_valuetype() const { return ((_u._data & TypeMask) == ValueType); } bool is_category1() const { // This should return true for all one-word types, which are category1 - // primitives, and references (including uninitialized refs). Though - // the 'query' types should technically return 'false' here, if we + // primitives, references (including uninitialized refs) and value types. + // Though the 'query' types should technically return 'false' here, if we // allow this to return true, we can perform the test using only // 2 operations rather than 8 (3 masks, 3 compares and 2 logical 'ands'). // Since noone should call this on a query type anyway, this is ok. 
@@ -201,6 +222,8 @@ return ((_u._data & Category2_2nd) == Category2_2nd); } bool is_reference_check() const { return _u._data == ReferenceQuery; } + bool is_valuetype_check() const { return _u._data == ValueTypeQuery; } + bool is_nonscalar_check() const { return _u._data == NonScalarQuery; } bool is_category1_check() const { return _u._data == Category1Query; } bool is_category2_check() const { return _u._data == Category2Query; } bool is_category2_2nd_check() const { return _u._data == Category2_2ndQuery; } @@ -218,9 +241,12 @@ bool is_float_array() const { return is_x_array('F'); } bool is_double_array() const { return is_x_array('D'); } bool is_object_array() const { return is_x_array('L'); } + bool is_value_array() const { return is_x_array('Q'); } bool is_array_array() const { return is_x_array('['); } bool is_reference_array() const { return is_object_array() || is_array_array(); } + bool is_nonscalar_array() const + { return is_object_array() || is_array_array() || is_value_array(); } bool is_object() const { return (is_reference() && !is_null() && name()->utf8_length() >= 1 && name()->char_at(0) != '['); } @@ -237,20 +263,28 @@ return VerificationType(is_long() ? Long_2nd : Double_2nd); } + static VerificationType change_ref_to_valuetype(VerificationType ref) { + assert(ref.is_reference(), "Bad arg"); + assert(!ref.is_null(), "Unexpected NULL"); + return valuetype_type(ref.name()); + } + u2 bci() const { assert(is_uninitialized(), "Must be uninitialized type"); return ((_u._data & BciMask) >> 1 * BitsPerByte); } Symbol* name() const { - assert(is_reference() && !is_null(), "Must be a non-null reference"); - return _u._sym; + assert(!is_null() && (is_reference() || is_valuetype()), "Must be a non-null reference or a value type"); + return (is_reference() ? 
_u._sym : ((Symbol*)(_u._data & ~(uintptr_t)ValueType))); } bool equals(const VerificationType& t) const { return (_u._data == t._u._data || - (is_reference() && t.is_reference() && !is_null() && !t.is_null() && - name() == t.name())); + (((is_reference() && t.is_reference()) || + (is_valuetype() && t.is_valuetype())) && + !is_null() && !t.is_null() && name() == t.name())); + } bool operator ==(const VerificationType& t) const { @@ -279,6 +313,11 @@ return from.is_category2_2nd(); case ReferenceQuery: return from.is_reference() || from.is_uninitialized(); + case NonScalarQuery: + return from.is_reference() || from.is_uninitialized() || + from.is_valuetype(); + case ValueTypeQuery: + return from.is_valuetype(); case Boolean: case Byte: case Char: @@ -286,7 +325,11 @@ // An int can be assigned to boolean, byte, char or short values. return from.is_integer(); default: - if (is_reference() && from.is_reference()) { + if (is_valuetype()) { + return is_valuetype_assignable_from(from); + } else if (is_reference() && from.is_valuetype()) { + return is_ref_assignable_from_value_type(from, context, THREAD); + } else if (is_reference() && from.is_reference()) { return is_reference_assignable_from(from, context, from_field_is_protected, THREAD); @@ -334,6 +377,11 @@ const VerificationType&, ClassVerifier*, bool from_field_is_protected, TRAPS) const; + bool is_valuetype_assignable_from(const VerificationType& from) const; + + bool is_ref_assignable_from_value_type(const VerificationType& from, ClassVerifier* context, TRAPS) const; + + public: static bool resolve_and_check_assignability(InstanceKlass* klass, Symbol* name, Symbol* from_name, bool from_field_is_protected, --- old/src/hotspot/share/classfile/verifier.cpp 2019-03-11 14:25:41.534355496 +0100 +++ new/src/hotspot/share/classfile/verifier.cpp 2019-03-11 14:25:41.330355499 +0100 @@ -58,6 +58,7 @@ #define NOFAILOVER_MAJOR_VERSION 51 #define NONZERO_PADDING_BYTES_IN_SWITCH_MAJOR_VERSION 51 #define 
STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION 52 +#define VALUETYPE_MAJOR_VERSION 56 #define MAX_ARRAY_DIMENSIONS 255 // Access to external entry for VerifyClassCodes - old byte code verifier @@ -242,7 +243,7 @@ return (should_verify_for(klass->class_loader(), should_verify_class) && // return if the class is a bootstrapping class // or defineClass specified not to verify by default (flags override passed arg) - // We need to skip the following four for bootstraping + // We need to skip the following four for bootstrapping name != vmSymbols::java_lang_Object() && name != vmSymbols::java_lang_Class() && name != vmSymbols::java_lang_String() && @@ -473,6 +474,13 @@ case BAD_STACKMAP: ss->print("Invalid stackmap specification."); break; + case WRONG_VALUE_TYPE: + ss->print("Type "); + _type.details(ss); + ss->print(" and type "); + _expected.details(ss); + ss->print(" must be identical value types."); + break; case UNKNOWN: default: ShouldNotReachHere(); @@ -567,10 +575,18 @@ // Methods in ClassVerifier +VerificationType reference_or_valuetype(InstanceKlass* klass) { + if (klass->is_value()) { + return VerificationType::valuetype_type(klass->name()); + } else { + return VerificationType::reference_type(klass->name()); + } +} + ClassVerifier::ClassVerifier( InstanceKlass* klass, TRAPS) : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass) { - _this_type = VerificationType::reference_type(klass->name()); + _this_type = reference_or_valuetype(klass); // Create list to hold symbols in reference area. 
_symbols = new GrowableArray(100, 0, NULL); } @@ -960,7 +976,7 @@ VerificationType::integer_type(), CHECK_VERIFY(this)); atype = current_frame.pop_stack( VerificationType::reference_check(), CHECK_VERIFY(this)); - if (!atype.is_reference_array()) { + if (!atype.is_nonscalar_array()) { verify_error(ErrorContext::bad_type(bci, current_frame.stack_top_ctx(), TypeOrigin::implicit(VerificationType::reference_check())), @@ -1134,7 +1150,7 @@ atype = current_frame.pop_stack( VerificationType::reference_check(), CHECK_VERIFY(this)); // more type-checking is done at runtime - if (!atype.is_reference_array()) { + if (!atype.is_nonscalar_array()) { verify_error(ErrorContext::bad_type(bci, current_frame.stack_top_ctx(), TypeOrigin::implicit(VerificationType::reference_check())), @@ -1534,12 +1550,12 @@ case Bytecodes::_if_acmpeq : case Bytecodes::_if_acmpne : current_frame.pop_stack( - VerificationType::reference_check(), CHECK_VERIFY(this)); + VerificationType::nonscalar_check(), CHECK_VERIFY(this)); // fall through case Bytecodes::_ifnull : case Bytecodes::_ifnonnull : current_frame.pop_stack( - VerificationType::reference_check(), CHECK_VERIFY(this)); + VerificationType::nonscalar_check(), CHECK_VERIFY(this)); target = bcs.dest(); stackmap_table.check_jump_target (¤t_frame, target, CHECK_VERIFY(this)); @@ -1590,7 +1606,7 @@ no_control_flow = true; break; case Bytecodes::_areturn : type = current_frame.pop_stack( - VerificationType::reference_check(), CHECK_VERIFY(this)); + VerificationType::nonscalar_check(), CHECK_VERIFY(this)); verify_return_value(return_type, type, bci, ¤t_frame, CHECK_VERIFY(this)); no_control_flow = true; break; @@ -1622,6 +1638,17 @@ verify_field_instructions( &bcs, ¤t_frame, cp, false, CHECK_VERIFY(this)); no_control_flow = false; break; + case Bytecodes::_withfield : + if (_klass->major_version() < VALUETYPE_MAJOR_VERSION) { + class_format_error( + "withfield not supported by this class file version (%d.%d), class %s", + _klass->major_version(), 
_klass->minor_version(), _klass->external_name()); + return; + } + // pass FALSE, operand can't be an array type for withfield. + verify_field_instructions( + &bcs, ¤t_frame, cp, false, CHECK_VERIFY(this)); + no_control_flow = false; break; case Bytecodes::_invokevirtual : case Bytecodes::_invokespecial : case Bytecodes::_invokestatic : @@ -1651,6 +1678,28 @@ current_frame.push_stack(type, CHECK_VERIFY(this)); no_control_flow = false; break; } + case Bytecodes::_defaultvalue : + { + if (_klass->major_version() < VALUETYPE_MAJOR_VERSION) { + class_format_error( + "defaultvalue not supported by this class file version (%d.%d), class %s", + _klass->major_version(), _klass->minor_version(), _klass->external_name()); + return; + } + index = bcs.get_index_u2(); + verify_cp_class_type(bci, index, cp, CHECK_VERIFY(this)); + VerificationType ref_type = cp_index_to_type(index, cp, CHECK_VERIFY(this)); + if (!ref_type.is_object()) { + verify_error(ErrorContext::bad_type(bci, + TypeOrigin::cp(index, ref_type)), + "Illegal defaultvalue instruction"); + return; + } + VerificationType value_type = + VerificationType::change_ref_to_valuetype(ref_type); + current_frame.push_stack(value_type, CHECK_VERIFY(this)); + no_control_flow = false; break; + } case Bytecodes::_newarray : type = get_newarray_type(bcs.get_index(), bci, CHECK_VERIFY(this)); current_frame.pop_stack( @@ -1691,10 +1740,11 @@ no_control_flow = false; break; } case Bytecodes::_monitorenter : - case Bytecodes::_monitorexit : - current_frame.pop_stack( + case Bytecodes::_monitorexit : { + VerificationType ref = current_frame.pop_stack( VerificationType::reference_check(), CHECK_VERIFY(this)); no_control_flow = false; break; + } case Bytecodes::_multianewarray : { index = bcs.get_index_u2(); @@ -1953,6 +2003,7 @@ verify_cp_index(bci, cp, index, CHECK_VERIFY(this)); unsigned int tag = cp->tag_at(index).value(); + if ((types & (1 << tag)) == 0) { verify_error(ErrorContext::bad_cp_index(bci, index), "Illegal type at 
constant pool entry %d in class %s", @@ -2063,7 +2114,7 @@ if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) { if (!tag.is_unresolved_klass()) { types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float) - | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class) + | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class) | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType) | (1 << JVM_CONSTANT_Dynamic); // Note: The class file parser already verified the legality of @@ -2246,13 +2297,14 @@ VerificationType ref_class_type = cp_ref_index_to_type( index, cp, CHECK_VERIFY(this)); if (!ref_class_type.is_object() && - (!allow_arrays || !ref_class_type.is_array())) { + (!allow_arrays || !ref_class_type.is_array())) { verify_error(ErrorContext::bad_type(bcs->bci(), TypeOrigin::cp(index, ref_class_type)), "Expecting reference to class in class %s at constant pool index %d", _klass->external_name(), index); return; } + VerificationType target_class_type = ref_class_type; assert(sizeof(VerificationType) == sizeof(uintptr_t), @@ -2283,6 +2335,25 @@ } break; } + case Bytecodes::_withfield: { + for (int i = n - 1; i >= 0; i--) { + current_frame->pop_stack(field_type[i], CHECK_VERIFY(this)); + } + // stack_object_type and target_class_type must be the same value type. 
+ stack_object_type = + current_frame->pop_stack(VerificationType::valuetype_check(), CHECK_VERIFY(this)); + VerificationType target_value_type = + VerificationType::change_ref_to_valuetype(target_class_type); + if (!stack_object_type.equals(target_value_type)) { + verify_error(ErrorContext::bad_value_type(bci, + current_frame->stack_top_ctx(), + TypeOrigin::cp(index, target_class_type)), + "Invalid type on operand stack in withfield instruction"); + return; + } + current_frame->push_stack(target_value_type, CHECK_VERIFY(this)); + break; + } case Bytecodes::_getfield: { stack_object_type = current_frame->pop_stack( target_class_type, CHECK_VERIFY(this)); @@ -2726,7 +2797,7 @@ return; } - // Get referenced class type + // Get referenced class VerificationType ref_class_type; if (opcode == Bytecodes::_invokedynamic) { if (_klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { @@ -2818,22 +2889,22 @@ } else if (opcode == Bytecodes::_invokespecial && !is_same_or_direct_interface(current_class(), current_type(), ref_class_type) && !ref_class_type.equals(VerificationType::reference_type( - current_class()->super()->name()))) { + current_class()->super()->name()))) { // super() can never be a value_type. 
bool subtype = false; bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref; if (!current_class()->is_unsafe_anonymous()) { subtype = ref_class_type.is_assignable_from( current_type(), this, false, CHECK_VERIFY(this)); } else { - VerificationType unsafe_anonymous_host_type = - VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name()); + InstanceKlass* unsafe_host = current_class()->unsafe_anonymous_host(); + VerificationType unsafe_anonymous_host_type = reference_or_valuetype(unsafe_host); subtype = ref_class_type.is_assignable_from(unsafe_anonymous_host_type, this, false, CHECK_VERIFY(this)); // If invokespecial of IMR, need to recheck for same or // direct interface relative to the host class have_imr_indirect = (have_imr_indirect && !is_same_or_direct_interface( - current_class()->unsafe_anonymous_host(), + unsafe_host, unsafe_anonymous_host_type, ref_class_type)); } if (!subtype) { @@ -2871,9 +2942,10 @@ // objectref is a subtype of the unsafe_anonymous_host of the current class // to allow an anonymous class to reference methods in the unsafe_anonymous_host VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this)); - VerificationType hosttype = - VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name()); - bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this)); + + InstanceKlass* unsafe_host = current_class()->unsafe_anonymous_host(); + VerificationType host_type = reference_or_valuetype(unsafe_host); + bool subtype = host_type.is_assignable_from(top, this, false, CHECK_VERIFY(this)); if (!subtype) { verify_error( ErrorContext::bad_type(current_frame->offset(), current_frame->stack_top_ctx(), @@ -2986,10 +3058,11 @@ assert(n == length, "Unexpected number of characters in string"); } else { // it's an object or interface const char* component_name = component_type.name()->as_utf8(); - // add one dimension to component with 'L' prepended and ';' 
postpended. + char Q_or_L = component_type.is_valuetype() ? 'Q' : 'L'; + // add one dimension to component with 'L' or 'Q' prepended and ';' appended. length = (int)strlen(component_name) + 3; arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length + 1); - int n = os::snprintf(arr_sig_str, length + 1, "[L%s;", component_name); + int n = os::snprintf(arr_sig_str, length + 1, "[%c%s;", Q_or_L, component_name); assert(n == length, "Unexpected number of characters in string"); } Symbol* arr_sig = create_temporary_symbol( @@ -3032,7 +3105,7 @@ void ClassVerifier::verify_aload(u2 index, StackMapFrame* current_frame, TRAPS) { VerificationType type = current_frame->get_local( - index, VerificationType::reference_check(), CHECK_VERIFY(this)); + index, VerificationType::nonscalar_check(), CHECK_VERIFY(this)); current_frame->push_stack(type, CHECK_VERIFY(this)); } @@ -3069,7 +3142,7 @@ void ClassVerifier::verify_astore(u2 index, StackMapFrame* current_frame, TRAPS) { VerificationType type = current_frame->pop_stack( - VerificationType::reference_check(), CHECK_VERIFY(this)); + VerificationType::nonscalar_check(), CHECK_VERIFY(this)); current_frame->set_local(index, type, CHECK_VERIFY(this)); } --- old/src/hotspot/share/classfile/verifier.hpp 2019-03-11 14:25:41.966355490 +0100 +++ new/src/hotspot/share/classfile/verifier.hpp 2019-03-11 14:25:41.762355493 +0100 @@ -39,7 +39,7 @@ STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50, INVOKEDYNAMIC_MAJOR_VERSION = 51, NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52, - DYNAMICCONSTANT_MAJOR_VERSION = 55 + DYNAMICCONSTANT_MAJOR_VERSION = 55, }; // Verify the bytecodes for a class. 
@@ -153,6 +153,7 @@ STACK_UNDERFLOW, // Attempt to pop and empty expression stack MISSING_STACKMAP, // No stackmap for this location and there should be BAD_STACKMAP, // Format error in stackmap + WRONG_VALUE_TYPE, // Mismatched value type NO_FAULT, // No error UNKNOWN } FaultType; @@ -216,6 +217,9 @@ static ErrorContext bad_stackmap(int index, StackMapFrame* frame) { return ErrorContext(0, BAD_STACKMAP, TypeOrigin::frame(frame)); } + static ErrorContext bad_value_type(u2 bci, TypeOrigin type, TypeOrigin exp) { + return ErrorContext(bci, WRONG_VALUE_TYPE, type, exp); + } bool is_valid() const { return _fault != NO_FAULT; } int bci() const { return _bci; } @@ -402,7 +406,14 @@ SignatureStream* sig_type, VerificationType* inference_type, TRAPS); VerificationType cp_index_to_type(int index, const constantPoolHandle& cp, TRAPS) { - return VerificationType::reference_type(cp->klass_name_at(index)); + Symbol* name = cp->klass_name_at(index); + if (name->is_Q_signature()) { + // Remove the Q and ; + // TBD need error msg if fundamental_name() returns NULL? + Symbol* fund_name = name->fundamental_name(CHECK_(VerificationType::bogus_type())); + return VerificationType::valuetype_type(fund_name); + } + return VerificationType::reference_type(name); } // Keep a list of temporary symbols created during verification because @@ -435,8 +446,16 @@ // Create another symbol to save as signature stream unreferences this symbol. Symbol* name_copy = create_temporary_symbol(name); assert(name_copy == name, "symbols don't match"); - *inference_type = - VerificationType::reference_type(name_copy); + *inference_type = VerificationType::reference_type(name_copy); + return 1; + } + case T_VALUETYPE: + { + Symbol* vname = sig_type->as_symbol(CHECK_0); + // Create another symbol to save as signature stream unreferences this symbol. 
+ Symbol* vname_copy = create_temporary_symbol(vname); + assert(vname_copy == vname, "symbols don't match"); + *inference_type = VerificationType::valuetype_type(vname_copy); return 1; } case T_LONG: --- old/src/hotspot/share/classfile/vmSymbols.cpp 2019-03-11 14:25:42.374355484 +0100 +++ new/src/hotspot/share/classfile/vmSymbols.cpp 2019-03-11 14:25:42.174355487 +0100 @@ -98,7 +98,7 @@ _type_signatures[T_SHORT] = short_signature(); _type_signatures[T_BOOLEAN] = bool_signature(); _type_signatures[T_VOID] = void_signature(); - // no single signatures for T_OBJECT or T_ARRAY + // no single signatures for T_OBJECT, T_VALUETYPE or T_ARRAY #ifdef ASSERT for (int i = (int)T_BOOLEAN; i < (int)T_VOID+1; i++) { Symbol* s = _type_signatures[i]; @@ -218,7 +218,7 @@ return result; } } - return T_OBJECT; + return s->char_at(0) == 'Q' ? T_VALUETYPE : T_OBJECT; } @@ -598,6 +598,8 @@ case vmIntrinsics::_updateByteBufferCRC32: if (!UseCRC32Intrinsics) return true; break; + case vmIntrinsics::_makePrivateBuffer: + case vmIntrinsics::_finishPrivateBuffer: case vmIntrinsics::_getReference: case vmIntrinsics::_getBoolean: case vmIntrinsics::_getByte: @@ -607,6 +609,7 @@ case vmIntrinsics::_getLong: case vmIntrinsics::_getFloat: case vmIntrinsics::_getDouble: + case vmIntrinsics::_getValue: case vmIntrinsics::_putReference: case vmIntrinsics::_putBoolean: case vmIntrinsics::_putByte: @@ -616,6 +619,7 @@ case vmIntrinsics::_putLong: case vmIntrinsics::_putFloat: case vmIntrinsics::_putDouble: + case vmIntrinsics::_putValue: case vmIntrinsics::_getReferenceVolatile: case vmIntrinsics::_getBooleanVolatile: case vmIntrinsics::_getByteVolatile: --- old/src/hotspot/share/classfile/vmSymbols.hpp 2019-03-11 14:25:42.786355478 +0100 +++ new/src/hotspot/share/classfile/vmSymbols.hpp 2019-03-11 14:25:42.586355481 +0100 @@ -324,8 +324,9 @@ template(DEFAULT_CONTEXT_name, "DEFAULT_CONTEXT") \ NOT_LP64( do_alias(intptr_signature, int_signature) ) \ LP64_ONLY( do_alias(intptr_signature, long_signature) 
) \ - \ - /* Support for JVMCI */ \ + \ + \ + /* Support for JVMCI */ \ JVMCI_VM_SYMBOLS_DO(template, do_alias) \ \ template(java_lang_StackWalker, "java/lang/StackWalker") \ @@ -447,6 +448,7 @@ template(module_entry_name, "module_entry") \ template(resolved_references_name, "") \ template(init_lock_name, "") \ + template(default_value_name, ".default") \ \ /* name symbols needed by intrinsics */ \ VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \ @@ -509,6 +511,7 @@ template(threadgroup_string_void_signature, "(Ljava/lang/ThreadGroup;Ljava/lang/String;)V") \ template(string_class_signature, "(Ljava/lang/String;)Ljava/lang/Class;") \ template(object_object_object_signature, "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \ + template(object_object_boolean_signature, "(Ljava/lang/Object;Ljava/lang/Object;)Z") \ template(string_string_string_signature, "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;") \ template(string_string_signature, "(Ljava/lang/String;)Ljava/lang/String;") \ template(classloader_string_long_signature, "(Ljava/lang/ClassLoader;Ljava/lang/String;)J") \ @@ -657,6 +660,8 @@ template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \ template(url_void_signature, "(Ljava/net/URL;)V") \ \ + template(java_lang_invoke_ValueBootstrapMethods, "java/lang/invoke/ValueBootstrapMethods") \ + template(isSubstitutable_name, "isSubstitutable") \ /*end*/ // Here are all the intrinsics known to the runtime and the CI. 
@@ -1120,6 +1125,8 @@ do_signature(putFloat_signature, "(Ljava/lang/Object;JF)V") \ do_signature(getDouble_signature, "(Ljava/lang/Object;J)D") \ do_signature(putDouble_signature, "(Ljava/lang/Object;JD)V") \ + do_signature(getValue_signature, "(Ljava/lang/Object;JLjava/lang/Class;)Ljava/lang/Object;") \ + do_signature(putValue_signature, "(Ljava/lang/Object;JLjava/lang/Class;Ljava/lang/Object;)V") \ \ do_name(getReference_name,"getReference") do_name(putReference_name,"putReference") \ do_name(getBoolean_name,"getBoolean") do_name(putBoolean_name,"putBoolean") \ @@ -1130,6 +1137,9 @@ do_name(getLong_name,"getLong") do_name(putLong_name,"putLong") \ do_name(getFloat_name,"getFloat") do_name(putFloat_name,"putFloat") \ do_name(getDouble_name,"getDouble") do_name(putDouble_name,"putDouble") \ + do_name(getValue_name,"getValue") do_name(putValue_name,"putValue") \ + do_name(makePrivateBuffer_name,"makePrivateBuffer") \ + do_name(finishPrivateBuffer_name,"finishPrivateBuffer") \ \ do_intrinsic(_getReference, jdk_internal_misc_Unsafe, getReference_name, getReference_signature, F_RN) \ do_intrinsic(_getBoolean, jdk_internal_misc_Unsafe, getBoolean_name, getBoolean_signature, F_RN) \ @@ -1140,6 +1150,7 @@ do_intrinsic(_getLong, jdk_internal_misc_Unsafe, getLong_name, getLong_signature, F_RN) \ do_intrinsic(_getFloat, jdk_internal_misc_Unsafe, getFloat_name, getFloat_signature, F_RN) \ do_intrinsic(_getDouble, jdk_internal_misc_Unsafe, getDouble_name, getDouble_signature, F_RN) \ + do_intrinsic(_getValue, jdk_internal_misc_Unsafe, getValue_name, getValue_signature, F_RN) \ do_intrinsic(_putReference, jdk_internal_misc_Unsafe, putReference_name, putReference_signature, F_RN) \ do_intrinsic(_putBoolean, jdk_internal_misc_Unsafe, putBoolean_name, putBoolean_signature, F_RN) \ do_intrinsic(_putByte, jdk_internal_misc_Unsafe, putByte_name, putByte_signature, F_RN) \ @@ -1149,6 +1160,10 @@ do_intrinsic(_putLong, jdk_internal_misc_Unsafe, putLong_name, putLong_signature, F_RN) \ 
do_intrinsic(_putFloat, jdk_internal_misc_Unsafe, putFloat_name, putFloat_signature, F_RN) \ do_intrinsic(_putDouble, jdk_internal_misc_Unsafe, putDouble_name, putDouble_signature, F_RN) \ + do_intrinsic(_putValue, jdk_internal_misc_Unsafe, putValue_name, putValue_signature, F_RN) \ + \ + do_intrinsic(_makePrivateBuffer, jdk_internal_misc_Unsafe, makePrivateBuffer_name, object_object_signature, F_RN) \ + do_intrinsic(_finishPrivateBuffer, jdk_internal_misc_Unsafe, finishPrivateBuffer_name, object_object_signature, F_RN) \ \ do_name(getReferenceVolatile_name,"getReferenceVolatile") do_name(putReferenceVolatile_name,"putReferenceVolatile") \ do_name(getBooleanVolatile_name,"getBooleanVolatile") do_name(putBooleanVolatile_name,"putBooleanVolatile") \ --- old/src/hotspot/share/code/codeBlob.cpp 2019-03-11 14:25:43.206355473 +0100 +++ new/src/hotspot/share/code/codeBlob.cpp 2019-03-11 14:25:43.006355475 +0100 @@ -272,23 +272,27 @@ MemoryService::track_code_cache_memory_usage(); } +BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) + : RuntimeBlob(name, cb, sizeof(BufferBlob), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) +{} + //---------------------------------------------------------------------------------------------------- // Implementation of AdapterBlob -AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : - BufferBlob("I2C/C2I adapters", size, cb) { +AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) : + BufferBlob("I2C/C2I adapters", size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) { CodeCache::commit(this); } -AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { +AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) { ThreadInVMfromUnknown 
__tiv; // get to VM state in case we block on CodeCache_lock AdapterBlob* blob = NULL; unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob)); { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) AdapterBlob(size, cb); + blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -339,6 +343,30 @@ } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); + + return blob; +} + +//---------------------------------------------------------------------------------------------------- +// Implementation of BufferedValueTypeBlob +BufferedValueTypeBlob::BufferedValueTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int unpack_fields_off) : + BufferBlob("buffered value type", size, cb), + _pack_fields_off(pack_fields_off), + _unpack_fields_off(unpack_fields_off) { + CodeCache::commit(this); +} + +BufferedValueTypeBlob* BufferedValueTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int unpack_fields_off) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + BufferedValueTypeBlob* blob = NULL; + unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedValueTypeBlob)); + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) BufferedValueTypeBlob(size, cb, pack_fields_off, unpack_fields_off); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); return blob; } --- old/src/hotspot/share/code/codeBlob.hpp 2019-03-11 14:25:43.614355467 +0100 +++ new/src/hotspot/share/code/codeBlob.hpp 2019-03-11 14:25:43.414355470 +0100 @@ -142,6 +142,7 @@ virtual bool is_method_handles_adapter_blob() const { return false; } virtual bool is_aot() const { return false; } virtual 
bool is_compiled() const { return false; } + virtual bool is_buffered_value_type_blob() const { return false; } inline bool is_compiled_by_c1() const { return _type == compiler_c1; }; inline bool is_compiled_by_c2() const { return _type == compiler_c2; }; @@ -391,12 +392,14 @@ friend class AdapterBlob; friend class VtableBlob; friend class MethodHandlesAdapterBlob; + friend class BufferedValueTypeBlob; friend class WhiteBox; private: // Creation support BufferBlob(const char* name, int size); BufferBlob(const char* name, int size, CodeBuffer* cb); + BufferBlob(const char* name, int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments = false); // This ordinary operator delete is needed even though not used, so the // below two-argument operator delete will be treated as a placement @@ -429,14 +432,20 @@ class AdapterBlob: public BufferBlob { private: - AdapterBlob(int size, CodeBuffer* cb); + AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments = false); public: // Creation - static AdapterBlob* create(CodeBuffer* cb); + static AdapterBlob* create(CodeBuffer* cb, + int frame_complete, + int frame_size, + OopMapSet* oop_maps, + bool caller_must_gc_arguments = false); // Typing virtual bool is_adapter_blob() const { return true; } + + bool caller_must_gc_arguments(JavaThread* thread) const { return true; } }; //--------------------------------------------------------------------------------------------------- @@ -467,6 +476,26 @@ virtual bool is_method_handles_adapter_blob() const { return true; } }; +//---------------------------------------------------------------------------------------------------- +// BufferedValueTypeBlob : used for pack/unpack handlers + +class BufferedValueTypeBlob: public BufferBlob { +private: + const int _pack_fields_off; + const int _unpack_fields_off; + + BufferedValueTypeBlob(int size, CodeBuffer* cb, int 
pack_fields_off, int unpack_fields_off); + +public: + // Creation + static BufferedValueTypeBlob* create(CodeBuffer* cb, int pack_fields_off, int unpack_fields_off); + + address pack_fields() const { return code_begin() + _pack_fields_off; } + address unpack_fields() const { return code_begin() + _unpack_fields_off; } + + // Typing + virtual bool is_buffered_value_type_blob() const { return true; } +}; //---------------------------------------------------------------------------------------------------- // RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine --- old/src/hotspot/share/code/compiledIC.hpp 2019-03-11 14:25:44.438355456 +0100 +++ new/src/hotspot/share/code/compiledIC.hpp 2019-03-11 14:25:44.234355458 +0100 @@ -328,7 +328,7 @@ // -----<----- Clean ----->----- // / \ // / \ -// compilled code <------------> interpreted code +// compiled code <------------> interpreted code // // Clean: Calls directly to runtime method for fixup // Compiled code: Calls directly to compiled code --- old/src/hotspot/share/code/compiledMethod.cpp 2019-03-11 14:25:44.850355450 +0100 +++ new/src/hotspot/share/code/compiledMethod.cpp 2019-03-11 14:25:44.646355453 +0100 @@ -39,6 +39,7 @@ #include "prims/methodHandles.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/sharedRuntime.hpp" CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, @@ -276,7 +277,7 @@ guarantee(pd != NULL, "scope must be present"); return new ScopeDesc(this, pd->scope_decode_offset(), pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), - pd->return_oop()); + pd->return_oop(), pd->return_vt()); } ScopeDesc* CompiledMethod::scope_desc_near(address pc) { @@ -284,7 +285,7 @@ guarantee(pd != NULL, "scope must be present"); return new ScopeDesc(this, pd->scope_decode_offset(), 
pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), - pd->return_oop()); + pd->return_oop(), pd->return_vt()); } address CompiledMethod::oops_reloc_begin() const { @@ -344,19 +345,31 @@ void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { if (method() != NULL && !method()->is_native()) { address pc = fr.pc(); - SimpleScopeDesc ssd(this, pc); - Bytecode_invoke call(ssd.method(), ssd.bci()); - bool has_receiver = call.has_receiver(); - bool has_appendix = call.has_appendix(); - Symbol* signature = call.signature(); - // The method attached by JIT-compilers should be used, if present. // Bytecode can be inaccurate in such case. Method* callee = attached_method_before_pc(pc); + bool has_receiver = false; + bool has_appendix = false; + Symbol* signature = NULL; if (callee != NULL) { has_receiver = !(callee->access_flags().is_static()); has_appendix = false; signature = callee->signature(); + + // If value types are passed as fields, use the extended signature + // which contains the types of all (oop) fields of the value type. 
+ if (callee->has_scalarized_args()) { + const GrowableArray* sig = callee->adapter()->get_sig_cc(); + assert(sig != NULL, "sig should never be null"); + signature = SigEntry::create_symbol(sig); + has_receiver = false; // The extended signature contains the receiver type + } + } else { + SimpleScopeDesc ssd(this, pc); + Bytecode_invoke call(ssd.method(), ssd.bci()); + has_receiver = call.has_receiver(); + has_appendix = call.has_appendix(); + signature = call.signature(); } fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f); --- old/src/hotspot/share/code/compiledMethod.hpp 2019-03-11 14:25:45.282355444 +0100 +++ new/src/hotspot/share/code/compiledMethod.hpp 2019-03-11 14:25:45.062355447 +0100 @@ -217,6 +217,8 @@ virtual int compile_id() const = 0; virtual address verified_entry_point() const = 0; + virtual address verified_value_entry_point() const = 0; + virtual address verified_value_ro_entry_point() const = 0; virtual void log_identity(xmlStream* log) const = 0; virtual void log_state_change() const = 0; virtual bool make_not_used() = 0; --- old/src/hotspot/share/code/debugInfoRec.cpp 2019-03-11 14:25:45.694355438 +0100 +++ new/src/hotspot/share/code/debugInfoRec.cpp 2019-03-11 14:25:45.494355441 +0100 @@ -287,6 +287,7 @@ bool rethrow_exception, bool is_method_handle_invoke, bool return_oop, + bool return_vt, DebugToken* locals, DebugToken* expressions, DebugToken* monitors) { @@ -303,6 +304,7 @@ last_pd->set_rethrow_exception(rethrow_exception); last_pd->set_is_method_handle_invoke(is_method_handle_invoke); last_pd->set_return_oop(return_oop); + last_pd->set_return_vt(return_vt); // serialize sender stream offest stream()->write_int(sender_stream_offset); --- old/src/hotspot/share/code/debugInfoRec.hpp 2019-03-11 14:25:46.110355432 +0100 +++ new/src/hotspot/share/code/debugInfoRec.hpp 2019-03-11 14:25:45.906355435 +0100 @@ -105,6 +105,7 @@ bool rethrow_exception = false, bool is_method_handle_invoke = false, bool return_oop = 
false, + bool return_vt = false, DebugToken* locals = NULL, DebugToken* expressions = NULL, DebugToken* monitors = NULL); --- old/src/hotspot/share/code/nmethod.cpp 2019-03-11 14:25:46.518355427 +0100 +++ new/src/hotspot/share/code/nmethod.cpp 2019-03-11 14:25:46.314355430 +0100 @@ -598,6 +598,8 @@ _comp_level = CompLevel_none; _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); + _verified_value_entry_point = _verified_entry_point; + _verified_value_ro_entry_point = _verified_entry_point; _osr_entry_point = NULL; _exception_cache = NULL; _pc_desc_container.reset_to(NULL); @@ -758,6 +760,8 @@ _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); + _verified_value_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Value_Entry); + _verified_value_ro_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Value_Entry_RO); _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); _exception_cache = NULL; @@ -2224,7 +2228,7 @@ assert(pd != NULL, "PcDesc must exist"); for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), - pd->return_oop()); + pd->return_oop(), pd->return_vt()); !sd->is_top(); sd = sd->sender()) { sd->verify(); } @@ -2569,24 +2573,16 @@ if (p != NULL && p->real_pc(this) <= end) { return new ScopeDesc(this, p->scope_decode_offset(), p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(), - p->return_oop()); + p->return_oop(), p->return_vt()); } return NULL; } void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const { - if (block_begin == entry_point()) stream->print_cr("[Entry Point]"); - if 
(block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]"); - if (JVMCI_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]"); - if (block_begin == stub_begin()) stream->print_cr("[Stub Code]"); - if (JVMCI_ONLY(_deopt_handler_begin != NULL &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]"); - - if (has_method_handle_invokes()) - if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]"); - - if (block_begin == consts_begin()) stream->print_cr("[Constants]"); - - if (block_begin == entry_point()) { + address low = MIN4(entry_point(), verified_entry_point(), verified_value_entry_point(), verified_value_ro_entry_point()); + assert(low != 0, "sanity"); + if (block_begin == low) { + // Print method arguments before the method entry methodHandle m = method(); if (m.not_null()) { stream->print(" # "); @@ -2595,40 +2591,45 @@ } if (m.not_null() && !is_osr_method()) { ResourceMark rm; - int sizeargs = m->size_of_parameters(); - BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); - VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); - { - int sig_index = 0; - if (!m->is_static()) - sig_bt[sig_index++] = T_OBJECT; // 'this' - for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { - BasicType t = ss.type(); - sig_bt[sig_index++] = t; - if (type2size[t] == 2) { - sig_bt[sig_index++] = T_VOID; - } else { - assert(type2size[t] == 1, "size is 1 or 2"); - } + int sizeargs = 0; + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256); + VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, 256); + Symbol* sig = m->signature(); + const GrowableArray* sig_cc = m->adapter()->get_sig_cc(); + if (m->has_scalarized_args()) { + // Use extended signature if value type arguments are passed as fields + assert(sig_cc != NULL, "must have scalarized signature"); + sig = SigEntry::create_symbol(sig_cc); + } else if 
(!m->is_static()) { + sig_bt[sizeargs++] = T_OBJECT; // 'this' + } + for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) { + BasicType t = ss.type(); + sig_bt[sizeargs++] = t; + if (type2size[t] == 2) { + sig_bt[sizeargs++] = T_VOID; + } else { + assert(type2size[t] == 1, "size is 1 or 2"); } - assert(sig_index == sizeargs, ""); } const char* spname = "sp"; // make arch-specific? - intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false); + SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false); int stack_slot_offset = this->frame_size() * wordSize; int tab1 = 14, tab2 = 24; int sig_index = 0; - int arg_index = (m->is_static() ? 0 : -1); + int sig_index_cc = 0; + int arg_index = ((m->is_static() || m->has_scalarized_args()) ? 0 : -1); bool did_old_sp = false; - for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { + for (SignatureStream ss(sig); !ss.at_return_type(); ) { bool at_this = (arg_index == -1); bool at_old_sp = false; BasicType t = (at_this ? 
T_OBJECT : ss.type()); assert(t == sig_bt[sig_index], "sigs in sync"); - if (at_this) + if (at_this) { stream->print(" # this: "); - else + } else { stream->print(" # parm%d: ", arg_index); + } stream->move_to(tab1); VMReg fst = regs[sig_index].first(); VMReg snd = regs[sig_index].second(); @@ -2661,6 +2662,15 @@ if (!did_name) stream->print("%s", type2name(t)); } + if (m->has_scalarized_args()) { + while (!SigEntry::skip_value_delimiters(sig_cc, sig_index_cc)) { + sig_index_cc++; + } + if (SigEntry::is_reserved_entry(sig_cc, sig_index_cc)) { + stream->print(" [RESERVED]"); + } + sig_index_cc += type2size[t]; + } if (at_old_sp) { stream->print(" (%s of caller)", spname); did_old_sp = true; @@ -2679,6 +2689,19 @@ } } } + + if (block_begin == entry_point()) stream->print_cr("[Entry Point]"); + if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]"); + if (block_begin == verified_value_entry_point()) stream->print_cr("[Verified Value Entry Point]"); + if (block_begin == verified_value_ro_entry_point()) stream->print_cr("[Verified Value Entry Point (RO)]"); + if (JVMCI_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]"); + if (block_begin == stub_begin()) stream->print_cr("[Stub Code]"); + if (JVMCI_ONLY(_deopt_handler_begin != NULL &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]"); + + if (has_method_handle_invokes()) + if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]"); + + if (block_begin == consts_begin() && consts_begin() != low) stream->print_cr("[Constants]"); } void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) { @@ -2757,7 +2780,7 @@ break; } } - st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop()); + st->print(" {reexecute=%d rethrow=%d return_oop=%d return_vt=%d}", sd->should_reexecute(), 
sd->rethrow_exception(), sd->return_oop(), sd->return_vt()); } // Print all scopes --- old/src/hotspot/share/code/nmethod.hpp 2019-03-11 14:25:46.950355421 +0100 +++ new/src/hotspot/share/code/nmethod.hpp 2019-03-11 14:25:46.742355424 +0100 @@ -91,6 +91,8 @@ // offsets for entry points address _entry_point; // entry point with class check address _verified_entry_point; // entry point without class check + address _verified_value_entry_point; // value type entry point (unpack all value args) without class check + address _verified_value_ro_entry_point; // value type entry point (unpack receiver only) without class check address _osr_entry_point; // entry point for on stack replacement // Offsets for different nmethod parts @@ -324,8 +326,10 @@ bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } // entry points - address entry_point() const { return _entry_point; } // normal entry point - address verified_entry_point() const { return _verified_entry_point; } // if klass is correct + address entry_point() const { return _entry_point; } // normal entry point + address verified_entry_point() const { return _verified_entry_point; } // normal entry point without class check + address verified_value_entry_point() const { return _verified_value_entry_point; } // value type entry point (unpack all value args) without class check + address verified_value_ro_entry_point() const { return _verified_value_ro_entry_point; } // value type entry point (only unpack receiver) without class check // flag accessing and manipulation bool is_not_installed() const { return _state == not_installed; } --- old/src/hotspot/share/code/pcDesc.hpp 2019-03-11 14:25:47.366355415 +0100 +++ new/src/hotspot/share/code/pcDesc.hpp 2019-03-11 14:25:47.166355418 +0100 @@ -42,7 +42,8 @@ PCDESC_reexecute = 1 << 0, PCDESC_is_method_handle_invoke = 1 << 1, PCDESC_return_oop = 1 << 2, - PCDESC_rethrow_exception = 1 << 3 + PCDESC_rethrow_exception = 
1 << 3, + PCDESC_return_vt = 1 << 4 }; int _flags; @@ -89,6 +90,9 @@ bool return_oop() const { return (_flags & PCDESC_return_oop) != 0; } void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); } + bool return_vt() const { return (_flags & PCDESC_return_vt) != 0; } + void set_return_vt(bool z) { set_flag(PCDESC_return_vt, z); } + // Returns the real pc address real_pc(const CompiledMethod* code) const; --- old/src/hotspot/share/code/scopeDesc.cpp 2019-03-11 14:25:47.774355409 +0100 +++ new/src/hotspot/share/code/scopeDesc.cpp 2019-03-11 14:25:47.574355412 +0100 @@ -30,23 +30,25 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" -ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) { +ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop, bool return_vt) { _code = code; _decode_offset = decode_offset; _objects = decode_object_values(obj_decode_offset); _reexecute = reexecute; _rethrow_exception = rethrow_exception; _return_oop = return_oop; + _return_vt = return_vt; decode_body(); } -ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) { +ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop, bool return_vt) { _code = code; _decode_offset = decode_offset; _objects = decode_object_values(DebugInformationRecorder::serialized_null); _reexecute = reexecute; _rethrow_exception = rethrow_exception; _return_oop = return_oop; + _return_vt = return_vt; decode_body(); } @@ -58,6 +60,7 @@ _reexecute = false; //reexecute only applies to the first scope _rethrow_exception = false; _return_oop = false; + _return_vt = false; decode_body(); } --- old/src/hotspot/share/code/scopeDesc.hpp 2019-03-11 14:25:48.182355404 +0100 +++ 
new/src/hotspot/share/code/scopeDesc.hpp 2019-03-11 14:25:47.982355407 +0100 @@ -60,12 +60,12 @@ class ScopeDesc : public ResourceObj { public: // Constructor - ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop); + ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop, bool return_vt); // Calls above, giving default value of "serialized_null" to the // "obj_decode_offset" argument. (We don't use a default argument to // avoid a .hpp-.hpp dependency.) - ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop); + ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop, bool return_vt); // Direct access to scope ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); } @@ -76,6 +76,7 @@ bool should_reexecute() const { return _reexecute; } bool rethrow_exception() const { return _rethrow_exception; } bool return_oop() const { return _return_oop; } + bool return_vt() const { return _return_vt; } GrowableArray* locals(); GrowableArray* expressions(); @@ -105,6 +106,7 @@ bool _reexecute; bool _rethrow_exception; bool _return_oop; + bool _return_vt; // Decoding offsets int _decode_offset; --- old/src/hotspot/share/compiler/methodLiveness.cpp 2019-03-11 14:25:49.006355392 +0100 +++ new/src/hotspot/share/compiler/methodLiveness.cpp 2019-03-11 14:25:48.802355395 +0100 @@ -478,6 +478,8 @@ case Bytecodes::_goto_w: case Bytecodes::_aconst_null: case Bytecodes::_new: + case Bytecodes::_defaultvalue: + case Bytecodes::_withfield: case Bytecodes::_iconst_m1: case Bytecodes::_iconst_0: case Bytecodes::_iconst_1: --- old/src/hotspot/share/compiler/oopMap.cpp 2019-03-11 14:25:49.414355387 +0100 +++ new/src/hotspot/share/compiler/oopMap.cpp 2019-03-11 14:25:49.214355390 +0100 @@ 
-32,6 +32,7 @@ #include "memory/allocation.inline.hpp" #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" +#include "oops/valueKlass.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/signature.hpp" @@ -385,7 +386,7 @@ // load barrier. if (!UseZGC && ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) || - !Universe::heap()->is_in_or_null(*loc))) { + (!Universe::heap()->is_in_or_null(*loc)))) { tty->print_cr("# Found non oop pointer. Dumping state at failure"); // try to dump out some helpful debugging information trace_codeblob_maps(fr, reg_map); --- old/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp 2019-03-11 14:25:49.826355381 +0100 +++ new/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp 2019-03-11 14:25:49.626355384 +0100 @@ -207,7 +207,7 @@ if (pre_val->bottom_type() == TypePtr::NULL_PTR) return; assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here"); } - assert(bt == T_OBJECT, "or we shouldn't be here"); + assert(bt == T_OBJECT || bt == T_VALUETYPE, "or we shouldn't be here"); IdealKit ideal(kit, true); --- old/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2019-03-11 14:25:50.242355375 +0100 +++ new/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2019-03-11 14:25:50.042355378 +0100 @@ -40,6 +40,7 @@ #include "oops/instanceMirrorKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueArrayKlass.inline.hpp" #include "runtime/atomic.hpp" PSOldGen* ParCompactionManager::_old_gen = NULL; --- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-03-11 14:25:50.654355370 +0100 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-03-11 14:25:50.446355372 +0100 @@ -64,6 +64,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueArrayKlass.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include 
"runtime/safepoint.hpp" --- old/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2019-03-11 14:25:51.074355364 +0100 +++ new/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2019-03-11 14:25:50.870355367 +0100 @@ -42,6 +42,7 @@ #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" +#include "oops/valueArrayKlass.inline.hpp" PaddedEnd* PSPromotionManager::_manager_array = NULL; OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; --- old/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp 2019-03-11 14:25:51.482355358 +0100 +++ new/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp 2019-03-11 14:25:51.282355361 +0100 @@ -100,7 +100,7 @@ BasicType type() const { return _type; } LIR_Opr resolved_addr() const { return _resolved_addr; } void set_resolved_addr(LIR_Opr addr) { _resolved_addr = addr; } - bool is_oop() const { return _type == T_ARRAY || _type == T_OBJECT; } + bool is_oop() const { return _type == T_ARRAY || _type == T_OBJECT || _type == T_VALUETYPE; } DecoratorSet decorators() const { return _decorators; } void clear_decorators(DecoratorSet ds) { _decorators &= ~ds; } bool is_raw() const { return (_decorators & AS_RAW) != 0; } --- old/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp 2019-03-11 14:25:51.890355353 +0100 +++ new/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp 2019-03-11 14:25:51.690355355 +0100 @@ -626,33 +626,15 @@ return atomic_add_at_resolved(access, new_val, value_type); } -void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const { - // Exclude the header but include array length to copy by 8 bytes words. - // Can't use base_offset_in_bytes(bt) since basic type is unknown. - int base_off = is_array ? 
arrayOopDesc::length_offset_in_bytes() : - instanceOopDesc::base_offset_in_bytes(); - // base_off: - // 8 - 32-bit VM - // 12 - 64-bit VM, compressed klass - // 16 - 64-bit VM, normal klass - if (base_off % BytesPerLong != 0) { - assert(UseCompressedClassPointers, ""); - if (is_array) { - // Exclude length to copy by 8 bytes words. - base_off += sizeof(int); - } else { - // Include klass to copy by 8 bytes words. - base_off = instanceOopDesc::klass_offset_in_bytes(); - } - assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment"); - } - Node* src_base = kit->basic_plus_adr(src, base_off); - Node* dst_base = kit->basic_plus_adr(dst, base_off); - - // Compute the length also, if needed: - Node* countx = size; - countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off))); - countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong) )); +void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const { +#ifdef ASSERT + intptr_t src_offset; + Node* src = AddPNode::Ideal_base_and_offset(src_base, &kit->gvn(), src_offset); + intptr_t dst_offset; + Node* dst = AddPNode::Ideal_base_and_offset(dst_base, &kit->gvn(), dst_offset); + assert(src == NULL || (src_offset % BytesPerLong == 0), "expect 8 bytes alignment"); + assert(dst == NULL || (dst_offset % BytesPerLong == 0), "expect 8 bytes alignment"); +#endif const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; --- old/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp 2019-03-11 14:25:52.306355347 +0100 +++ new/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp 2019-03-11 14:25:52.102355350 +0100 @@ -120,7 +120,7 @@ Node* base() const { return _base; } C2AccessValuePtr& addr() const { return _addr; } BasicType type() const { return _type; } - bool is_oop() const { return _type == T_OBJECT || _type == T_ARRAY; } + bool is_oop() const { return _type == T_OBJECT || _type == T_VALUETYPE || _type == T_ARRAY; } bool is_raw() const { return 
(_decorators & AS_RAW) != 0; } Node* raw_access() const { return _raw_access; } @@ -241,7 +241,7 @@ virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const; virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const; - virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const; + virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const; virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const { return n; } --- old/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp 2019-03-11 14:25:52.730355341 +0100 +++ new/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp 2019-03-11 14:25:52.522355344 +0100 @@ -135,8 +135,8 @@ kit->final_sync(ideal); } -void CardTableBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const { - BarrierSetC2::clone(kit, src, dst, size, is_array); +void CardTableBarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const { + BarrierSetC2::clone(kit, src_base, dst_base, countx, is_array); const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; // If necessary, emit some card marks afterwards. (Non-arrays only.) 
@@ -149,6 +149,9 @@ Node* no_particular_value = NULL; Node* no_particular_field = NULL; int raw_adr_idx = Compile::AliasIdxRaw; + intptr_t unused_offset; + Node* dst = AddPNode::Ideal_base_and_offset(dst_base, &kit->gvn(), unused_offset); + assert(dst != NULL, "dst_base not an Addp"); post_barrier(kit, kit->control(), kit->memory(raw_adr_type), dst, --- old/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp 2019-03-11 14:25:53.154355335 +0100 +++ new/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp 2019-03-11 14:25:52.946355338 +0100 @@ -42,7 +42,7 @@ Node* byte_map_base_node(GraphKit* kit) const; public: - virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const; + virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const; virtual bool is_gc_barrier_node(Node* node) const; virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const; virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const; --- old/src/hotspot/share/gc/shared/memAllocator.cpp 2019-03-11 14:25:53.978355324 +0100 +++ new/src/hotspot/share/gc/shared/memAllocator.cpp 2019-03-11 14:25:53.770355327 +0100 @@ -376,12 +376,7 @@ oop MemAllocator::finish(HeapWord* mem) const { assert(mem != NULL, "NULL object pointer"); - if (UseBiasedLocking) { - oopDesc::set_mark_raw(mem, _klass->prototype_header()); - } else { - // May be bootstrapping - oopDesc::set_mark_raw(mem, markOopDesc::prototype()); - } + oopDesc::set_mark_raw(mem, Klass::default_prototype_header(_klass)); // Need a release store to ensure array/class length, mark word, and // object zeroing are visible before setting the klass non-NULL, for // concurrent collectors. 
--- old/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp 2019-03-11 14:25:54.462355317 +0100 +++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp 2019-03-11 14:25:54.186355321 +0100 @@ -1188,7 +1188,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { if (is_shenandoah_wb_pre_call(n)) { - uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt(); + uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain_sig()->cnt(); if (n->req() > cnt) { Node* addp = n->in(cnt); if (has_only_shenandoah_wb_pre_uses(addp)) { @@ -1312,9 +1312,9 @@ int offset = adr_type->offset(); if (offset == ShenandoahBrooksPointer::byte_offset()) { if (adr_type->isa_aryptr()) { - adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset); + adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, Type::Offset(offset)); } else if (adr_type->isa_instptr()) { - adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset); + adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, Type::Offset(offset)); } return true; } else { @@ -1329,7 +1329,7 @@ assert (n->is_Call(), ""); CallNode *call = n->as_Call(); if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) { - uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt(); + uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain_sig()->cnt(); if (call->req() > cnt) { assert(call->req() == cnt + 1, "only one extra input"); Node *addp = call->in(cnt); --- old/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp 2019-03-11 14:25:54.930355310 +0100 +++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp 2019-03-11 14:25:54.698355314 +0100 @@ -1031,7 +1031,7 @@ if 
(call->is_call_to_arraycopystub()) { Node* dest = NULL; - const TypeTuple* args = n->as_Call()->_tf->domain(); + const TypeTuple* args = n->as_Call()->_tf->domain_sig(); for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { if (args->field_at(i)->isa_ptr()) { j++; @@ -1157,7 +1157,7 @@ break; } } - uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req(); + uint stop = n->is_Call() ? n->as_Call()->tf()->domain_sig()->cnt() : n->req(); if (i != others_len) { const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]); for (uint j = 0; j < inputs_len; j++) { @@ -2314,18 +2314,17 @@ } } else { if (c->is_Call() && c->as_Call()->adr_type() != NULL) { - CallProjections projs; - c->as_Call()->extract_projections(&projs, true, false); - if (projs.fallthrough_memproj != NULL) { - if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) { - if (projs.catchall_memproj == NULL) { - mem = projs.fallthrough_memproj; + CallProjections* projs = c->as_Call()->extract_projections(true, false); + if (projs->fallthrough_memproj != NULL) { + if (projs->fallthrough_memproj->adr_type() == TypePtr::BOTTOM) { + if (projs->catchall_memproj == NULL) { + mem = projs->fallthrough_memproj; } else { - if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) { - mem = projs.fallthrough_memproj; + if (phase->is_dominator(projs->fallthrough_catchproj, ctrl)) { + mem = projs->fallthrough_memproj; } else { - assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier"); - mem = projs.catchall_memproj; + assert(phase->is_dominator(projs->catchall_catchproj, ctrl), "one proj must dominate barrier"); + mem = projs->catchall_memproj; } } } @@ -3758,14 +3757,13 @@ if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) { assert(c == n->in(0), ""); CallNode* call = c->as_Call(); - CallProjections projs; - call->extract_projections(&projs, true, false); - if (projs.catchall_memproj != NULL) { - if 
(projs.fallthrough_memproj == n) { - c = projs.fallthrough_catchproj; + CallProjections* projs = call->extract_projections(true, false); + if (projs->catchall_memproj != NULL) { + if (projs->fallthrough_memproj == n) { + c = projs->fallthrough_catchproj; } else { - assert(projs.catchall_memproj == n, ""); - c = projs.catchall_catchproj; + assert(projs->catchall_memproj == n, ""); + c = projs->catchall_catchproj; } } } --- old/src/hotspot/share/gc/z/zBarrierSet.cpp 2019-03-11 14:25:55.414355304 +0100 +++ new/src/hotspot/share/gc/z/zBarrierSet.cpp 2019-03-11 14:25:55.186355307 +0100 @@ -66,6 +66,7 @@ assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator"); //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator"); + assert(type != T_VALUETYPE, "Not supported yet"); if (type == T_OBJECT || type == T_ARRAY) { assert((decorators & (IN_HEAP | IN_NATIVE)) != 0, "Where is reference?"); // Barrier needed even when IN_NATIVE, to allow concurrent scanning. --- old/src/hotspot/share/interpreter/abstractInterpreter.hpp 2019-03-11 14:25:56.274355292 +0100 +++ new/src/hotspot/share/interpreter/abstractInterpreter.hpp 2019-03-11 14:25:56.054355295 +0100 @@ -102,7 +102,7 @@ } enum SomeConstants { - number_of_result_handlers = 10 // number of result handlers for native calls + number_of_result_handlers = 11 // number of result handlers for native calls }; protected: --- old/src/hotspot/share/interpreter/bytecode.hpp 2019-03-11 14:25:56.694355286 +0100 +++ new/src/hotspot/share/interpreter/bytecode.hpp 2019-03-11 14:25:56.486355289 +0100 @@ -293,6 +293,15 @@ long index() const { return get_index_u2(Bytecodes::_new); }; }; +class Bytecode_defaultvalue: public Bytecode { + public: + Bytecode_defaultvalue(Method* method, address bcp): Bytecode(method, bcp) { verify(); } + void verify() const { assert(java_code() == Bytecodes::_defaultvalue, "check defaultvalue"); } + + // Returns index + long index() const { return get_index_u2(Bytecodes::_defaultvalue); }; 
+}; + class Bytecode_multianewarray: public Bytecode { public: Bytecode_multianewarray(Method* method, address bcp): Bytecode(method, bcp) { verify(); } --- old/src/hotspot/share/interpreter/bytecodeTracer.cpp 2019-03-11 14:25:57.562355274 +0100 +++ new/src/hotspot/share/interpreter/bytecodeTracer.cpp 2019-03-11 14:25:57.350355277 +0100 @@ -449,7 +449,7 @@ case Bytecodes::_newarray: { BasicType atype = (BasicType)get_index_u1(); const char* str = type2name(atype); - if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) { + if (str == NULL || atype == T_OBJECT || atype == T_ARRAY || atype == T_VALUETYPE) { assert(false, "Unidentified basic type"); } st->print_cr(" %s", str); @@ -544,6 +544,7 @@ case Bytecodes::_getstatic: case Bytecodes::_putfield: case Bytecodes::_getfield: + case Bytecodes::_withfield: print_field_or_method(get_index_u2_cpcache(), st); break; @@ -568,6 +569,7 @@ case Bytecodes::_new: case Bytecodes::_checkcast: case Bytecodes::_instanceof: + case Bytecodes::_defaultvalue: { int i = get_index_u2(); ConstantPool* constants = method()->constants(); Symbol* name = constants->klass_name_at(i); --- old/src/hotspot/share/interpreter/bytecodes.cpp 2019-03-11 14:25:57.998355268 +0100 +++ new/src/hotspot/share/interpreter/bytecodes.cpp 2019-03-11 14:25:57.786355271 +0100 @@ -484,11 +484,14 @@ def(_goto_w , "goto_w" , "boooo", NULL , T_VOID , 0, false); def(_jsr_w , "jsr_w" , "boooo", NULL , T_INT , 0, false); def(_breakpoint , "breakpoint" , "" , NULL , T_VOID , 0, true); + def(_defaultvalue , "defaultvalue" , "bkk" , NULL , T_OBJECT , 1, true); + def(_withfield , "withfield" , "bJJ" , NULL , T_OBJECT , -1, true ); // JVM bytecodes // bytecode bytecode name format wide f. 
result tp stk traps std code def(_fast_agetfield , "fast_agetfield" , "bJJ" , NULL , T_OBJECT , 0, true , _getfield ); + def(_fast_qgetfield , "fast_qgetfield" , "bJJ" , NULL , T_OBJECT , 0, true , _getfield ); def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , NULL , T_INT , 0, true , _getfield ); def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , NULL , T_CHAR , 0, true , _getfield ); def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , NULL , T_DOUBLE , 0, true , _getfield ); @@ -498,6 +501,7 @@ def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , NULL , T_SHORT , 0, true , _getfield ); def(_fast_aputfield , "fast_aputfield" , "bJJ" , NULL , T_OBJECT , 0, true , _putfield ); + def(_fast_qputfield , "fast_qputfield" , "bJJ" , NULL , T_OBJECT , 0, true , _putfield ); def(_fast_bputfield , "fast_bputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield ); def(_fast_zputfield , "fast_zputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield ); def(_fast_cputfield , "fast_cputfield" , "bJJ" , NULL , T_CHAR , 0, true , _putfield ); --- old/src/hotspot/share/interpreter/bytecodes.hpp 2019-03-11 14:25:58.430355262 +0100 +++ new/src/hotspot/share/interpreter/bytecodes.hpp 2019-03-11 14:25:58.218355265 +0100 @@ -243,10 +243,15 @@ _jsr_w = 201, // 0xc9 _breakpoint = 202, // 0xca + // value-type bytecodes + _defaultvalue = 203, // 0xcb + _withfield = 204, // 0xcc + number_of_java_codes, // JVM bytecodes _fast_agetfield = number_of_java_codes, + _fast_qgetfield , _fast_bgetfield , _fast_cgetfield , _fast_dgetfield , @@ -256,6 +261,7 @@ _fast_sgetfield , _fast_aputfield , + _fast_qputfield , _fast_bputfield , _fast_zputfield , _fast_cputfield , --- old/src/hotspot/share/interpreter/interpreterRuntime.cpp 2019-03-11 14:25:58.858355256 +0100 +++ new/src/hotspot/share/interpreter/interpreterRuntime.cpp 2019-03-11 14:25:58.646355259 +0100 @@ -47,6 +47,10 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" +#include 
"oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/valueArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "prims/nativeLookup.hpp" #include "runtime/atomic.hpp" @@ -69,6 +73,7 @@ #include "utilities/align.hpp" #include "utilities/copy.hpp" #include "utilities/events.hpp" +#include "utilities/globalDefinitions.hpp" #ifdef COMPILER2 #include "opto/runtime.hpp" #endif @@ -253,6 +258,176 @@ thread->set_vm_result(obj); IRT_END +void copy_primitive_argument(intptr_t* addr, Handle instance, int offset, BasicType type) { + switch (type) { + case T_BOOLEAN: + instance()->bool_field_put(offset, (jboolean)*((int*)addr)); + break; + case T_CHAR: + instance()->char_field_put(offset, (jchar) *((int*)addr)); + break; + case T_FLOAT: + instance()->float_field_put(offset, (jfloat)*((float*)addr)); + break; + case T_DOUBLE: + instance()->double_field_put(offset, (jdouble)*((double*)addr)); + break; + case T_BYTE: + instance()->byte_field_put(offset, (jbyte)*((int*)addr)); + break; + case T_SHORT: + instance()->short_field_put(offset, (jshort)*((int*)addr)); + break; + case T_INT: + instance()->int_field_put(offset, (jint)*((int*)addr)); + break; + case T_LONG: + instance()->long_field_put(offset, (jlong)*((long long*)addr)); + break; + case T_OBJECT: + case T_ARRAY: + case T_VALUETYPE: + fatal("Should not be handled with this method"); + break; + default: + fatal("Unsupported BasicType"); + } +} + +IRT_ENTRY(void, InterpreterRuntime::defaultvalue(JavaThread* thread, ConstantPool* pool, int index)) + // Getting the ValueKlass + Klass* k = pool->klass_at(index, CHECK); + assert(k->is_value(), "defaultvalue argument must be the value type class"); + ValueKlass* vklass = ValueKlass::cast(k); + + vklass->initialize(THREAD); + oop res = vklass->default_value(); + thread->set_vm_result(res); +IRT_END + +IRT_ENTRY(int, InterpreterRuntime::withfield(JavaThread* thread, ConstantPoolCache* cp_cache)) + LastFrameAccessor last_frame(thread); + // Getting 
the ValueKlass + int index = ConstantPool::decode_cpcache_index(last_frame.get_index_u2_cpcache(Bytecodes::_withfield)); + ConstantPoolCacheEntry* cp_entry = cp_cache->entry_at(index); + assert(cp_entry->is_resolved(Bytecodes::_withfield), "Should have been resolved"); + Klass* klass = cp_entry->f1_as_klass(); + assert(klass->is_value(), "withfield only applies to value types"); + ValueKlass* vklass = ValueKlass::cast(klass); + + // Getting Field information + int offset = cp_entry->f2_as_index(); + int field_index = cp_entry->field_index(); + int field_offset = cp_entry->f2_as_offset(); + Symbol* field_signature = vklass->field_signature(field_index); + ResourceMark rm(THREAD); + const char* signature = (const char *) field_signature->as_utf8(); + BasicType field_type = char2type(signature[0]); + + // Getting old value + frame& f = last_frame.get_frame(); + jint tos_idx = f.interpreter_frame_expression_stack_size() - 1; + int vt_offset = type2size[field_type]; + oop old_value = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx - vt_offset); + assert(old_value != NULL && oopDesc::is_oop(old_value) && old_value->is_value(),"Verifying receiver"); + Handle old_value_h(THREAD, old_value); + + // Creating new value by copying the one passed in argument + instanceOop new_value = vklass->allocate_instance( + CHECK_((type2size[field_type]) * AbstractInterpreter::stackElementSize)); + Handle new_value_h = Handle(THREAD, new_value); + int first_offset = vklass->first_field_offset(); + vklass->value_store(vklass->data_for_oop(old_value_h()), + vklass->data_for_oop(new_value_h()), true, false); + + // Updating the field specified in arguments + if (field_type == T_ARRAY || field_type == T_OBJECT) { + oop aoop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); + assert(aoop == NULL || oopDesc::is_oop(aoop),"argument must be a reference type"); + new_value_h()->obj_field_put(field_offset, aoop); + } else if (field_type == T_VALUETYPE) { + if 
(cp_entry->is_flattened()) { + oop vt_oop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); + if (vt_oop == NULL) { + THROW_(vmSymbols::java_lang_NullPointerException(), + (type2size[field_type] * AbstractInterpreter::stackElementSize)); + } + assert(vt_oop != NULL && oopDesc::is_oop(vt_oop) && vt_oop->is_value(),"argument must be a value type"); + Klass* field_k = vklass->get_value_field_klass(field_index); + ValueKlass* field_vk = ValueKlass::cast(field_k); + assert(field_vk == vt_oop->klass(), "Must match"); + field_vk->value_store(field_vk->data_for_oop(vt_oop), + ((char*)(oopDesc*)new_value_h()) + field_offset, false, false); + } else { // not flattened + oop voop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); + if (voop == NULL && cp_entry->is_flattenable()) { + THROW_(vmSymbols::java_lang_NullPointerException(), + (type2size[field_type] * AbstractInterpreter::stackElementSize)); + } + assert(voop == NULL || oopDesc::is_oop(voop),"checking argument"); + new_value_h()->obj_field_put(field_offset, voop); + } + } else { // not T_OBJECT nor T_ARRAY nor T_VALUETYPE + intptr_t* addr = f.interpreter_frame_expression_stack_at(tos_idx); + copy_primitive_argument(addr, new_value_h, field_offset, field_type); + } + + // returning result + thread->set_vm_result(new_value_h()); + return (type2size[field_type] + type2size[T_OBJECT]) * AbstractInterpreter::stackElementSize; +IRT_END + +IRT_ENTRY(void, InterpreterRuntime::uninitialized_static_value_field(JavaThread* thread, oopDesc* mirror, int index)) + instanceHandle mirror_h(THREAD, (instanceOop)mirror); + InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror)); + int offset = klass->field_offset(index); + Klass* field_k = klass->get_value_field_klass_or_null(index); + assert(field_k != NULL, "Must have been initialized"); + ValueKlass* field_vklass = ValueKlass::cast(field_k); + instanceOop res = (instanceOop)field_vklass->default_value(); + thread->set_vm_result(res); +IRT_END 
+ +IRT_ENTRY(void, InterpreterRuntime::uninitialized_instance_value_field(JavaThread* thread, oopDesc* obj, int index)) + instanceHandle obj_h(THREAD, (instanceOop)obj); + InstanceKlass* klass = InstanceKlass::cast(obj_h()->klass()); + Klass* field_k = klass->get_value_field_klass_or_null(index); + assert(field_k != NULL, "Must have been initialized"); + ValueKlass* field_vklass = ValueKlass::cast(field_k); + assert(field_vklass->is_initialized(), "Must have been initialized at this point"); + instanceOop res = (instanceOop)field_vklass->default_value(); + thread->set_vm_result(res); +IRT_END + +IRT_ENTRY(void, InterpreterRuntime::write_flattened_value(JavaThread* thread, oopDesc* value, int offset, oopDesc* rcv)) + assert(oopDesc::is_oop(value), "Sanity check"); + assert(oopDesc::is_oop(rcv), "Sanity check"); + assert(value->is_value(), "Sanity check"); + + ValueKlass* vklass = ValueKlass::cast(value->klass()); + vklass->value_store(vklass->data_for_oop(value), ((char*)(oopDesc*)rcv) + offset, true, true); +IRT_END + +IRT_ENTRY(void, InterpreterRuntime::read_flattened_field(JavaThread* thread, oopDesc* obj, int index, Klass* field_holder)) + Handle obj_h(THREAD, obj); + + assert(oopDesc::is_oop(obj), "Sanity check"); + + assert(field_holder->is_instance_klass(), "Sanity check"); + InstanceKlass* klass = InstanceKlass::cast(field_holder); + + assert(klass->field_is_flattened(index), "Sanity check"); + + ValueKlass* field_vklass = ValueKlass::cast(klass->get_value_field_klass(index)); + assert(field_vklass->is_initialized(), "Must be initialized at this point"); + + // allocate instance + instanceOop res = field_vklass->allocate_instance(CHECK); + // copy value + field_vklass->value_store(((char*)(oopDesc*)obj_h()) + klass->field_offset(index), + field_vklass->data_for_oop(res), true, true); + thread->set_vm_result(res); +IRT_END IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size)) oop obj = oopFactory::new_typeArray(type, 
size, CHECK); @@ -262,10 +437,40 @@ IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size)) Klass* klass = pool->klass_at(index, CHECK); - objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK); + if (klass->is_value()) { // Logically creates elements, ensure klass init + klass->initialize(CHECK); + } + arrayOop obj = oopFactory::new_array(klass, size, CHECK); thread->set_vm_result(obj); IRT_END +IRT_ENTRY(void, InterpreterRuntime::value_array_load(JavaThread* thread, arrayOopDesc* array, int index)) + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array oop"); + + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + arrayHandle ah(THREAD, array); + instanceOop value_holder = vklass->allocate_instance(CHECK); + void* src = ((valueArrayOop)ah())->value_at_addr(index, vaklass->layout_helper()); + vklass->value_store(src, vklass->data_for_oop(value_holder), + vaklass->element_byte_size(), true, false); + thread->set_vm_result(value_holder); +IRT_END + +IRT_ENTRY(void, InterpreterRuntime::value_array_store(JavaThread* thread, void* val, arrayOopDesc* array, int index)) + assert(val != NULL, "can't store null into flat array"); + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array"); + assert(ArrayKlass::cast(klass)->element_klass() == ((oop)val)->klass(), "Store type incorrect"); + + valueArrayOop varray = (valueArrayOop)array; + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + const int lh = vaklass->layout_helper(); + vklass->value_store(vklass->data_for_oop((oop)val), varray->value_at_addr(index, lh), + vaklass->element_byte_size(), true, false); +IRT_END IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address)) // We may want to pass in more arguments - could make this 
slightly faster @@ -277,6 +482,10 @@ assert(klass->is_klass(), "not a class"); assert(nof_dims >= 1, "multianewarray rank must be nonzero"); + if (klass->is_value()) { // Logically creates elements, ensure klass init + klass->initialize(CHECK); + } + // We must create an array of jints to pass to multi_allocate. ResourceMark rm(thread); const int small_dims = 10; @@ -687,8 +896,9 @@ constantPoolHandle pool(thread, last_frame.method()->constants()); methodHandle m(thread, last_frame.method()); bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_nofast_putfield || - bytecode == Bytecodes::_putstatic); + bytecode == Bytecodes::_putstatic || bytecode == Bytecodes::_withfield); bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic); + bool is_value = bytecode == Bytecodes::_withfield; { JvmtiHideSingleStepping jhss(thread); @@ -732,9 +942,15 @@ Bytecodes::Code get_code = (Bytecodes::Code)0; Bytecodes::Code put_code = (Bytecodes::Code)0; if (!uninitialized_static) { - get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield); - if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) { - put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield); + if (is_static) { + get_code = Bytecodes::_getstatic; + } else { + get_code = Bytecodes::_getfield; + } + if (is_put && is_value) { + put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_withfield); + } else if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) { + put_code = ((is_static) ? 
Bytecodes::_putstatic : Bytecodes::_putfield); } } @@ -747,6 +963,8 @@ state, info.access_flags().is_final(), info.access_flags().is_volatile(), + info.is_flattened(), + info.is_flattenable(), pool->pool_holder() ); } @@ -857,8 +1075,7 @@ Symbol* signature = call.signature(); receiver = Handle(thread, last_frame.callee_receiver(signature)); - assert(Universe::heap()->is_in_reserved_or_null(receiver()), - "sanity check"); + assert(Universe::heap()->is_in_reserved_or_null(receiver()), "sanity check"); assert(receiver.is_null() || !Universe::heap()->is_in_reserved(receiver->klass()), "sanity check"); @@ -1000,6 +1217,7 @@ case Bytecodes::_putstatic: case Bytecodes::_getfield: case Bytecodes::_putfield: + case Bytecodes::_withfield: resolve_get_put(thread, bytecode); break; case Bytecodes::_invokevirtual: @@ -1264,6 +1482,12 @@ case dtos: sig_type = 'D'; break; default: ShouldNotReachHere(); return; } + + // Both Q-signatures and L-signatures are mapped to atos + if (cp_entry->flag_state() == atos && ik->field_signature(index)->is_Q_signature()) { + sig_type = 'Q'; + } + bool is_static = (obj == NULL); HandleMark hm(thread); --- old/src/hotspot/share/interpreter/interpreterRuntime.hpp 2019-03-11 14:25:59.318355250 +0100 +++ new/src/hotspot/share/interpreter/interpreterRuntime.hpp 2019-03-11 14:25:59.086355253 +0100 @@ -66,6 +66,16 @@ static void anewarray (JavaThread* thread, ConstantPool* pool, int index, jint size); static void multianewarray(JavaThread* thread, jint* first_size_address); static void register_finalizer(JavaThread* thread, oopDesc* obj); + static void defaultvalue (JavaThread* thread, ConstantPool* pool, int index); + static int withfield (JavaThread* thread, ConstantPoolCache* cp_cache); + static void uninitialized_static_value_field(JavaThread* thread, oopDesc* mirror, int offset); + static void uninitialized_instance_value_field(JavaThread* thread, oopDesc* obj, int offset); + static void write_heap_copy (JavaThread* thread, oopDesc* value, int 
offset, oopDesc* rcv); + static void write_flattened_value(JavaThread* thread, oopDesc* value, int offset, oopDesc* rcv); + static void read_flattened_field(JavaThread* thread, oopDesc* value, int index, Klass* field_holder); + + static void value_array_load(JavaThread* thread, arrayOopDesc* array, int index); + static void value_array_store(JavaThread* thread, void* val, arrayOopDesc* array, int index); // Quicken instance-of and check-cast bytecodes static void quicken_io_cc(JavaThread* thread); --- old/src/hotspot/share/interpreter/linkResolver.cpp 2019-03-11 14:25:59.730355244 +0100 +++ new/src/hotspot/share/interpreter/linkResolver.cpp 2019-03-11 14:25:59.526355247 +0100 @@ -945,11 +945,13 @@ TRAPS) { assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic || byte == Bytecodes::_getfield || byte == Bytecodes::_putfield || + byte == Bytecodes::_withfield || byte == Bytecodes::_nofast_getfield || byte == Bytecodes::_nofast_putfield || (byte == Bytecodes::_nop && !link_info.check_access()), "bad field access bytecode"); bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic); - bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic || byte == Bytecodes::_nofast_putfield); + bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic || + byte == Bytecodes::_nofast_putfield || byte == Bytecodes::_withfield); // Check if there's a resolved klass containing the field Klass* resolved_klass = link_info.resolved_klass(); Symbol* field = link_info.name(); @@ -987,16 +989,28 @@ // (1) by methods declared in the class declaring the field and // (2) by the method (in case of a static field) // or by the method (in case of an instance field). + // (3) by withfield when field is in a value type and the + // selected class and current class are nest mates. 
if (is_put && fd.access_flags().is_final()) { ResourceMark rm(THREAD); stringStream ss; if (sel_klass != current_klass) { + // If byte code is a withfield check if they are nestmates. + bool are_nestmates = false; + if (sel_klass->is_instance_klass() && + InstanceKlass::cast(sel_klass)->is_value() && + current_klass->is_instance_klass()) { + are_nestmates = InstanceKlass::cast(link_info.current_klass())->has_nestmate_access_to( + InstanceKlass::cast(sel_klass), THREAD); + } + if (!are_nestmates) { ss.print("Update to %s final field %s.%s attempted from a different class (%s) than the field's declaring class", is_static ? "static" : "non-static", resolved_klass->external_name(), fd.name()->as_C_string(), - current_klass->external_name()); + current_klass->external_name()); THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string()); } + } if (fd.constants()->pool_holder()->major_version() >= 53) { methodHandle m = link_info.current_method(); --- old/src/hotspot/share/interpreter/oopMapCache.cpp 2019-03-11 14:26:00.574355232 +0100 +++ new/src/hotspot/share/interpreter/oopMapCache.cpp 2019-03-11 14:26:00.370355235 +0100 @@ -246,6 +246,7 @@ void pass_float() { /* ignore */ } void pass_double() { /* ignore */ } void pass_object() { set_one(offset()); } + void pass_valuetype() { set_one(offset()); } MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) { _mask = mask; @@ -274,7 +275,7 @@ st.print("Locals (%d): ", max_locals); for(int i = 0; i < max_locals; i++) { bool v1 = is_oop(i) ? true : false; - bool v2 = vars[i].is_reference() ? true : false; + bool v2 = vars[i].is_reference(); assert(v1 == v2, "locals oop mask generation error"); st.print("%d", v1 ? 1 : 0); } @@ -283,7 +284,7 @@ st.print("Stack (%d): ", stack_top); for(int j = 0; j < stack_top; j++) { bool v1 = is_oop(max_locals + j) ? true : false; - bool v2 = stack[j].is_reference() ? 
true : false; + bool v2 = stack[j].is_reference(); assert(v1 == v2, "stack oop mask generation error"); st.print("%d", v1 ? 1 : 0); } @@ -365,7 +366,7 @@ } // set oop bit - if ( cell->is_reference()) { + if (cell->is_reference()) { value |= (mask << oop_bit_number ); } --- old/src/hotspot/share/interpreter/rewriter.cpp 2019-03-11 14:26:00.990355227 +0100 +++ new/src/hotspot/share/interpreter/rewriter.cpp 2019-03-11 14:26:00.786355229 +0100 @@ -169,7 +169,7 @@ void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) { address p = bcp + offset; if (!reverse) { - int cp_index = Bytes::get_Java_u2(p); + int cp_index = Bytes::get_Java_u2(p); int cache_index = cp_entry_to_cp_cache(cp_index); Bytes::put_native_u2(p, cache_index); if (!_method_handle_invokers.is_empty()) @@ -205,7 +205,6 @@ } } - // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) { if (!reverse) { @@ -466,6 +465,7 @@ // fall through case Bytecodes::_getstatic : // fall through case Bytecodes::_getfield : // fall through + case Bytecodes::_withfield : // fall through but may require more checks for correctness case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokestatic : case Bytecodes::_invokeinterface: --- old/src/hotspot/share/interpreter/templateInterpreter.cpp 2019-03-11 14:26:01.410355221 +0100 +++ new/src/hotspot/share/interpreter/templateInterpreter.cpp 2019-03-11 14:26:01.206355224 +0100 @@ -71,7 +71,7 @@ // Implementation of EntryPoint EntryPoint::EntryPoint() { - assert(number_of_states == 10, "check the code below"); + assert(number_of_states == 10 , "check the code below"); _entry[btos] = NULL; _entry[ztos] = NULL; _entry[ctos] = NULL; --- old/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp 2019-03-11 14:26:02.290355209 +0100 +++ new/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp 
2019-03-11 14:26:02.062355212 +0100 @@ -52,7 +52,8 @@ T_VOID , T_FLOAT , T_DOUBLE , - T_OBJECT + T_OBJECT , + T_VALUETYPE }; void TemplateInterpreterGenerator::generate_all() { --- old/src/hotspot/share/interpreter/templateTable.cpp 2019-03-11 14:26:03.178355196 +0100 +++ new/src/hotspot/share/interpreter/templateTable.cpp 2019-03-11 14:26:02.966355199 +0100 @@ -160,7 +160,8 @@ // Implementation of TemplateTable: Debugging void TemplateTable::transition(TosState tos_in, TosState tos_out) { - assert(_desc->tos_in() == tos_in , "inconsistent tos_in information"); + assert(_desc->tos_in() == tos_in, + "inconsistent tos_in information"); assert(_desc->tos_out() == tos_out, "inconsistent tos_out information"); } @@ -237,6 +238,7 @@ const int disp = 1 << Template::does_dispatch_bit; const int clvm = 1 << Template::calls_vm_bit; const int iswd = 1 << Template::wide_bit; + // interpr. templates // Java spec bytecodes ubcp|disp|clvm|iswd in out generator argument def(Bytecodes::_nop , ____|____|____|____, vtos, vtos, nop , _ ); @@ -289,7 +291,7 @@ def(Bytecodes::_laload , ____|____|____|____, itos, ltos, laload , _ ); def(Bytecodes::_faload , ____|____|____|____, itos, ftos, faload , _ ); def(Bytecodes::_daload , ____|____|____|____, itos, dtos, daload , _ ); - def(Bytecodes::_aaload , ____|____|____|____, itos, atos, aaload , _ ); + def(Bytecodes::_aaload , ____|____|clvm|____, itos, atos, aaload , _ ); def(Bytecodes::_baload , ____|____|____|____, itos, itos, baload , _ ); def(Bytecodes::_caload , ____|____|____|____, itos, itos, caload , _ ); def(Bytecodes::_saload , ____|____|____|____, itos, itos, saload , _ ); @@ -441,6 +443,9 @@ def(Bytecodes::_ifnonnull , ubcp|____|clvm|____, atos, vtos, if_nullcmp , not_equal ); def(Bytecodes::_goto_w , ubcp|____|clvm|____, vtos, vtos, goto_w , _ ); def(Bytecodes::_jsr_w , ubcp|____|____|____, vtos, vtos, jsr_w , _ ); + def(Bytecodes::_breakpoint , ubcp|disp|clvm|____, vtos, vtos, _breakpoint , _ ); + def(Bytecodes::_defaultvalue , 
ubcp|____|clvm|____, vtos, atos, defaultvalue , _ ); + def(Bytecodes::_withfield , ubcp|____|clvm|____, vtos, atos, withfield , _ ); // wide Java spec bytecodes def(Bytecodes::_iload , ubcp|____|____|iswd, vtos, itos, wide_iload , _ ); @@ -459,6 +464,7 @@ // JVM bytecodes def(Bytecodes::_fast_agetfield , ubcp|____|____|____, atos, atos, fast_accessfield , atos ); + def(Bytecodes::_fast_qgetfield , ubcp|____|clvm|____, atos, atos, fast_accessfield , atos ); def(Bytecodes::_fast_bgetfield , ubcp|____|____|____, atos, itos, fast_accessfield , itos ); def(Bytecodes::_fast_cgetfield , ubcp|____|____|____, atos, itos, fast_accessfield , itos ); def(Bytecodes::_fast_dgetfield , ubcp|____|____|____, atos, dtos, fast_accessfield , dtos ); @@ -468,6 +474,7 @@ def(Bytecodes::_fast_sgetfield , ubcp|____|____|____, atos, itos, fast_accessfield , itos ); def(Bytecodes::_fast_aputfield , ubcp|____|____|____, atos, vtos, fast_storefield , atos ); + def(Bytecodes::_fast_qputfield , ubcp|____|clvm|____, atos, vtos, fast_storefield , atos ); def(Bytecodes::_fast_bputfield , ubcp|____|____|____, itos, vtos, fast_storefield , itos ); def(Bytecodes::_fast_zputfield , ubcp|____|____|____, itos, vtos, fast_storefield , itos ); def(Bytecodes::_fast_cputfield , ubcp|____|____|____, itos, vtos, fast_storefield , itos ); @@ -504,6 +511,7 @@ def(Bytecodes::_nofast_aload_0 , ____|____|clvm|____, vtos, atos, nofast_aload_0 , _ ); def(Bytecodes::_nofast_iload , ubcp|____|clvm|____, vtos, itos, nofast_iload , _ ); + def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ ); // platform specific bytecodes pd_initialize(); --- old/src/hotspot/share/interpreter/templateTable.hpp 2019-03-11 14:26:03.594355191 +0100 +++ new/src/hotspot/share/interpreter/templateTable.hpp 2019-03-11 14:26:03.382355194 +0100 @@ -297,8 +297,10 @@ static void putstatic(int byte_no); static void pop_and_check_object(Register obj); static void condy_helper(Label& Done); // shared by ldc 
instances + static void withfield(); static void _new(); + static void defaultvalue(); static void newarray(); static void anewarray(); static void arraylength(); @@ -332,7 +334,7 @@ // initialization helpers static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)( ), char filler ); static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(int arg ), int arg ); - static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(bool arg ), bool arg ); + static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(bool arg ), bool arg ); static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(TosState tos), TosState tos); static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(Operation op), Operation op); static void def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(Condition cc), Condition cc); --- old/src/hotspot/share/jvmci/compilerRuntime.cpp 2019-03-11 14:26:04.030355185 +0100 +++ new/src/hotspot/share/jvmci/compilerRuntime.cpp 2019-03-11 14:26:03.814355188 +0100 @@ -73,7 +73,7 @@ Handle protection_domain(THREAD, caller->method_holder()->protection_domain()); // Ignore wrapping L and ; - if (name[0] == 'L') { + if (name[0] == 'L' || name[0] == 'Q') { assert(len > 2, "small name %s", name); name++; len -= 2; --- old/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp 2019-03-11 14:26:04.502355178 +0100 +++ new/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp 2019-03-11 14:26:04.278355181 +0100 @@ -1173,7 +1173,7 @@ throw_exception = BytecodeFrame::rethrowException(frame) == JNI_TRUE; } - _debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, false, return_oop, + _debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, false, return_oop, false, locals_token, expressions_token, monitors_token); } @@ 
-1230,7 +1230,7 @@ OopMap *map = create_oop_map(debug_info, CHECK); _debug_recorder->add_safepoint(next_pc_offset, map); - bool return_oop = hotspot_method.not_null() && getMethodFromHotSpotMethod(hotspot_method())->is_returning_oop(); + bool return_oop = hotspot_method.not_null() && getMethodFromHotSpotMethod(hotspot_method())->may_return_oop(); record_scope(next_pc_offset, debug_info, CodeInstaller::FullFrame, return_oop, CHECK); } @@ -1321,6 +1321,8 @@ break; case VERIFIED_ENTRY: _offsets.set_value(CodeOffsets::Verified_Entry, pc_offset); + _offsets.set_value(CodeOffsets::Verified_Value_Entry, pc_offset); + _offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, pc_offset); break; case OSR_ENTRY: _offsets.set_value(CodeOffsets::OSR_Entry, pc_offset); --- old/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp 2019-03-11 14:26:04.954355172 +0100 +++ new/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp 2019-03-11 14:26:04.730355175 +0100 @@ -436,7 +436,7 @@ if (resolve) { resolved_klass = SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK_0); } else { - if (class_name->char_at(0) == 'L' && + if ((class_name->char_at(0) == 'L' || class_name->char_at(0) == 'Q' ) && class_name->char_at(class_name->utf8_length()-1) == ';') { // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym. 
@@ -879,7 +879,8 @@ if (jap.get_ret_type() == T_VOID) { return NULL; - } else if (jap.get_ret_type() == T_OBJECT || jap.get_ret_type() == T_ARRAY) { + } else if (jap.get_ret_type() == T_OBJECT || jap.get_ret_type() == T_ARRAY + || jap.get_ret_type() == T_VALUETYPE) { return JNIHandles::make_local(THREAD, (oop) result.get_jobject()); } else { jvalue *value = (jvalue *) result.get_value_addr(); @@ -1083,7 +1084,7 @@ } } bool realloc_failures = Deoptimization::realloc_objects(thread, fst.current(), objects, CHECK_NULL); - Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects, realloc_failures, false); + Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects, realloc_failures, false, CHECK_NULL); realloc_called = true; GrowableArray* local_values = scope->locals(); @@ -1340,7 +1341,7 @@ } bool realloc_failures = Deoptimization::realloc_objects(thread, fstAfterDeopt.current(), objects, CHECK); - Deoptimization::reassign_fields(fstAfterDeopt.current(), fstAfterDeopt.register_map(), objects, realloc_failures, false); + Deoptimization::reassign_fields(fstAfterDeopt.current(), fstAfterDeopt.register_map(), objects, realloc_failures, false, THREAD); for (int frame_index = 0; frame_index < virtualFrames->length(); frame_index++) { compiledVFrame* cvf = virtualFrames->at(frame_index); --- old/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp 2019-03-11 14:26:05.394355166 +0100 +++ new/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp 2019-03-11 14:26:05.182355169 +0100 @@ -228,6 +228,7 @@ inline void do_object() { _jca->push_oop(next_arg(T_OBJECT)); } inline void do_object(int begin, int end) { if (!is_return_type()) _jca->push_oop(next_arg(T_OBJECT)); } + inline void do_valuetype(int begin, int end) { if (!is_return_type()) _jca->push_oop(next_arg(T_VALUETYPE)); } inline void do_array(int begin, int end) { if (!is_return_type()) _jca->push_oop(next_arg(T_OBJECT)); } inline void do_void() { } }; --- old/src/hotspot/share/jvmci/jvmciEnv.cpp 
2019-03-11 14:26:05.810355160 +0100 +++ new/src/hotspot/share/jvmci/jvmciEnv.cpp 2019-03-11 14:26:05.602355163 +0100 @@ -120,7 +120,7 @@ JVMCI_EXCEPTION_CONTEXT; // Now we need to check the SystemDictionary - if (sym->char_at(0) == 'L' && + if ((sym->char_at(0) == 'L' || sym->char_at(0) == 'Q') && sym->char_at(sym->utf8_length()-1) == ';') { // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym. @@ -155,7 +155,7 @@ // to be loaded if their element klasses are loaded, except when memory // is exhausted. if (sym->char_at(0) == '[' && - (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) { + (sym->char_at(1) == '[' || sym->char_at(1) == 'L' || sym->char_at(1) == 'Q')) { // We have an unloaded array. // Build it on the fly if the element class exists. TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1, --- old/src/hotspot/share/logging/logTag.hpp 2019-03-11 14:26:06.642355149 +0100 +++ new/src/hotspot/share/logging/logTag.hpp 2019-03-11 14:26:06.430355151 +0100 @@ -167,6 +167,7 @@ LOG_TAG(unload) /* Trace unloading of classes */ \ LOG_TAG(unshareable) \ LOG_TAG(mirror) \ + LOG_TAG(valuetypes) \ LOG_TAG(verification) \ LOG_TAG(verify) \ LOG_TAG(vmmonitor) \ --- old/src/hotspot/share/memory/allocation.hpp 2019-03-11 14:26:07.118355142 +0100 +++ new/src/hotspot/share/memory/allocation.hpp 2019-03-11 14:26:06.862355145 +0100 @@ -144,6 +144,7 @@ */ enum MemoryType { MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM) + mtValueTypes, // memory for buffered value types mt_number_of_types // number of memory types (mtDontTrack // is not included as validate type) }; --- old/src/hotspot/share/memory/iterator.hpp 2019-03-11 14:26:08.458355123 +0100 +++ new/src/hotspot/share/memory/iterator.hpp 2019-03-11 14:26:08.250355126 +0100 @@ -46,6 +46,8 @@ public: virtual void do_oop(oop* o) = 0; virtual void do_oop(narrowOop* o) = 0; + virtual void do_oop_no_buffering(oop* o) { do_oop(o); } + virtual void 
do_oop_no_buffering(narrowOop* o) { do_oop(o); } }; class DoNothingClosure : public OopClosure { @@ -114,6 +116,11 @@ virtual void do_cld(ClassLoaderData* cld) { ShouldNotReachHere(); } }; +class BufferedValueClosure : public Closure { +public: + virtual void do_buffered_value(oop* p) = 0; +}; + class KlassClosure : public Closure { public: virtual void do_klass(Klass* k) = 0; --- old/src/hotspot/share/memory/iterator.inline.hpp 2019-03-11 14:26:08.874355118 +0100 +++ new/src/hotspot/share/memory/iterator.inline.hpp 2019-03-11 14:26:08.670355120 +0100 @@ -36,6 +36,7 @@ #include "oops/instanceRefKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/typeArrayKlass.inline.hpp" +#include "oops/valueArrayKlass.inline.hpp" #include "utilities/debug.hpp" inline void MetadataVisitingOopIterateClosure::do_cld(ClassLoaderData* cld) { @@ -270,6 +271,7 @@ set_init_function(); set_init_function(); set_init_function(); + set_init_function(); } }; @@ -330,6 +332,7 @@ set_init_function(); set_init_function(); set_init_function(); + set_init_function(); } }; @@ -390,6 +393,7 @@ set_init_function(); set_init_function(); set_init_function(); + set_init_function(); } }; --- old/src/hotspot/share/memory/oopFactory.cpp 2019-03-11 14:26:09.382355111 +0100 +++ new/src/hotspot/share/memory/oopFactory.cpp 2019-03-11 14:26:09.158355114 +0100 @@ -33,9 +33,12 @@ #include "memory/universe.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceOop.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueKlass.hpp" +#include "oops/valueArrayKlass.hpp" #include "runtime/handles.inline.hpp" @@ -81,6 +84,8 @@ objArrayOop oopFactory::new_objArray(Klass* klass, int length, TRAPS) { assert(klass->is_klass(), "must be instance class"); + assert(!klass->is_value() || (!ValueKlass::cast(klass)->flatten_array()), + "Did not expect flatten array of value klass"); if 
(klass->is_array_klass()) { return ArrayKlass::cast(klass)->allocate_arrayArray(1, length, THREAD); } else { @@ -88,6 +93,36 @@ } } +arrayOop oopFactory::new_valueArray(Klass* klass, int length, TRAPS) { + assert(klass->is_value(), "Klass must be value type"); + Klass* array_klass = klass->array_klass(CHECK_NULL); // Flat value array or object array ? + assert(array_klass->is_valueArray_klass() || array_klass->is_objArray_klass(), + "Expect an array class here"); + + if (array_klass->is_valueArray_klass()) { + return (arrayOop) ValueArrayKlass::cast(array_klass)->allocate(length, THREAD); + } + + ValueKlass* vklass = ValueKlass::cast(klass); + objArrayOop array = oopFactory::new_objArray(klass, length, CHECK_NULL); + if (length == 0) { + return array; + } + + // Populate default values... + objArrayHandle array_h(THREAD, array); + instanceOop value = (instanceOop)vklass->default_value(); + for (int i = 0; i < length; i++) { + array_h->obj_at_put(i, value); + } + return array_h(); +} + +arrayOop oopFactory::new_array(Klass* klass, int length, TRAPS) { + return (klass->is_value()) ? 
new_valueArray(klass, length, THREAD) : + (arrayOop)new_objArray(klass, length, THREAD); +} + objArrayHandle oopFactory::new_objArray_handle(Klass* klass, int length, TRAPS) { objArrayOop obj = new_objArray(klass, length, CHECK_(objArrayHandle())); return objArrayHandle(THREAD, obj); --- old/src/hotspot/share/memory/oopFactory.hpp 2019-03-11 14:26:09.846355104 +0100 +++ new/src/hotspot/share/memory/oopFactory.hpp 2019-03-11 14:26:09.614355107 +0100 @@ -67,6 +67,12 @@ // Regular object arrays static objArrayOop new_objArray(Klass* klass, int length, TRAPS); + // Value arrays + static arrayOop new_valueArray(Klass* klass, int length, TRAPS); + + // Object/Value array for klass + static arrayOop new_array(Klass* klass, int length, TRAPS); + // Helpers that return handles static objArrayHandle new_objArray_handle(Klass* klass, int length, TRAPS); static typeArrayHandle new_byteArray_handle(int length, TRAPS); --- old/src/hotspot/share/memory/universe.cpp 2019-03-11 14:26:10.302355098 +0100 +++ new/src/hotspot/share/memory/universe.cpp 2019-03-11 14:26:10.086355101 +0100 @@ -671,7 +671,6 @@ SystemDictionary::initialize_oop_storage(); Metaspace::global_initialize(); - // Initialize performance counters for metaspaces MetaspaceCounters::initialize_performance_counters(); CompressedClassSpaceCounters::initialize_performance_counters(); --- old/src/hotspot/share/memory/universe.hpp 2019-03-11 14:26:10.734355092 +0100 +++ new/src/hotspot/share/memory/universe.hpp 2019-03-11 14:26:10.510355095 +0100 @@ -189,6 +189,7 @@ static address _narrow_ptrs_base; // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump. 
static uint64_t _narrow_klass_range; + // array of dummy objects used with +FullGCAlot debug_only(static objArrayOop _fullgc_alot_dummy_array;) // index of next entry to clear --- old/src/hotspot/share/oops/arrayKlass.cpp 2019-03-11 14:26:11.178355086 +0100 +++ new/src/hotspot/share/oops/arrayKlass.cpp 2019-03-11 14:26:10.950355089 +0100 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "gc/shared/collectedHeap.inline.hpp" @@ -32,6 +33,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/arrayKlass.hpp" +#include "oops/objArrayKlass.hpp" #include "oops/arrayOop.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayOop.hpp" @@ -96,6 +98,47 @@ JFR_ONLY(INIT_ID(this);) } +Symbol* ArrayKlass::create_element_klass_array_name(Klass* element_klass, TRAPS) { + Symbol* name = NULL; + if (!element_klass->is_instance_klass() || + (name = InstanceKlass::cast(element_klass)->array_name()) == NULL) { + + ResourceMark rm(THREAD); + char *name_str = element_klass->name()->as_C_string(); + int len = element_klass->name()->utf8_length(); + char *new_str = NEW_RESOURCE_ARRAY(char, len + 4); + int idx = 0; + new_str[idx++] = '['; + if (element_klass->is_instance_klass()) { // it could be an array or simple type + // Temporary hack, for arrays of value types, this code should be removed + // once value types have their own array types + // With Q-descriptors, the code below needs to be reworked. + // It is still correct today because the only kind of value array supported + // is array of null-free values which map to the Q-signature. + // As soon as both arrays of null-free values and arrays of nullable values + // are supported, this code has to be rewritten to consider the kind of the + // array instead of the kind of the elements. 
+ if (element_klass->is_value()) { + new_str[idx++] = 'Q'; + } else { + new_str[idx++] = 'L'; + } + } + memcpy(&new_str[idx], name_str, len * sizeof(char)); + idx += len; + if (element_klass->is_instance_klass()) { + new_str[idx++] = ';'; + } + new_str[idx++] = '\0'; + name = SymbolTable::new_permanent_symbol(new_str, CHECK_NULL); + if (element_klass->is_instance_klass() || element_klass->is_value()) { + InstanceKlass* ik = InstanceKlass::cast(element_klass); + ik->set_array_name(name); + } + } + + return name; +} // Initialization of vtables and mirror object is done separatly from base_create_array_klass, // since a GC can happen. At this point all instance variables of the ArrayKlass must be setup. --- old/src/hotspot/share/oops/arrayKlass.hpp 2019-03-11 14:26:11.578355080 +0100 +++ new/src/hotspot/share/oops/arrayKlass.hpp 2019-03-11 14:26:11.378355083 +0100 @@ -43,13 +43,30 @@ Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present). protected: + Klass* _element_klass; // The klass of the elements of this array type + // The element type must be registered for both object arrays + // (incl. object arrays with value type elements) and value type + // arrays containing flattened value types. However, the element + // type must not be registered for arrays of primitive types. + // TODO: Update the class hierarchy so that element klass appears + // only in array that contain non-primitive types. 
// Constructors // The constructor with the Symbol argument does the real array // initialization, the other is a dummy ArrayKlass(Symbol* name, KlassID id); ArrayKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for cds"); } + // Create array_name for element klass, creates a permanent symbol, returns result + static Symbol* create_element_klass_array_name(Klass* element_klass, TRAPS); + public: + // Instance variables + virtual Klass* element_klass() const { return _element_klass; } + virtual void set_element_klass(Klass* k) { _element_klass = k; } + + // Compiler/Interpreter offset + static ByteSize element_klass_offset() { return in_ByteSize(offset_of(ArrayKlass, _element_klass)); } + // Testing operation DEBUG_ONLY(bool is_array_klass_slow() const { return true; }) --- old/src/hotspot/share/oops/arrayOop.hpp 2019-03-11 14:26:12.066355074 +0100 +++ new/src/hotspot/share/oops/arrayOop.hpp 2019-03-11 14:26:11.846355077 +0100 @@ -45,7 +45,7 @@ friend class arrayOopDescTest; // Interpreter/Compiler offsets - +protected: // Header size computation. // The header is considered the oop part of this type plus the length. // Returns the aligned header_size_in_bytes. This is not equivalent to @@ -66,7 +66,7 @@ // aligned 0 mod 8. The typeArrayOop itself must be aligned at least this // strongly. 
static bool element_type_should_be_aligned(BasicType type) { - return type == T_DOUBLE || type == T_LONG; + return type == T_DOUBLE || type == T_LONG || type == T_VALUETYPE; } public: @@ -126,6 +126,21 @@ : typesize_in_bytes/HeapWordSize); } + static int32_t max_array_length(int header_size, int elembytes) { + const size_t max_element_words_per_size_t = + align_down((SIZE_MAX/HeapWordSize - header_size), MinObjAlignment); + const size_t max_elements_per_size_t = + HeapWordSize * max_element_words_per_size_t / elembytes; + if ((size_t)max_jint < max_elements_per_size_t) { + // It should be ok to return max_jint here, but parts of the code + // (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for + // passing around the size (in words) of an object. So, we need to avoid + // overflowing an int when we add the header. See CRs 4718400 and 7110613. + return align_down(max_jint - header_size, MinObjAlignment); + } + return (int32_t)max_elements_per_size_t; + } + // Return the maximum length of an array of BasicType. The length can passed // to typeArrayOop::object_size(scale, length, header_size) without causing an // overflow. We also need to make sure that this will not overflow a size_t on --- old/src/hotspot/share/oops/constantPool.cpp 2019-03-11 14:26:12.550355067 +0100 +++ new/src/hotspot/share/oops/constantPool.cpp 2019-03-11 14:26:12.322355070 +0100 @@ -47,6 +47,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueArrayKlass.hpp" #include "runtime/fieldType.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" @@ -210,7 +211,7 @@ case JVM_CONSTANT_Class: case JVM_CONSTANT_UnresolvedClass: case JVM_CONSTANT_UnresolvedClassInError: - // All of these should have been reverted back to ClassIndex before calling + // All of these should have been reverted back to Unresolved before calling // this function. 
ShouldNotReachHere(); #endif @@ -234,10 +235,11 @@ // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* non-NULL, so we need hardware store ordering here. + jbyte qdesc_bit = name->is_Q_signature() ? (jbyte)JVM_CONSTANT_QDESC_BIT : 0; if (k != NULL) { - release_tag_at_put(class_index, JVM_CONSTANT_Class); + release_tag_at_put(class_index, JVM_CONSTANT_Class | qdesc_bit); } else { - release_tag_at_put(class_index, JVM_CONSTANT_UnresolvedClass); + release_tag_at_put(class_index, JVM_CONSTANT_UnresolvedClass | qdesc_bit); } } @@ -442,6 +444,12 @@ } } +void check_is_value_type(Klass* k, TRAPS) { + if (!k->is_value()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } +} + Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, bool save_resolution_error, TRAPS) { assert(THREAD->is_Java_thread(), "must be a Java thread"); @@ -476,6 +484,11 @@ Handle mirror_handle; Symbol* name = this_cp->symbol_at(name_index); + bool value_type_signature = false; + if (name->is_Q_signature()) { + name = name->fundamental_name(THREAD); + value_type_signature = true; + } Handle loader (THREAD, this_cp->pool_holder()->class_loader()); Handle protection_domain (THREAD, this_cp->pool_holder()->protection_domain()); @@ -485,6 +498,9 @@ JvmtiHideSingleStepping jhss(javaThread); k = SystemDictionary::resolve_or_fail(name, loader, protection_domain, true, THREAD); } // JvmtiHideSingleStepping jhss(javaThread); + if (value_type_signature) { + name->decrement_refcount(); + } if (!HAS_PENDING_EXCEPTION) { // preserve the resolved klass from unloading @@ -493,6 +509,22 @@ verify_constant_pool_resolve(this_cp, k, THREAD); } + if (!HAS_PENDING_EXCEPTION && value_type_signature) { + check_is_value_type(k, THREAD); + } + + if (!HAS_PENDING_EXCEPTION) { + Klass* bottom_klass = NULL; + if (k->is_objArray_klass()) { + bottom_klass = ObjArrayKlass::cast(k)->bottom_klass(); + assert(bottom_klass != NULL, "Should be set"); + 
assert(bottom_klass->is_instance_klass() || bottom_klass->is_typeArray_klass(), "Sanity check"); + } else if (k->is_valueArray_klass()) { + bottom_klass = ValueArrayKlass::cast(k)->element_klass(); + assert(bottom_klass != NULL, "Should be set"); + } + } + // Failed to resolve class. We must record the errors so that subsequent attempts // to resolve this constant pool entry fail with the same error (JVMS 5.4.3). if (HAS_PENDING_EXCEPTION) { @@ -518,7 +550,11 @@ // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* stored in _resolved_klasses is non-NULL, so we need // hardware store ordering here. - this_cp->release_tag_at_put(which, JVM_CONSTANT_Class); + jbyte tag = JVM_CONSTANT_Class; + if (this_cp->tag_at(which).is_Qdescriptor_klass()) { + tag |= JVM_CONSTANT_QDESC_BIT; + } + this_cp->release_tag_at_put(which, tag); return k; } --- old/src/hotspot/share/oops/constantPool.hpp 2019-03-11 14:26:13.070355060 +0100 +++ new/src/hotspot/share/oops/constantPool.hpp 2019-03-11 14:26:12.766355064 +0100 @@ -271,7 +271,7 @@ // Storing constants - // For temporary use while constructing constant pool + // For temporary use while constructing constant pool. Used during a retransform/class redefinition as well. 
void klass_index_at_put(int which, int name_index) { tag_at_put(which, JVM_CONSTANT_ClassIndex); *int_at_addr(which) = name_index; @@ -281,6 +281,15 @@ void klass_at_put(int class_index, int name_index, int resolved_klass_index, Klass* k, Symbol* name); void klass_at_put(int class_index, Klass* k); + void unresolved_qdescriptor_at_put(int which, int name_index, int resolved_klass_index) { + release_tag_at_put(which, JVM_CONSTANT_UnresolvedClass | (jbyte)JVM_CONSTANT_QDESC_BIT); + + assert((name_index & 0xffff0000) == 0, "must be"); + assert((resolved_klass_index & 0xffff0000) == 0, "must be"); + *int_at_addr(which) = + build_int_from_shorts((jushort)resolved_klass_index, (jushort)name_index); + } + void unresolved_klass_at_put(int which, int name_index, int resolved_klass_index) { release_tag_at_put(which, JVM_CONSTANT_UnresolvedClass); --- old/src/hotspot/share/oops/cpCache.cpp 2019-03-11 14:26:13.790355050 +0100 +++ new/src/hotspot/share/oops/cpCache.cpp 2019-03-11 14:26:13.446355054 +0100 @@ -133,14 +133,19 @@ TosState field_type, bool is_final, bool is_volatile, + bool is_flattened, + bool is_flattenable, Klass* root_klass) { set_f1(field_holder); set_f2(field_offset); assert((field_index & field_index_mask) == field_index, "field index does not fit in low flag bits"); + assert(!is_flattened || is_flattenable, "Sanity check"); set_field_flags(field_type, ((is_volatile ? 1 : 0) << is_volatile_shift) | - ((is_final ? 1 : 0) << is_final_shift), + ((is_final ? 1 : 0) << is_final_shift) | + ((is_flattened ? 1 : 0) << is_flattened_field_shift) | + ((is_flattenable ? 1 : 0) << is_flattenable_field_shift), field_index); set_bytecode_1(get_code); set_bytecode_2(put_code); @@ -287,6 +292,7 @@ // // We set bytecode_2() to _invokevirtual. // See also interpreterRuntime.cpp. 
(8/25/2000) + invoke_code = Bytecodes::_invokevirtual; } else { assert(invoke_code == Bytecodes::_invokevirtual || (invoke_code == Bytecodes::_invokeinterface && @@ -302,7 +308,7 @@ } } // set up for invokevirtual, even if linking for invokeinterface also: - set_bytecode_2(Bytecodes::_invokevirtual); + set_bytecode_2(invoke_code); } else { ShouldNotReachHere(); } --- old/src/hotspot/share/oops/cpCache.hpp 2019-03-11 14:26:14.438355041 +0100 +++ new/src/hotspot/share/oops/cpCache.hpp 2019-03-11 14:26:14.054355046 +0100 @@ -49,7 +49,7 @@ // _indices [ b2 | b1 | index ] index = constant_pool_index // _f1 [ entry specific ] metadata ptr (method or klass) // _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr -// _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries) +// _flags [tos|0|F=1|0|N|i|f|v|0 |0000|field_index] (for field entries) // bit length [ 4 |1| 1 |1|1|1|1|1|1 |1 |-3-|----16-----] // _flags [tos|0|F=0|S|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries) // bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--] @@ -75,6 +75,8 @@ // // The flags after TosState have the following interpretation: // bit 27: 0 for fields, 1 for methods +// N flag true if field is marked flattenable (must never be null) +// i flag true if field is inlined (flattened) // f flag true if field is marked final // v flag true if field is volatile (only for fields) // f2 flag true if f2 contains an oop (e.g., virtual final method) @@ -182,7 +184,9 @@ is_field_entry_shift = 26, // (F) is it a field or a method? has_local_signature_shift = 25, // (S) does the call site have a per-site signature (sig-poly methods)? has_appendix_shift = 24, // (A) does the call site have an appendix argument? + is_flattenable_field_shift = 24, // (N) is the field flattenable (must never be null) is_forced_virtual_shift = 23, // (I) is the interface reference forced to virtual mode? + is_flattened_field_shift = 23, // (i) is the value field flattened? 
is_final_shift = 22, // (f) is the field or method final? is_volatile_shift = 21, // (v) is the field volatile? is_vfinal_shift = 20, // (vf) did the call resolve to a final method? @@ -222,6 +226,8 @@ TosState field_type, // the (machine) field type bool is_final, // the field is final bool is_volatile, // the field is volatile + bool is_flattened, // the field is flattened (value field) + bool is_flattenable, // the field is flattenable (must never be null) Klass* root_klass // needed by the GC to dirty the klass ); @@ -311,6 +317,7 @@ case Bytecodes::_invokeinterface : return 1; case Bytecodes::_putstatic : // fall through case Bytecodes::_putfield : // fall through + case Bytecodes::_withfield : // fall through case Bytecodes::_invokevirtual : return 2; default : break; } @@ -338,11 +345,13 @@ int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; } Method* f2_as_vfinal_method() const { assert(is_vfinal(), ""); return (Method*)_f2; } Method* f2_as_interface_method() const; + int f2_as_offset() const { assert(is_field_entry(), ""); return (int)_f2; } intx flags_ord() const; int field_index() const { assert(is_field_entry(), ""); return (_flags & field_index_mask); } int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } + bool is_flattened() const { return (_flags & (1 << is_flattened_field_shift)) != 0; } bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } bool indy_resolution_failed() const; @@ -352,6 +361,7 @@ bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; } bool is_long() const { return flag_state() == ltos; } bool is_double() const { return flag_state() == dtos; } + bool is_flattenable() const { return 
(_flags & (1 << is_flattenable_field_shift)) != 0; } TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, ""); return (TosState)((_flags >> tos_state_shift) & tos_state_mask); } void set_indy_resolution_failed(); --- old/src/hotspot/share/oops/fieldInfo.hpp 2019-03-11 14:26:15.298355029 +0100 +++ new/src/hotspot/share/oops/fieldInfo.hpp 2019-03-11 14:26:14.950355034 +0100 @@ -45,20 +45,25 @@ // Field info extracted from the class file and stored // as an array of 6 shorts. -#define FIELDINFO_TAG_SIZE 2 +#define FIELDINFO_TAG_SIZE 3 #define FIELDINFO_TAG_BLANK 0 #define FIELDINFO_TAG_OFFSET 1 #define FIELDINFO_TAG_TYPE_PLAIN 2 #define FIELDINFO_TAG_TYPE_CONTENDED 3 -#define FIELDINFO_TAG_MASK 3 +#define FIELDINFO_TAG_TYPE_MASK 3 +#define FIELDINFO_TAG_MASK 7 +#define FIELDINFO_TAG_FLATTENED 4 // Packed field has the tag, and can be either of: // hi bits <--------------------------- lo bits // |---------high---------|---------low---------| // ..........................................00 - blank - // [------------------offset----------------]01 - real field offset - // ......................[-------type-------]10 - plain field with type - // [--contention_group--][-------type-------]11 - contended field with type and contention group + // [------------------offset---------------]F01 - real field offset + // ......................[-------type------]F10 - plain field with type + // [--contention_group--][-------type------]F11 - contended field with type and contention group + // + // Bit F indicates if the field has been flattened (F=1) or nor (F=0) + enum FieldOffset { access_flags_offset = 0, name_index_offset = 1, @@ -103,7 +108,7 @@ u2 access_flags() const { return _shorts[access_flags_offset]; } u4 offset() const { u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_OFFSET: return build_int_from_shorts(_shorts[low_packed_offset], 
_shorts[high_packed_offset]) >> FIELDINFO_TAG_SIZE; #ifndef PRODUCT @@ -121,7 +126,7 @@ bool is_contended() const { u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_TYPE_PLAIN: return false; case FIELDINFO_TAG_TYPE_CONTENDED: @@ -139,7 +144,7 @@ u2 contended_group() const { u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_TYPE_PLAIN: return 0; case FIELDINFO_TAG_TYPE_CONTENDED: @@ -157,7 +162,7 @@ u2 allocation_type() const { u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_TYPE_PLAIN: case FIELDINFO_TAG_TYPE_CONTENDED: return (lo >> FIELDINFO_TAG_SIZE); @@ -173,7 +178,7 @@ } bool is_offset_set() const { - return (_shorts[low_packed_offset] & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET; + return (_shorts[low_packed_offset] & FIELDINFO_TAG_TYPE_MASK) == FIELDINFO_TAG_OFFSET; } Symbol* name(const constantPoolHandle& cp) const { @@ -195,17 +200,22 @@ void set_access_flags(u2 val) { _shorts[access_flags_offset] = val; } void set_offset(u4 val) { val = val << FIELDINFO_TAG_SIZE; // make room for tag + bool flattened = is_flattened(); _shorts[low_packed_offset] = extract_low_short_from_int(val) | FIELDINFO_TAG_OFFSET; + if (flattened) set_flattened(true); _shorts[high_packed_offset] = extract_high_short_from_int(val); + assert(is_flattened() || !flattened, "just checking"); } void set_allocation_type(int type) { + bool b = is_flattened(); u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_BLANK: - _shorts[low_packed_offset] = ((type << FIELDINFO_TAG_SIZE)) & 0xFFFF; - _shorts[low_packed_offset] &= ~FIELDINFO_TAG_MASK; + _shorts[low_packed_offset] |= ((type << FIELDINFO_TAG_SIZE)) & 0xFFFF; + _shorts[low_packed_offset] &= ~FIELDINFO_TAG_TYPE_MASK; 
_shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_PLAIN; + assert(is_flattened() || !b, "Just checking"); return; #ifndef PRODUCT case FIELDINFO_TAG_TYPE_PLAIN: @@ -217,9 +227,21 @@ ShouldNotReachHere(); } + void set_flattened(bool b) { + if (b) { + _shorts[low_packed_offset] |= FIELDINFO_TAG_FLATTENED; + } else { + _shorts[low_packed_offset] &= ~FIELDINFO_TAG_FLATTENED; + } + } + + bool is_flattened() { + return (_shorts[low_packed_offset] & FIELDINFO_TAG_FLATTENED) != 0; + } + void set_contended_group(u2 val) { u2 lo = _shorts[low_packed_offset]; - switch(lo & FIELDINFO_TAG_MASK) { + switch(lo & FIELDINFO_TAG_TYPE_MASK) { case FIELDINFO_TAG_TYPE_PLAIN: _shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_CONTENDED; _shorts[high_packed_offset] = val; @@ -248,6 +270,10 @@ else _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE; } + bool is_flattenable() const { + return (access_flags() & JVM_ACC_FLATTENABLE) != 0; + } + Symbol* lookup_symbol(int symbol_index) const { assert(is_internal(), "only internal fields"); return vmSymbols::symbol_at((vmSymbols::SID)symbol_index); --- old/src/hotspot/share/oops/fieldStreams.hpp 2019-03-11 14:26:15.946355020 +0100 +++ new/src/hotspot/share/oops/fieldStreams.hpp 2019-03-11 14:26:15.714355023 +0100 @@ -38,6 +38,7 @@ // AllFieldStream exposes all fields and should only be used in rare // cases. 
class FieldStreamBase : public StackObj { + protected: Array* _fields; constantPoolHandle _constants; @@ -161,6 +162,20 @@ return field()->allocation_type(); } + bool is_flattened() { + return field()->is_flattened(); + } + + void set_flattened(bool b) { + field()->set_flattened(b); + } + + bool is_flattenable() const { + AccessFlags flags; + flags.set_flags(field()->access_flags()); + return flags.is_flattenable(); + } + void set_offset(int offset) { field()->set_offset(offset); } --- old/src/hotspot/share/oops/generateOopMap.cpp 2019-03-11 14:26:16.758355009 +0100 +++ new/src/hotspot/share/oops/generateOopMap.cpp 2019-03-11 14:26:16.446355013 +0100 @@ -122,6 +122,7 @@ virtual void do_int () { set(CellTypeState::value); }; virtual void do_void () { set(CellTypeState::bottom);}; virtual void do_object(int begin, int end) { set(CellTypeState::ref); }; + virtual void do_valuetype(int begin, int end) { set(CellTypeState::ref); }; virtual void do_array (int begin, int end) { set(CellTypeState::ref); }; void do_double() { set(CellTypeState::value); @@ -137,8 +138,9 @@ _idx = 0; _effect = effect; - if (!is_static) + if (!is_static) { effect[_idx++] = CellTypeState::ref; + } iterate_parameters(); @@ -176,6 +178,7 @@ virtual void do_int () { set(CellTypeState::value); }; virtual void do_void () { set(CellTypeState::bottom);}; virtual void do_object(int begin, int end) { set(CellTypeState::make_slot_ref(_idx)); } + virtual void do_valuetype(int begin, int end) { set(CellTypeState::make_slot_ref(_idx)); } virtual void do_array (int begin, int end) { set(CellTypeState::make_slot_ref(_idx)); } void do_double() { set(CellTypeState::value); @@ -1376,6 +1379,9 @@ case Bytecodes::_new: ppush1(CellTypeState::make_line_ref(itr->bci())); break; + case Bytecodes::_defaultvalue: ppush1(CellTypeState::make_line_ref(itr->bci())); break; + case Bytecodes::_withfield: do_withfield(itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_iconst_m1: case Bytecodes::_iconst_0: case 
Bytecodes::_iconst_1: @@ -1589,16 +1595,16 @@ case Bytecodes::_jsr: do_jsr(itr->dest()); break; case Bytecodes::_jsr_w: do_jsr(itr->dest_w()); break; - case Bytecodes::_getstatic: do_field(true, true, itr->get_index_u2_cpcache(), itr->bci()); break; - case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_getstatic: do_field(true, true, itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2_cpcache(), itr->bci()); break; case Bytecodes::_getfield: do_field(true, false, itr->get_index_u2_cpcache(), itr->bci()); break; case Bytecodes::_putfield: do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_invokeinterface: case Bytecodes::_invokevirtual: - case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2_cpcache(), itr->bci()); break; - case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_u2_cpcache(), itr->bci()); break; - case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_u4(), itr->bci()); break; - case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_invokespecial: do_method(false, itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_invokestatic: do_method(true , itr->get_index_u2_cpcache(), itr->bci()); break; + case Bytecodes::_invokedynamic: do_method(true , itr->get_index_u4(), itr->bci()); break; case Bytecodes::_newarray: case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break; case Bytecodes::_checkcast: do_checkcast(); break; @@ -1618,6 +1624,7 @@ case Bytecodes::_areturn: do_return_monitor_check(); ppop1(refCTS); break; + case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: ppop1(refCTS); break; case Bytecodes::_multianewarray: do_multianewarray(*(itr->bcp()+3), itr->bci()); break; @@ -1724,7 +1731,7 @@ } void GenerateOopMap::ppush1(CellTypeState 
in) { - assert(in.is_reference() | in.is_value(), "sanity check"); + assert(in.is_reference() || in.is_value(), "sanity check"); push(in); } @@ -1947,13 +1954,15 @@ out = epsilonCTS; i = copy_cts(in, eff); } - if (!is_static) in[i++] = CellTypeState::ref; + if (!is_static) { + in[i++] = CellTypeState::ref; + } in[i] = CellTypeState::bottom; assert(i<=3, "sanity check"); pp(in, out); } -void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci) { +void GenerateOopMap::do_method(int is_static, int idx, int bci) { // Dig up signature for field in constant pool ConstantPool* cp = _method->constants(); Symbol* signature = cp->signature_ref_at(idx); @@ -1990,10 +1999,38 @@ ppush(out); } +void GenerateOopMap::do_withfield(int idx, int bci) { + // Dig up signature for field in constant pool + ConstantPool* cp = method()->constants(); + int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx); + int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); + Symbol* signature = cp->symbol_at(signatureIdx); + + // Parse signature (especially simple for fields) + assert(signature->utf8_length() > 0, + "field signatures cannot have zero length"); + // The signature is UFT8 encoded, but the first char is always ASCII for signatures. + char sigch = (char) *(signature->base()); + CellTypeState temp[4]; + CellTypeState *eff = sigchar_to_effect(sigch, bci, temp); + + CellTypeState in[4]; + int i = copy_cts(in, eff); + in[i++] = CellTypeState::ref; + in[i] = CellTypeState::bottom; + assert(i <= 3, "sanity check"); + + CellTypeState out[2]; + out[0] = CellTypeState::ref; + out[1] = CellTypeState::bottom; + + pp(in, out); +} + // This is used to parse the signature for fields, since they are very simple... 
CellTypeState *GenerateOopMap::sigchar_to_effect(char sigch, int bci, CellTypeState *out) { // Object and array - if (sigch=='L' || sigch=='[') { + if (sigch=='L' || sigch=='[' || sigch=='Q') { out[0] = CellTypeState::make_line_ref(bci); out[1] = CellTypeState::bottom; return out; --- old/src/hotspot/share/oops/generateOopMap.hpp 2019-03-11 14:26:17.326355001 +0100 +++ new/src/hotspot/share/oops/generateOopMap.hpp 2019-03-11 14:26:17.066355004 +0100 @@ -92,7 +92,7 @@ unsigned int _state; // Masks for separating the BITS and INFO portions of a CellTypeState - enum { info_mask = right_n_bits(28), + enum { info_mask = right_n_bits(27), bits_mask = (int)(~info_mask) }; // These constant are used for manipulating the BITS portion of a @@ -105,18 +105,23 @@ // These constants are used for manipulating the INFO portion of a // CellTypeState - enum { top_info_bit = nth_bit(27), - not_bottom_info_bit = nth_bit(26), - info_data_mask = right_n_bits(26), + enum { top_info_bit = nth_bit(26), + not_bottom_info_bit = nth_bit(25), + info_data_mask = right_n_bits(25), info_conflict = info_mask }; // Within the INFO data, these values are used to distinguish different // kinds of references. - enum { ref_not_lock_bit = nth_bit(25), // 0 if this reference is locked as a monitor - ref_slot_bit = nth_bit(24), // 1 if this reference is a "slot" reference, + enum { ref_not_lock_bit = nth_bit(24), // 0 if this reference is locked as a monitor + ref_slot_bit = nth_bit(23), // 1 if this reference is a "slot" reference, // 0 if it is a "line" reference. - ref_data_mask = right_n_bits(24) }; + ref_data_mask = right_n_bits(23) }; + // Within the INFO data, these values are used to distinguish different + // kinds of value types. + enum { valuetype_slot_bit = nth_bit(24), // 1 if this reference is a "slot" value type, + // 0 if it is a "line" value type. + valuetype_data_mask = right_n_bits(24) }; // These values are used to initialize commonly used CellTypeState // constants. 
@@ -398,7 +403,8 @@ void do_astore (int idx); void do_jsr (int delta); void do_field (int is_get, int is_static, int idx, int bci); - void do_method (int is_static, int is_interface, int idx, int bci); + void do_method (int is_static, int idx, int bci); + void do_withfield (int idx, int bci); void do_multianewarray (int dims, int bci); void do_monitorenter (int bci); void do_monitorexit (int bci); --- old/src/hotspot/share/oops/instanceKlass.cpp 2019-03-11 14:26:17.778354995 +0100 +++ new/src/hotspot/share/oops/instanceKlass.cpp 2019-03-11 14:26:17.562354998 +0100 @@ -62,6 +62,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiRedefineClasses.hpp" #include "prims/jvmtiThreadState.hpp" @@ -370,7 +371,9 @@ nonstatic_oop_map_size(parser.total_oop_map_count()), parser.is_interface(), parser.is_unsafe_anonymous(), - should_store_fingerprint(parser.is_unsafe_anonymous())); + should_store_fingerprint(parser.is_unsafe_anonymous()), + parser.has_flattenable_fields() ? 
parser.java_fields_count() : 0, + parser.is_value_type()); const Symbol* const class_name = parser.class_name(); assert(class_name != NULL, "invariant"); @@ -384,10 +387,12 @@ if (class_name == vmSymbols::java_lang_Class()) { // mirror ik = new (loader_data, size, THREAD) InstanceMirrorKlass(parser); - } - else if (is_class_loader(class_name, parser)) { + } else if (is_class_loader(class_name, parser)) { // class loader ik = new (loader_data, size, THREAD) InstanceClassLoaderKlass(parser); + } else if (parser.is_value_type()) { + // value type + ik = new (loader_data, size, THREAD) ValueKlass(parser); } else { // normal ik = new (loader_data, size, THREAD) InstanceKlass(parser, InstanceKlass::_misc_kind_other); @@ -403,9 +408,39 @@ return NULL; } +#ifdef ASSERT + assert(ik->size() == size, ""); + ik->bounds_check((address) ik->start_of_vtable(), false, size); + ik->bounds_check((address) ik->start_of_itable(), false, size); + ik->bounds_check((address) ik->end_of_itable(), true, size); + ik->bounds_check((address) ik->end_of_nonstatic_oop_maps(), true, size); +#endif //ASSERT return ik; } +#ifndef PRODUCT +bool InstanceKlass::bounds_check(address addr, bool edge_ok, intptr_t size_in_bytes) const { + const char* bad = NULL; + address end = NULL; + if (addr < (address)this) { + bad = "before"; + } else if (addr == (address)this) { + if (edge_ok) return true; + bad = "just before"; + } else if (addr == (end = (address)this + sizeof(intptr_t) * (size_in_bytes < 0 ? size() : size_in_bytes))) { + if (edge_ok) return true; + bad = "just after"; + } else if (addr > end) { + bad = "after"; + } else { + return true; + } + tty->print_cr("%s object bounds: " INTPTR_FORMAT " [" INTPTR_FORMAT ".." 
INTPTR_FORMAT "]", + bad, (intptr_t)addr, (intptr_t)this, (intptr_t)end); + Verbose = WizardMode = true; this->print(); //@@ + return false; +} +#endif //PRODUCT // copy method ordering from resource area to Metaspace void InstanceKlass::copy_method_ordering(const intArray* m, TRAPS) { @@ -436,22 +471,27 @@ _static_field_size(parser.static_field_size()), _nonstatic_oop_map_size(nonstatic_oop_map_size(parser.total_oop_map_count())), _itable_len(parser.itable_size()), - _reference_type(parser.reference_type()) -{ - set_vtable_length(parser.vtable_size()); - set_kind(kind); - set_access_flags(parser.access_flags()); - set_is_unsafe_anonymous(parser.is_unsafe_anonymous()); - set_layout_helper(Klass::instance_layout_helper(parser.layout_size(), + _extra_flags(0), + _reference_type(parser.reference_type()), + _adr_valueklass_fixed_block(NULL) { + set_vtable_length(parser.vtable_size()); + set_kind(kind); + set_access_flags(parser.access_flags()); + set_is_unsafe_anonymous(parser.is_unsafe_anonymous()); + set_layout_helper(Klass::instance_layout_helper(parser.layout_size(), false)); + if (parser.has_flattenable_fields()) { + set_has_value_fields(); + } + _java_fields_count = parser.java_fields_count(); - assert(NULL == _methods, "underlying memory not zeroed?"); - assert(is_instance_klass(), "is layout incorrect?"); - assert(size_helper() == parser.layout_size(), "incorrect size_helper?"); + assert(NULL == _methods, "underlying memory not zeroed?"); + assert(is_instance_klass(), "is layout incorrect?"); + assert(size_helper() == parser.layout_size(), "incorrect size_helper?"); - if (DumpSharedSpaces) { - SystemDictionaryShared::init_dumptime_info(this); - } + if (DumpSharedSpaces) { + SystemDictionaryShared::init_dumptime_info(this); + } } void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data, @@ -789,6 +829,67 @@ interk->link_class_impl(CHECK_false); } + + // If a class declares a method that uses a value class as an argument + // type or return value type, 
this value class must be loaded during the + // linking of this class because size and properties of the value class + // must be known in order to be able to perform value type optimizations. + // The implementation below is an approximation of this rule, the code + // iterates over all methods of the current class (including overridden + // methods), not only the methods declared by this class. This + // approximation makes the code simpler, and doesn't change the semantic + // because classes declaring methods overridden by the current class are + // linked (and have performed their own pre-loading) before the linking + // of the current class. + // This is also the moment to detect potential mismatch between the + // ValueTypes attribute and the kind of the class effectively loaded. + + + // Note: + // Value class types used for flattenable fields are loaded during + // the loading phase (see ClassFileParser::post_process_parsed_stream()). + // Value class types used as element types for array creation + // are not pre-loaded. Their loading is triggered by either anewarray + // or multianewarray bytecodes. + + // Could it be possible to do the following processing only if the + // class uses value types? 
+ { + ResourceMark rm(THREAD); + for (int i = 0; i < methods()->length(); i++) { + Method* m = methods()->at(i); + for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { + Symbol* sig = ss.as_symbol(THREAD); + if (ss.is_object()) { + Symbol* symb = sig; + if (ss.is_array()) { + int i=0; + while (sig->char_at(i) == '[') i++; + if (i == sig->utf8_length() - 1 ) continue; // primitive array + symb = SymbolTable::lookup(sig->as_C_string() + i + 1, + sig->utf8_length() - 3, CHECK_false); + } + if (ss.type() == T_VALUETYPE) { + oop loader = class_loader(); + oop protection_domain = this->protection_domain(); + Klass* klass = SystemDictionary::resolve_or_fail(symb, + Handle(THREAD, loader), Handle(THREAD, protection_domain), true, + CHECK_false); + if (symb != sig) { + symb->decrement_refcount(); + } + if (klass == NULL) { + THROW_(vmSymbols::java_lang_LinkageError(), false); + } + if (!klass->is_value()) { + THROW_(vmSymbols::java_lang_IncompatibleClassChangeError(), false); + } + } + } + } + } + } + // in case the class is linked in the process of linking its superclasses if (is_linked()) { return true; @@ -858,6 +959,7 @@ // itable().verify(tty, true); } #endif + set_init_state(linked); if (JvmtiExport::should_post_class_prepare()) { Thread *thread = THREAD; @@ -1011,11 +1113,22 @@ } } + // Step 8 + // Initialize classes of flattenable fields + { + for (AllFieldStream fs(this); !fs.done(); fs.next()) { + if (fs.is_flattenable()) { + InstanceKlass* field_klass = InstanceKlass::cast(this->get_value_field_klass(fs.index())); + field_klass->initialize(CHECK); + } + } + } + // Look for aot compiled methods for this klass, including class initializer. 
AOTLoader::load_for_klass(this, THREAD); - // Step 8 + // Step 9 { DTRACE_CLASSINIT_PROBE_WAIT(clinit, -1, wait); // Timer includes any side effects of class initialization (resolution, @@ -1029,7 +1142,7 @@ call_class_initializer(THREAD); } - // Step 9 + // Step 10 if (!HAS_PENDING_EXCEPTION) { set_initialization_state_and_notify(fully_initialized, CHECK); { @@ -1037,7 +1150,7 @@ } } else { - // Step 10 and 11 + // Step 11 and 12 Handle e(THREAD, PENDING_EXCEPTION); CLEAR_PENDING_EXCEPTION; // JVMTI has already reported the pending exception @@ -1359,7 +1472,7 @@ // Lock-free access requires load_acquire. OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache); if (oop_map_cache == NULL) { - MutexLocker x(OopMapCacheAlloc_lock); + MutexLockerEx x(OopMapCacheAlloc_lock, Mutex::_no_safepoint_check_flag); // Check if _oop_map_cache was allocated while we were waiting for this lock if ((oop_map_cache = _oop_map_cache) == NULL) { oop_map_cache = new OopMapCache(); @@ -2521,6 +2634,14 @@ // unreference array name derived from this class name (arrays of an unloaded // class can't be referenced anymore). if (_array_name != NULL) _array_name->decrement_refcount(); + if (_value_types != NULL) { + for (int i = 0; i < _value_types->length(); i++) { + Symbol* s = _value_types->at(i)._class_name; + if (s != NULL) { + s->decrement_refcount(); + } + } + } if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension); } @@ -2543,6 +2664,10 @@ } const char* InstanceKlass::signature_name() const { + return signature_name_of(is_value() ? 
'Q' : 'L'); +} + +const char* InstanceKlass::signature_name_of(char c) const { int hash_len = 0; char hash_buf[40]; @@ -2559,9 +2684,9 @@ char* dest = NEW_RESOURCE_ARRAY(char, src_length + hash_len + 3); - // Add L as type indicator + // Add L or Q as type indicator int dest_index = 0; - dest[dest_index++] = 'L'; + dest[dest_index++] = c; // Add the actual class name for (int src_index = 0; src_index < src_length; ) { @@ -3083,20 +3208,55 @@ "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error" }; -static void print_vtable(intptr_t* start, int len, outputStream* st) { +static void print_vtable(address self, intptr_t* start, int len, outputStream* st) { + ResourceMark rm; + int* forward_refs = NEW_RESOURCE_ARRAY(int, len); + for (int i = 0; i < len; i++) forward_refs[i] = 0; for (int i = 0; i < len; i++) { intptr_t e = start[i]; st->print("%d : " INTPTR_FORMAT, i, e); + if (forward_refs[i] != 0) { + int from = forward_refs[i]; + int off = (int) start[from]; + st->print(" (offset %d <= [%d])", off, from); + } if (e != 0 && ((Metadata*)e)->is_metaspace_object()) { st->print(" "); ((Metadata*)e)->print_value_on(st); + } else if (self != NULL && e > 0 && e < 0x10000) { + address location = self + e; + int index = (int)((intptr_t*)location - start); + st->print(" (offset %d => [%d])", (int)e, index); + if (index >= 0 && index < len) + forward_refs[index] = i; } st->cr(); } } static void print_vtable(vtableEntry* start, int len, outputStream* st) { - return print_vtable(reinterpret_cast(start), len, st); + return print_vtable(NULL, reinterpret_cast(start), len, st); +} + +template + static void print_array_on(outputStream* st, Array* array) { + if (array == NULL) { st->print_cr("NULL"); return; } + array->print_value_on(st); st->cr(); + if (Verbose || WizardMode) { + for (int i = 0; i < array->length(); i++) { + st->print("%d : ", i); array->at(i)->print_value_on(st); st->cr(); + } + } + } + +static void 
print_array_on(outputStream* st, Array* array) { + if (array == NULL) { st->print_cr("NULL"); return; } + array->print_value_on(st); st->cr(); + if (Verbose || WizardMode) { + for (int i = 0; i < array->length(); i++) { + st->print("%d : %d", i, array->at(i)); st->cr(); + } + } } void InstanceKlass::print_on(outputStream* st) const { @@ -3106,6 +3266,7 @@ st->print(BULLET"instance size: %d", size_helper()); st->cr(); st->print(BULLET"klass size: %d", size()); st->cr(); st->print(BULLET"access: "); access_flags().print_on(st); st->cr(); + st->print(BULLET"misc flags: 0x%x", _misc_flags); st->cr(); st->print(BULLET"state: "); st->print_cr("%s", state_names[_init_state]); st->print(BULLET"name: "); name()->print_value_on(st); st->cr(); st->print(BULLET"super: "); Metadata::print_value_on_maybe_null(st, super()); st->cr(); @@ -3132,26 +3293,14 @@ } st->print(BULLET"arrays: "); Metadata::print_value_on_maybe_null(st, array_klasses()); st->cr(); - st->print(BULLET"methods: "); methods()->print_value_on(st); st->cr(); - if (Verbose || WizardMode) { - Array* method_array = methods(); - for (int i = 0; i < method_array->length(); i++) { - st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); - } - } - st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr(); - st->print(BULLET"default_methods: "); default_methods()->print_value_on(st); st->cr(); - if (Verbose && default_methods() != NULL) { - Array* method_array = default_methods(); - for (int i = 0; i < method_array->length(); i++) { - st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); - } - } + st->print(BULLET"methods: "); print_array_on(st, methods()); + st->print(BULLET"method ordering: "); print_array_on(st, method_ordering()); + st->print(BULLET"default_methods: "); print_array_on(st, default_methods()); if (default_vtable_indices() != NULL) { - st->print(BULLET"default vtable indices: "); default_vtable_indices()->print_value_on(st); st->cr(); + 
st->print(BULLET"default vtable indices: "); print_array_on(st, default_vtable_indices()); } - st->print(BULLET"local interfaces: "); local_interfaces()->print_value_on(st); st->cr(); - st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr(); + st->print(BULLET"local interfaces: "); print_array_on(st, local_interfaces()); + st->print(BULLET"trans. interfaces: "); print_array_on(st, transitive_interfaces()); st->print(BULLET"constants: "); constants()->print_value_on(st); st->cr(); if (class_loader_data() != NULL) { st->print(BULLET"class loader data: "); @@ -3204,7 +3353,7 @@ st->print(BULLET"vtable length %d (start addr: " INTPTR_FORMAT ")", vtable_length(), p2i(start_of_vtable())); st->cr(); if (vtable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_vtable(), vtable_length(), st); st->print(BULLET"itable length %d (start addr: " INTPTR_FORMAT ")", itable_length(), p2i(start_of_itable())); st->cr(); - if (itable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_itable(), itable_length(), st); + if (itable_length() > 0 && (Verbose || WizardMode)) print_vtable(NULL, start_of_itable(), itable_length(), st); st->print_cr(BULLET"---- static fields (%d words):", static_field_size()); FieldPrinter print_static_field(st); ((InstanceKlass*)this)->do_local_static_fields(&print_static_field); @@ -3978,3 +4127,8 @@ return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file); } #endif + +#define THROW_DVT_ERROR(s) \ + Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IncompatibleClassChangeError(), \ + "ValueCapableClass class '%s' %s", external_name(),(s)); \ + return --- old/src/hotspot/share/oops/instanceKlass.hpp 2019-03-11 14:26:18.238354988 +0100 +++ new/src/hotspot/share/oops/instanceKlass.hpp 2019-03-11 14:26:18.022354991 +0100 @@ -29,6 +29,7 @@ #include "classfile/classLoaderData.hpp" #include "classfile/moduleEntry.hpp" #include "classfile/packageEntry.hpp" +#include 
"code/vmreg.hpp" #include "memory/referenceType.hpp" #include "oops/annotations.hpp" #include "oops/constMethod.hpp" @@ -56,6 +57,7 @@ // [EMBEDDED implementor of the interface] only exist for interface // [EMBEDDED unsafe_anonymous_host klass] only exist for an unsafe anonymous class (JSR 292 enabled) // [EMBEDDED fingerprint ] only if should_store_fingerprint()==true +// [EMBEDDED ValueKlassFixedBlock] only if is a ValueKlass instance // forward declaration for class -- see below for definition @@ -72,6 +74,7 @@ class nmethodBucket; class SuperTypeClosure; class OopMapCache; +class BufferedValueTypeBlob; class InterpreterOopMap; // This is used in iterators below. @@ -103,12 +106,28 @@ uint count() const { return _count; } void set_count(uint count) { _count = count; } + void increment_count(int diff) { _count += diff; } + + int offset_span() const { return _count * heapOopSize; } + + int end_offset() const { + return offset() + offset_span(); + } + + bool is_contiguous(int another_offset) const { + return another_offset == end_offset(); + } + // sizeof(OopMapBlock) in words. static const int size_in_words() { return align_up((int)sizeof(OopMapBlock), wordSize) >> LogBytesPerWord; } + static int compare_offset(const OopMapBlock* a, const OopMapBlock* b) { + return a->offset() - b->offset(); + } + private: int _offset; uint _count; @@ -116,6 +135,24 @@ struct JvmtiCachedClassFileData; +class SigEntry; + +class ValueKlassFixedBlock { + Array** _extended_sig; + Array** _return_regs; + address* _pack_handler; + address* _unpack_handler; + int* _default_value_offset; + + friend class ValueKlass; +}; + +class ValueTypes { +public: + u2 _class_info_index; + Symbol* _class_name; +}; + class InstanceKlass: public Klass { friend class VMStructs; friend class JVMCIVMStructs; @@ -183,6 +220,8 @@ // By always being set it makes nest-member access checks simpler. 
InstanceKlass* _nest_host; + Array* _value_types; + // the source debug extension for this klass, NULL if not specified. // Specified as UTF-8 string without terminating zero byte in the classfile, // it is stored in the instanceklass as a NULL-terminated UTF-8 string @@ -209,13 +248,23 @@ // _is_marked_dependent can be set concurrently, thus cannot be part of the // _misc_flags. bool _is_marked_dependent; // used for marking during flushing and deoptimization - bool _is_being_redefined; // used for locking redefinition - // The low two bits of _misc_flags contains the kind field. - // This can be used to quickly discriminate among the four kinds of + public: + enum { + _extra_is_being_redefined = 1 << 0, // used for locking redefinition + _extra_has_resolved_methods = 1 << 1, // resolved methods table entries added for this class + _extra_has_value_fields = 1 << 2, // has value fields and related embedded section is not empty + _extra_is_bufferable = 1 << 3 // value can be buffered out side of the Java heap + }; + + protected: + u1 _extra_flags; + + // The low three bits of _misc_flags contains the kind field. + // This can be used to quickly discriminate among the five kinds of // InstanceKlass. - static const unsigned _misc_kind_field_size = 2; + static const unsigned _misc_kind_field_size = 3; static const unsigned _misc_kind_field_pos = 0; static const unsigned _misc_kind_field_mask = (1u << _misc_kind_field_size) - 1u; @@ -223,24 +272,25 @@ static const unsigned _misc_kind_reference = 1; // InstanceRefKlass static const unsigned _misc_kind_class_loader = 2; // InstanceClassLoaderKlass static const unsigned _misc_kind_mirror = 3; // InstanceMirrorKlass + static const unsigned _misc_kind_value_type = 4; // ValueKlass // Start after _misc_kind field. enum { - _misc_rewritten = 1 << 2, // methods rewritten. 
- _misc_has_nonstatic_fields = 1 << 3, // for sizing with UseCompressedOops - _misc_should_verify_class = 1 << 4, // allow caching of preverification - _misc_is_unsafe_anonymous = 1 << 5, // has embedded _unsafe_anonymous_host field - _misc_is_contended = 1 << 6, // marked with contended annotation - _misc_has_nonstatic_concrete_methods = 1 << 7, // class/superclass/implemented interfaces has non-static, concrete methods - _misc_declares_nonstatic_concrete_methods = 1 << 8, // directly declares non-static, concrete methods - _misc_has_been_redefined = 1 << 9, // class has been redefined - _misc_has_passed_fingerprint_check = 1 << 10, // when this class was loaded, the fingerprint computed from its + _misc_rewritten = 1 << 3, // methods rewritten. + _misc_has_nonstatic_fields = 1 << 4, // for sizing with UseCompressedOops + _misc_should_verify_class = 1 << 5, // allow caching of preverification + _misc_is_unsafe_anonymous = 1 << 6, // has embedded _unsafe_anonymous_host field + _misc_is_contended = 1 << 7, // marked with contended annotation + _misc_has_nonstatic_concrete_methods = 1 << 8, // class/superclass/implemented interfaces has non-static, concrete methods + _misc_declares_nonstatic_concrete_methods = 1 << 9, // directly declares non-static, concrete methods + _misc_has_been_redefined = 1 << 10, // class has been redefined + _misc_has_passed_fingerprint_check = 1 << 11, // when this class was loaded, the fingerprint computed from its // code source was found to be matching the value recorded by AOT. 
- _misc_is_scratch_class = 1 << 11, // class is the redefined scratch class - _misc_is_shared_boot_class = 1 << 12, // defining class loader is boot class loader - _misc_is_shared_platform_class = 1 << 13, // defining class loader is platform class loader - _misc_is_shared_app_class = 1 << 14, // defining class loader is app class loader - _misc_has_resolved_methods = 1 << 15 // resolved methods table entries added for this class + _misc_is_scratch_class = 1 << 12, // class is the redefined scratch class + _misc_is_shared_boot_class = 1 << 13, // defining class loader is boot class loader + _misc_is_shared_platform_class = 1 << 14, // defining class loader is platform class loader + _misc_is_shared_app_class = 1 << 15 // defining class loader is app class loader + // u2 _misc_flags full (see _extra_flags) }; u2 loader_type_bits() { return _misc_is_shared_boot_class|_misc_is_shared_platform_class|_misc_is_shared_app_class; @@ -308,6 +358,8 @@ // ... Array* _fields; + const ValueKlassFixedBlock* _adr_valueklass_fixed_block; + // embedded Java vtable follows here // embedded Java itables follows here // embedded static fields follows here @@ -377,6 +429,13 @@ } } + bool has_value_fields() const { + return (_extra_flags & _extra_has_value_fields) != 0; + } + void set_has_value_fields() { + _extra_flags |= _extra_has_value_fields; + } + // field sizes int nonstatic_field_size() const { return _nonstatic_field_size; } void set_nonstatic_field_size(int size) { _nonstatic_field_size = size; } @@ -439,6 +498,7 @@ int field_access_flags(int index) const { return field(index)->access_flags(); } Symbol* field_name (int index) const { return field(index)->name(constants()); } Symbol* field_signature (int index) const { return field(index)->signature(constants()); } + bool field_is_flattened(int index) const { return field(index)->is_flattened(); } // Number of Java declared fields int java_fields_count() const { return (int)_java_fields_count; } @@ -538,6 +598,8 @@ bool 
is_marked_dependent() const { return _is_marked_dependent; } void set_is_marked_dependent(bool value) { _is_marked_dependent = value; } + static ByteSize extra_flags_offset() { return in_ByteSize(offset_of(InstanceKlass, _extra_flags)); } + // initialization (virtuals from Klass) bool should_be_initialized() const; // means that initialize should be called void initialize(TRAPS); @@ -574,7 +636,7 @@ // find a non-static or static field given its offset within the class. bool contains_field_offset(int offset) { - return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size()); + return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size(), is_value()); } bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const; @@ -731,8 +793,16 @@ #if INCLUDE_JVMTI // Redefinition locking. Class can only be redefined by one thread at a time. - bool is_being_redefined() const { return _is_being_redefined; } - void set_is_being_redefined(bool value) { _is_being_redefined = value; } + bool is_being_redefined() const { + return (_extra_flags & _extra_is_being_redefined); + } + void set_is_being_redefined(bool value) { + if (value) { + _extra_flags |= _extra_is_being_redefined; + } else { + _extra_flags &= ~_extra_is_being_redefined; + } + } // RedefineClasses() support for previous versions: void add_previous_version(InstanceKlass* ik, int emcp_method_count); @@ -786,11 +856,11 @@ } bool has_resolved_methods() const { - return (_misc_flags & _misc_has_resolved_methods) != 0; + return (_extra_flags & _extra_has_resolved_methods) != 0; } void set_has_resolved_methods() { - _misc_flags |= _misc_has_resolved_methods; + _extra_flags |= _extra_has_resolved_methods; } private: @@ -813,6 +883,7 @@ bool is_reference_instance_klass() const { return is_kind(_misc_kind_reference); } bool is_mirror_instance_klass() const { return is_kind(_misc_kind_mirror); } bool is_class_loader_instance_klass() const { return 
is_kind(_misc_kind_class_loader); } + bool is_value_type_klass() const { return is_kind(_misc_kind_value_type); } #if INCLUDE_JVMTI @@ -988,6 +1059,8 @@ JFR_ONLY(DEFINE_KLASS_TRACE_ID_OFFSET;) static ByteSize init_thread_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_thread)); } + static ByteSize adr_valueklass_fixed_block_offset() { return in_ByteSize(offset_of(InstanceKlass, _adr_valueklass_fixed_block)); } + // subclass/subinterface checks bool implements_interface(Klass* k) const; bool is_same_or_direct_interface(Klass* k) const; @@ -1046,21 +1119,26 @@ static int size(int vtable_length, int itable_length, int nonstatic_oop_map_size, - bool is_interface, bool is_unsafe_anonymous, bool has_stored_fingerprint) { + bool is_interface, bool is_unsafe_anonymous, bool has_stored_fingerprint, + int java_fields, bool is_value_type) { return align_metadata_size(header_size() + vtable_length + itable_length + nonstatic_oop_map_size + (is_interface ? (int)sizeof(Klass*)/wordSize : 0) + (is_unsafe_anonymous ? (int)sizeof(Klass*)/wordSize : 0) + - (has_stored_fingerprint ? (int)sizeof(uint64_t*)/wordSize : 0)); + (has_stored_fingerprint ? (int)sizeof(uint64_t*)/wordSize : 0) + + (java_fields * (int)sizeof(Klass*)/wordSize) + + (is_value_type ? (int)sizeof(ValueKlassFixedBlock) : 0)); } int size() const { return size(vtable_length(), itable_length(), nonstatic_oop_map_size(), is_interface(), is_unsafe_anonymous(), - has_stored_fingerprint()); + has_stored_fingerprint(), + has_value_fields() ? 
java_fields_count() : 0, + is_value()); } #if INCLUDE_SERVICES virtual void collect_statistics(KlassSizeStats *sz) const; @@ -1073,6 +1151,8 @@ oop static_field_base_raw() { return java_mirror(); } + bool bounds_check(address addr, bool edge_ok = false, intptr_t size_in_bytes = -1) const PRODUCT_RETURN0; + OopMapBlock* start_of_nonstatic_oop_maps() const { return (OopMapBlock*)(start_of_itable() + itable_length()); } @@ -1121,8 +1201,53 @@ } } + address adr_value_fields_klasses() const { + if (has_value_fields()) { + address adr_fing = adr_fingerprint(); + if (adr_fing != NULL) { + return adr_fingerprint() + sizeof(u8); + } + + InstanceKlass** adr_host = adr_unsafe_anonymous_host(); + if (adr_host != NULL) { + return (address)(adr_host + 1); + } + + Klass* volatile* adr_impl = adr_implementor(); + if (adr_impl != NULL) { + return (address)(adr_impl + 1); + } + + return (address)end_of_nonstatic_oop_maps(); + } else { + return NULL; + } + } + + Klass* get_value_field_klass(int idx) const { + assert(has_value_fields(), "Sanity checking"); + Klass* k = ((Klass**)adr_value_fields_klasses())[idx]; + assert(k != NULL, "Should always be set before being read"); + assert(k->is_value(), "Must be a value type"); + return k; + } + + Klass* get_value_field_klass_or_null(int idx) const { + assert(has_value_fields(), "Sanity checking"); + Klass* k = ((Klass**)adr_value_fields_klasses())[idx]; + assert(k == NULL || k->is_value(), "Must be a value type"); + return k; + } + + void set_value_field_klass(int idx, Klass* k) { + assert(has_value_fields(), "Sanity checking"); + assert(k != NULL, "Should not be set to NULL"); + assert(((Klass**)adr_value_fields_klasses())[idx] == NULL, "Should not be set twice"); + ((Klass**)adr_value_fields_klasses())[idx] = k; + } + // Use this to return the size of an instance in heap words: - int size_helper() const { + virtual int size_helper() const { return layout_helper_to_size_helper(layout_helper()); } @@ -1171,6 +1296,7 @@ // Naming const 
char* signature_name() const; + const char* signature_name_of(char c) const; static Symbol* package_from_name(const Symbol* name, TRAPS); // Oop fields (and metadata) iterators @@ -1264,12 +1390,14 @@ void eager_initialize_impl (); /* jni_id_for_impl for jfieldID only */ JNIid* jni_id_for_impl (int offset); - +protected: // Returns the array class for the n'th dimension - Klass* array_klass_impl(bool or_null, int n, TRAPS); + virtual Klass* array_klass_impl(bool or_null, int n, TRAPS); // Returns the array class with this class as element type - Klass* array_klass_impl(bool or_null, TRAPS); + virtual Klass* array_klass_impl(bool or_null, TRAPS); + +private: // find a local method (returns NULL if not found) Method* find_method_impl(const Symbol* name, --- old/src/hotspot/share/oops/instanceOop.hpp 2019-03-11 14:26:19.222354975 +0100 +++ new/src/hotspot/share/oops/instanceOop.hpp 2019-03-11 14:26:18.994354978 +0100 @@ -44,8 +44,12 @@ sizeof(instanceOopDesc); } - static bool contains_field_offset(int offset, int nonstatic_field_size) { + static bool contains_field_offset(int offset, int nonstatic_field_size, bool is_value) { int base_in_bytes = base_offset_in_bytes(); + if (is_value) { + // The first field of value types is aligned on a long boundary + base_in_bytes = align_up(base_in_bytes, BytesPerLong); + } return (offset >= base_in_bytes && (offset-base_in_bytes) < nonstatic_field_size * heapOopSize); } --- old/src/hotspot/share/oops/klass.hpp 2019-03-11 14:26:20.130354962 +0100 +++ new/src/hotspot/share/oops/klass.hpp 2019-03-11 14:26:19.922354965 +0100 @@ -28,6 +28,7 @@ #include "classfile/classLoaderData.hpp" #include "memory/iterator.hpp" #include "memory/memRegion.hpp" +#include "oops/markOop.hpp" #include "oops/metadata.hpp" #include "oops/oop.hpp" #include "oops/oopHandle.hpp" @@ -44,10 +45,11 @@ InstanceMirrorKlassID, InstanceClassLoaderKlassID, TypeArrayKlassID, + ValueArrayKlassID, ObjArrayKlassID }; -const uint KLASS_ID_COUNT = 6; +const uint 
KLASS_ID_COUNT = 7; // // A Klass provides: @@ -98,7 +100,7 @@ // distinct bytes, as follows: // MSB:[tag, hsz, ebt, log2(esz)]:LSB // where: - // tag is 0x80 if the elements are oops, 0xC0 if non-oops + // tag is 0x80 if the elements are oops, 0xC0 if non-oops, 0xA0 if value types // hsz is array header size in bytes (i.e., offset of first element) // ebt is the BasicType of the elements // esz is the element size in bytes @@ -347,12 +349,13 @@ _lh_element_type_mask = right_n_bits(BitsPerByte), // shifted mask _lh_header_size_shift = BitsPerByte*2, _lh_header_size_mask = right_n_bits(BitsPerByte), // shifted mask - _lh_array_tag_bits = 2, - _lh_array_tag_shift = BitsPerInt - _lh_array_tag_bits, - _lh_array_tag_obj_value = ~0x01 // 0x80000000 >> 30 + _lh_array_tag_bits = 3, + _lh_array_tag_shift = BitsPerInt - _lh_array_tag_bits }; - static const unsigned int _lh_array_tag_type_value = 0Xffffffff; // ~0x00, // 0xC0000000 >> 30 + static const unsigned int _lh_array_tag_type_value = 0Xfffffffc; + static const unsigned int _lh_array_tag_vt_value = 0Xfffffffd; + static const unsigned int _lh_array_tag_obj_value = 0Xfffffffe; static int layout_helper_size_in_bytes(jint lh) { assert(lh > (jint)_lh_neutral_value, "must be instance"); @@ -369,12 +372,13 @@ return (jint)lh < (jint)_lh_neutral_value; } static bool layout_helper_is_typeArray(jint lh) { - // _lh_array_tag_type_value == (lh >> _lh_array_tag_shift); - return (juint)lh >= (juint)(_lh_array_tag_type_value << _lh_array_tag_shift); + return (juint) _lh_array_tag_type_value == (juint)(lh >> _lh_array_tag_shift); } static bool layout_helper_is_objArray(jint lh) { - // _lh_array_tag_obj_value == (lh >> _lh_array_tag_shift); - return (jint)lh < (jint)(_lh_array_tag_type_value << _lh_array_tag_shift); + return (juint)_lh_array_tag_obj_value == (juint)(lh >> _lh_array_tag_shift); + } + static bool layout_helper_is_valueArray(jint lh) { + return (juint)_lh_array_tag_vt_value == (juint)(lh >> _lh_array_tag_shift); } static 
int layout_helper_header_size(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); @@ -385,7 +389,7 @@ static BasicType layout_helper_element_type(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); int btvalue = (lh >> _lh_element_type_shift) & _lh_element_type_mask; - assert(btvalue >= T_BOOLEAN && btvalue <= T_OBJECT, "sanity"); + assert((btvalue >= T_BOOLEAN && btvalue <= T_OBJECT) || btvalue == T_VALUETYPE, "sanity"); return (BasicType) btvalue; } @@ -406,7 +410,7 @@ static int layout_helper_log2_element_size(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); int l2esz = (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; - assert(l2esz <= LogBytesPerLong, + assert(layout_helper_element_type(lh) == T_VALUETYPE || l2esz <= LogBytesPerLong, "sanity. l2esz: 0x%x for lh: 0x%x", (uint)l2esz, (uint)lh); return l2esz; } @@ -548,6 +552,8 @@ // For arrays, this returns the name of the element with a leading '['. // For classes, this returns the name with a leading 'L' and a trailing ';' // and the package separators as '/'. + // For value classes, this returns the name with a leading 'Q' and a trailing ';' + // and the package separators as '/'. 
virtual const char* signature_name() const; const char* joint_in_module_of_loader(const Klass* class2, bool include_parent_loader = false) const; @@ -563,7 +569,10 @@ virtual bool is_array_klass_slow() const { return false; } virtual bool is_objArray_klass_slow() const { return false; } virtual bool is_typeArray_klass_slow() const { return false; } + virtual bool is_valueArray_klass_slow() const { return false; } #endif // ASSERT + // current implementation uses this method even in non debug builds + virtual bool is_value_slow() const { return false; } public: // Fast non-virtual versions @@ -589,6 +598,11 @@ inline bool is_typeArray_klass() const { return assert_same_query( layout_helper_is_typeArray(layout_helper()), is_typeArray_klass_slow()); } + inline bool is_value() const { return is_value_slow(); } //temporary hack + inline bool is_valueArray_klass() const { return assert_same_query( + layout_helper_is_valueArray(layout_helper()), + is_valueArray_klass_slow()); } + #undef assert_same_query // Access flags @@ -621,6 +635,10 @@ // prototype markOop. If biased locking is enabled it may further be // biasable and have an epoch. markOop prototype_header() const { return _prototype_header; } + static inline markOop default_prototype_header(Klass* k) { + return (k == NULL) ? markOopDesc::prototype() : k->prototype_header(); + } + // NOTE: once instances of this klass are floating around in the // system, this header must only be updated at a safepoint. 
// NOTE 2: currently we only ever set the prototype header to the --- old/src/hotspot/share/oops/klass.inline.hpp 2019-03-11 14:26:20.550354956 +0100 +++ new/src/hotspot/share/oops/klass.inline.hpp 2019-03-11 14:26:20.346354959 +0100 @@ -30,7 +30,7 @@ #include "oops/markOop.hpp" inline void Klass::set_prototype_header(markOop header) { - assert(!header->has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances"); + assert(!is_value() || header->is_always_locked(), "Unexpected prototype"); _prototype_header = header; } --- old/src/hotspot/share/oops/klassVtable.cpp 2019-03-11 14:26:20.966354950 +0100 +++ new/src/hotspot/share/oops/klassVtable.cpp 2019-03-11 14:26:20.758354953 +0100 @@ -1333,6 +1333,18 @@ virtual void doit(InstanceKlass* intf, int method_count) = 0; }; +int count_interface_methods_needing_itable_index(Array* methods) { + int method_count = 0; + if (methods->length() > 0) { + for (int i = methods->length(); --i >= 0; ) { + if (interface_method_needs_itable_index(methods->at(i))) { + method_count++; + } + } + } + return method_count; +} + // Visit all interfaces with at least one itable method void visit_all_interfaces(Array* transitive_intf, InterfaceVisiterClosure *blk) { // Handle array argument @@ -1401,7 +1413,7 @@ CountInterfacesClosure cic; visit_all_interfaces(transitive_interfaces, &cic); - // There's alway an extra itable entry so we can null-terminate it. + // There's always an extra itable entry so we can null-terminate it. 
int itable_size = calc_itable_size(cic.nof_interfaces() + 1, cic.nof_methods()); // Statistics --- old/src/hotspot/share/oops/klassVtable.hpp 2019-03-11 14:26:21.398354944 +0100 +++ new/src/hotspot/share/oops/klassVtable.hpp 2019-03-11 14:26:21.186354947 +0100 @@ -310,7 +310,10 @@ itableMethodEntry* method_entry(int i) { assert(0 <= i && i <= _size_method_table, "index out of bounds"); return &((itableMethodEntry*)method_start())[i]; } - int size_offset_table() { return _size_offset_table; } + InstanceKlass* klass() const { return _klass; } + int table_offset() const { return _table_offset; } + int size_offset_table() const { return _size_offset_table; } + int size_method_table() const { return _size_method_table; } // Initialization void initialize_itable(bool checkconstraints, TRAPS); --- old/src/hotspot/share/oops/markOop.hpp 2019-03-11 14:26:21.822354939 +0100 +++ new/src/hotspot/share/oops/markOop.hpp 2019-03-11 14:26:21.614354941 +0100 @@ -38,6 +38,7 @@ // -------- // hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object) // JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object) +// "1" :23 epoch:2 age:4 biased_lock:1 lock:2 (biased always locked object) // size:32 ------------------------------------------>| (CMS free block) // PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object) // @@ -45,6 +46,7 @@ // -------- // unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object) // JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object) +// "1" :54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased always locked object) // PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object) // size:64 ----------------------------------------------------->| (CMS free block) // @@ -96,6 +98,18 @@ // not valid at any other time // // We assume that stack/thread pointers have the lowest two bits cleared. 
+// +// Always locked: since displaced and monitor references require memory at a +// fixed address, and hash code can be displaced, an efficiently providing a +// *permanent lock* leaves us with specializing the biased pattern (even when +// biased locking isn't enabled). Since biased_lock_alignment for the thread +// reference doesn't use the lowest bit ("2 << thread_shift"), we can use +// this illegal thread pointer alignment to denote "always locked" pattern. +// +// [ | larval |1| epoch | age | 1 | 01] permanently locked +// +// A private buffered value is always locked and can be in a larval state. +// class BasicLock; class ObjectMonitor; @@ -114,7 +128,9 @@ max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits, hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits, cms_bits = LP64_ONLY(1) NOT_LP64(0), - epoch_bits = 2 + epoch_bits = 2, + always_locked_bits = 1, + larval_bits = 1 }; // The biased locking code currently requires that the age bits be @@ -124,7 +140,9 @@ age_shift = lock_bits + biased_lock_bits, cms_shift = age_shift + age_bits, hash_shift = cms_shift + cms_bits, - epoch_shift = hash_shift + epoch_shift = hash_shift, + thread_shift = epoch_shift + epoch_bits, + larval_shift = thread_shift + always_locked_bits }; enum { lock_mask = right_n_bits(lock_bits), @@ -137,15 +155,17 @@ epoch_mask = right_n_bits(epoch_bits), epoch_mask_in_place = epoch_mask << epoch_shift, cms_mask = right_n_bits(cms_bits), - cms_mask_in_place = cms_mask << cms_shift + cms_mask_in_place = cms_mask << cms_shift, #ifndef _WIN64 - ,hash_mask = right_n_bits(hash_bits), - hash_mask_in_place = (address_word)hash_mask << hash_shift + hash_mask = right_n_bits(hash_bits), + hash_mask_in_place = (address_word)hash_mask << hash_shift, #endif + larval_mask = right_n_bits(larval_bits), + larval_mask_in_place = larval_mask << larval_shift }; // Alignment of JavaThread pointers encoded in object header required by biased locking - enum { biased_lock_alignment = 2 << 
(epoch_shift + epoch_bits) + enum { biased_lock_alignment = 2 << thread_shift }; #ifdef _WIN64 @@ -159,7 +179,8 @@ unlocked_value = 1, monitor_value = 2, marked_value = 3, - biased_lock_pattern = 5 + biased_lock_pattern = 5, + always_locked_pattern = 1 << thread_shift | biased_lock_pattern }; enum { no_hash = 0 }; // no hash value assigned @@ -172,6 +193,14 @@ enum { max_bias_epoch = epoch_mask }; + enum { larval_state_pattern = (1 << larval_shift) }; + + static markOop always_locked_prototype() { + return markOop(always_locked_pattern); + } + + bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; } + // Biased Locking accessors. // These must be checked by all code which calls into the // ObjectSynchronizer and other code. The biasing is not understood @@ -183,6 +212,7 @@ } JavaThread* biased_locker() const { assert(has_bias_pattern(), "should not call this otherwise"); + assert(!is_always_locked(), "invariant"); return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place)))); } // Indicates that the mark has the bias bit set but that it has not @@ -200,6 +230,7 @@ markOop set_bias_epoch(int epoch) { assert(has_bias_pattern(), "should not call this otherwise"); assert((epoch & (~epoch_mask)) == 0, "epoch overflow"); + assert(!is_always_locked(), "Rebias needs to fail"); return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift)); } markOop incr_bias_epoch() { @@ -349,6 +380,17 @@ return hash() == no_hash; } + // private buffered value operations + markOop enter_larval_state() const { + return markOop((value() & ~larval_mask_in_place) | larval_state_pattern); + } + markOop exit_larval_state() const { + return markOop(value() & ~larval_mask_in_place); + } + bool is_larval_state() const { + return (value() & larval_mask_in_place) == larval_state_pattern; + } + // Prototype mark for initialization static markOop prototype() { 
return markOop( no_hash_in_place | no_lock_in_place ); @@ -364,7 +406,7 @@ inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); } // Recover address of oop from encoded form used in mark - inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); } + inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); } // These markOops indicate cms free chunk blocks and not objects. // In 64 bit, the markOop is set to distinguish them from oops. --- old/src/hotspot/share/oops/method.cpp 2019-03-11 14:26:22.250354933 +0100 +++ new/src/hotspot/share/oops/method.cpp 2019-03-11 14:26:22.038354936 +0100 @@ -48,6 +48,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" @@ -107,7 +108,6 @@ clear_native_function(); set_signature_handler(NULL); } - NOT_PRODUCT(set_compiled_invocation_count(0);) } @@ -467,6 +467,26 @@ return rtf.type(); } +#ifdef ASSERT +// ValueKlass the method is declared to return. This must not +// safepoint as it is called with references live on the stack at +// locations the GC is unaware of. 
+ValueKlass* Method::returned_value_type(Thread* thread) const { + SignatureStream ss(signature()); + while (!ss.at_return_type()) { + ss.next(); + } + Handle class_loader(thread, method_holder()->class_loader()); + Handle protection_domain(thread, method_holder()->protection_domain()); + Klass* k = NULL; + { + NoSafepointVerifier nsv; + k = ss.as_klass(class_loader, protection_domain, SignatureStream::ReturnNull, thread); + } + assert(k != NULL && !thread->has_pending_exception(), "can't resolve klass"); + return ValueKlass::cast(k); +} +#endif bool Method::is_empty_method() const { return code_size() == 1 @@ -717,7 +737,7 @@ bool Method::is_klass_loaded_by_klass_index(int klass_index) const { - if( constants()->tag_at(klass_index).is_unresolved_klass() ) { + if( constants()->tag_at(klass_index).is_unresolved_klass()) { Thread *thread = Thread::current(); Symbol* klass_name = constants()->klass_name_at(klass_index); Handle loader(thread, method_holder()->class_loader()); @@ -733,7 +753,9 @@ int klass_index = constants()->klass_ref_index_at(refinfo_index); if (must_be_resolved) { // Make sure klass is resolved in constantpool. - if (constants()->tag_at(klass_index).is_unresolved_klass()) return false; + if (constants()->tag_at(klass_index).is_unresolved_klass()) { + return false; + } } return is_klass_loaded_by_klass_index(klass_index); } @@ -912,8 +934,12 @@ // Only should happen at allocate time. 
if (adapter() == NULL) { _from_compiled_entry = NULL; + _from_compiled_value_entry = NULL; + _from_compiled_value_ro_entry = NULL; } else { _from_compiled_entry = adapter()->get_c2i_entry(); + _from_compiled_value_entry = adapter()->get_c2i_value_entry(); + _from_compiled_value_ro_entry = adapter()->get_c2i_value_ro_entry(); } OrderAccess::storestore(); _from_interpreted_entry = _i2i_entry; @@ -942,6 +968,10 @@ constMethod()->set_adapter_trampoline(cds_adapter->get_adapter_trampoline()); _from_compiled_entry = cds_adapter->get_c2i_entry_trampoline(); assert(*((int*)_from_compiled_entry) == 0, "must be NULL during dump time, to be initialized at run time"); + _from_compiled_value_entry = cds_adapter->get_c2i_entry_trampoline(); + assert(*((int*)_from_compiled_value_entry) == 0, "must be NULL during dump time, to be initialized at run time"); + _from_compiled_value_ro_entry = cds_adapter->get_c2i_entry_trampoline(); + assert(*((int*)_from_compiled_value_ro_entry) == 0, "must be NULL during dump time, to be initialized at run time"); set_method_data(NULL); clear_method_counters(); @@ -1089,9 +1119,13 @@ if (mh->is_shared()) { assert(mh->adapter() == adapter, "must be"); assert(mh->_from_compiled_entry != NULL, "must be"); + assert(mh->_from_compiled_value_entry != NULL, "must be"); + assert(mh->_from_compiled_value_ro_entry != NULL, "must be"); } else { mh->set_adapter_entry(adapter); mh->_from_compiled_entry = adapter->get_c2i_entry(); + mh->_from_compiled_value_entry = adapter->get_c2i_value_entry(); + mh->_from_compiled_value_ro_entry = adapter->get_c2i_value_ro_entry(); } return adapter->get_c2i_entry(); } @@ -1129,6 +1163,12 @@ return _from_compiled_entry; } +address Method::verified_value_ro_code_entry() { + debug_only(NoSafepointVerifier nsv;) + assert(_from_compiled_value_ro_entry != NULL, "must be set"); + return _from_compiled_value_ro_entry; +} + // Check that if an nmethod ref exists, it has a backlink to this or no backlink at all // (could be racing a 
deopt). // Not inline to avoid circular ref. @@ -1160,6 +1200,8 @@ OrderAccess::storestore(); mh->_from_compiled_entry = code->verified_entry_point(); + mh->_from_compiled_value_entry = code->verified_value_entry_point(); + mh->_from_compiled_value_ro_entry = code->verified_value_ro_entry_point(); OrderAccess::storestore(); // Instantly compiled code can execute. if (!mh->is_method_handle_intrinsic()) @@ -2217,6 +2259,8 @@ if (highest_comp_level() != CompLevel_none) st->print_cr(" - highest level: %d", highest_comp_level()); st->print_cr(" - vtable index: %d", _vtable_index); + if (valid_itable_index()) + st->print_cr(" - itable index: %d", itable_index()); st->print_cr(" - i2i entry: " INTPTR_FORMAT, p2i(interpreter_entry())); st->print( " - adapters: "); AdapterHandlerEntry* a = ((Method*)this)->adapter(); @@ -2294,6 +2338,7 @@ st->print("%s", internal_name()); print_address_on(st); st->print(" "); + if (WizardMode) access_flags().print_on(st); name()->print_value_on(st); st->print(" "); signature()->print_value_on(st); --- old/src/hotspot/share/oops/method.hpp 2019-03-11 14:26:22.694354927 +0100 +++ new/src/hotspot/share/oops/method.hpp 2019-03-11 14:26:22.482354929 +0100 @@ -91,7 +91,9 @@ _has_injected_profile = 1 << 4, _running_emcp = 1 << 5, _intrinsic_candidate = 1 << 6, - _reserved_stack_access = 1 << 7 + _reserved_stack_access = 1 << 7, + _scalarized_args = 1 << 8, + _needs_stack_repair = 1 << 9 }; mutable u2 _flags; @@ -104,7 +106,9 @@ address _i2i_entry; // All-args-on-stack calling convention // Entry point for calling from compiled code, to compiled code if it exists // or else the interpreter. - volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry() + volatile address _from_compiled_entry; // Cache of: _code ? _code->verified_entry_point() : _adapter->c2i_entry() + volatile address _from_compiled_value_ro_entry; // Cache of: _code ? 
_code->verified_value_ro_entry_point() : _adapter->c2i_value_ro_entry() + volatile address _from_compiled_value_entry; // Cache of: _code ? _code->verified_value_entry_point() : _adapter->c2i_value_entry() // The entry point for calling both from and to compiled code is // "_code->entry_point()". Because of tiered compilation and de-opt, this // field can come and go. It can transition from NULL to not-null at any @@ -112,6 +116,7 @@ // NULL only at safepoints (because of a de-opt). CompiledMethod* volatile _code; // Points to the corresponding piece of native code volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry + int _max_vt_buffer; // max number of VT buffer chunk to use before recycling #if INCLUDE_AOT && defined(TIERED) CompiledMethod* _aot_code; @@ -449,6 +454,7 @@ // nmethod/verified compiler entry address verified_code_entry(); + address verified_value_ro_code_entry(); bool check_code() const; // Not inline to avoid circular ref CompiledMethod* volatile code() const; void clear_code(bool acquire_lock = true); // Clear out any compiled code @@ -574,8 +580,10 @@ void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments) Symbol* klass_name() const; // returns the name of the method holder BasicType result_type() const; // type of the method result - bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); } - bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); } + bool may_return_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY || r == T_VALUETYPE); } +#ifdef ASSERT + ValueKlass* returned_value_type(Thread* thread) const; +#endif // Checked exceptions thrown by this method (resolved to mirrors) objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); } @@ -685,7 +693,10 @@ static ByteSize 
const_offset() { return byte_offset_of(Method, _constMethod ); } static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); } static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); } + static ByteSize from_compiled_value_offset() { return byte_offset_of(Method, _from_compiled_value_entry); } + static ByteSize from_compiled_value_ro_offset(){ return byte_offset_of(Method, _from_compiled_value_ro_entry); } static ByteSize code_offset() { return byte_offset_of(Method, _code); } + static ByteSize flags_offset() { return byte_offset_of(Method, _flags); } static ByteSize method_data_offset() { return byte_offset_of(Method, _method_data); } @@ -706,6 +717,8 @@ static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); } static int intrinsic_id_size_in_bytes() { return sizeof(u2); } + static ByteSize max_vt_buffer_offset() { return byte_offset_of(Method, _max_vt_buffer); } + // Static methods that are used to implement member methods where an exposed this pointer // is needed due to possible GCs static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS); @@ -885,6 +898,22 @@ _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access); } + bool has_scalarized_args() { + return (_flags & _scalarized_args) != 0; + } + + void set_has_scalarized_args(bool x) { + _flags = x ? (_flags | _scalarized_args) : (_flags & ~_scalarized_args); + } + + bool needs_stack_repair() { + return (_flags & _needs_stack_repair) != 0; + } + + void set_needs_stack_repair(bool x) { + _flags = x ? 
(_flags | _needs_stack_repair) : (_flags & ~_needs_stack_repair); + } + JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;) ConstMethod::MethodType method_type() const { --- old/src/hotspot/share/oops/methodData.cpp 2019-03-11 14:26:23.122354921 +0100 +++ new/src/hotspot/share/oops/methodData.cpp 2019-03-11 14:26:22.910354924 +0100 @@ -216,7 +216,8 @@ args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit); } int ret_cell = 0; - if (MethodData::profile_return_for_invoke(m, bci) && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) { + if (MethodData::profile_return_for_invoke(m, bci) + && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY || inv.result_type() == T_VALUETYPE)) { ret_cell = ReturnTypeEntry::static_cell_count(); } int header_cell = 0; @@ -239,6 +240,12 @@ } SignatureInfo::do_object(begin, end); } + void do_valuetype(int begin, int end) { + if (_offsets.length() < _max) { + _offsets.push(_size); + } + SignatureInfo::do_valuetype(begin, end); + } void do_array (int begin, int end) { if (_offsets.length() < _max) { _offsets.push(_size); @@ -289,7 +296,8 @@ } if (has_return()) { - assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?"); + assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY || inv.result_type() == T_VALUETYPE, + "room for a ret type but doesn't return obj?"); _ret.post_initialize(); } } @@ -310,7 +318,8 @@ } if (has_return()) { - assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?"); + assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY || inv.result_type() == T_VALUETYPE, + "room for a ret type but doesn't return obj?"); _ret.post_initialize(); } } --- old/src/hotspot/share/oops/objArrayKlass.cpp 2019-03-11 14:26:23.558354915 +0100 +++ new/src/hotspot/share/oops/objArrayKlass.cpp 2019-03-11 14:26:23.346354918 +0100 @@ -32,6 +32,7 
@@ #include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" +#include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/arrayKlass.inline.hpp" @@ -97,31 +98,7 @@ } // Create type name for klass. - Symbol* name = NULL; - if (!element_klass->is_instance_klass() || - (name = InstanceKlass::cast(element_klass)->array_name()) == NULL) { - - ResourceMark rm(THREAD); - char *name_str = element_klass->name()->as_C_string(); - int len = element_klass->name()->utf8_length(); - char *new_str = NEW_RESOURCE_ARRAY(char, len + 4); - int idx = 0; - new_str[idx++] = '['; - if (element_klass->is_instance_klass()) { // it could be an array or simple type - new_str[idx++] = 'L'; - } - memcpy(&new_str[idx], name_str, len * sizeof(char)); - idx += len; - if (element_klass->is_instance_klass()) { - new_str[idx++] = ';'; - } - new_str[idx++] = '\0'; - name = SymbolTable::new_permanent_symbol(new_str, CHECK_0); - if (element_klass->is_instance_klass()) { - InstanceKlass* ik = InstanceKlass::cast(element_klass); - ik->set_array_name(name); - } - } + Symbol* name = ArrayKlass::create_element_klass_array_name(element_klass, CHECK_NULL); // Initialize instance variables ObjArrayKlass* oak = ObjArrayKlass::allocate(loader_data, n, element_klass, name, CHECK_0); @@ -151,10 +128,13 @@ Klass* bk; if (element_klass->is_objArray_klass()) { bk = ObjArrayKlass::cast(element_klass)->bottom_klass(); + } else if (element_klass->is_valueArray_klass()) { + bk = ValueArrayKlass::cast(element_klass)->element_klass(); } else { bk = element_klass; } - assert(bk != NULL && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass"); + assert(bk != NULL && (bk->is_instance_klass() + || bk->is_typeArray_klass()), "invalid bottom klass"); this->set_bottom_klass(bk); this->set_class_loader_data(bk->class_loader_data()); @@ -179,28 +159,30 @@ oop ObjArrayKlass::multi_allocate(int rank, 
jint* sizes, TRAPS) { int length = *sizes; + if (rank == 1) { // last dim may be valueArray + return oopFactory::new_array(element_klass(), length, CHECK_NULL); + } + guarantee(rank > 1, "Rank below 1"); // Call to lower_dimension uses this pointer, so most be called before a // possible GC Klass* ld_klass = lower_dimension(); // If length < 0 allocate will throw an exception. objArrayOop array = allocate(length, CHECK_NULL); objArrayHandle h_array (THREAD, array); - if (rank > 1) { - if (length != 0) { - for (int index = 0; index < length; index++) { - ArrayKlass* ak = ArrayKlass::cast(ld_klass); - oop sub_array = ak->multi_allocate(rank-1, &sizes[1], CHECK_NULL); - h_array->obj_at_put(index, sub_array); - } - } else { - // Since this array dimension has zero length, nothing will be - // allocated, however the lower dimension values must be checked - // for illegal values. - for (int i = 0; i < rank - 1; ++i) { - sizes += 1; - if (*sizes < 0) { - THROW_MSG_0(vmSymbols::java_lang_NegativeArraySizeException(), err_msg("%d", *sizes)); - } + if (length != 0) { + for (int index = 0; index < length; index++) { + ArrayKlass* ak = ArrayKlass::cast(ld_klass); + oop sub_array = ak->multi_allocate(rank-1, &sizes[1], CHECK_NULL); + h_array->obj_at_put(index, sub_array); + } + } else { + // Since this array dimension has zero length, nothing will be + // allocated, however the lower dimension values must be checked + // for illegal values. 
+ for (int i = 0; i < rank - 1; ++i) { + sizes += 1; + if (*sizes < 0) { + THROW_MSG_0(vmSymbols::java_lang_NegativeArraySizeException(), err_msg("%d", *sizes)); } } } @@ -247,6 +229,13 @@ int dst_pos, int length, TRAPS) { assert(s->is_objArray(), "must be obj array"); + if (EnableValhalla) { + if (d->is_valueArray()) { + ValueArrayKlass::cast(d->klass())->copy_array(s, src_pos, d, dst_pos, length, THREAD); + return; + } + } + if (!d->is_objArray()) { ResourceMark rm(THREAD); stringStream ss; @@ -298,7 +287,26 @@ if (length==0) { return; } - if (UseCompressedOops) { + if (EnableValhalla && ArrayKlass::cast(d->klass())->element_klass()->is_value()) { + assert(d->is_objArray(), "Expected objArray"); + ValueKlass* d_elem_vklass = ValueKlass::cast(ArrayKlass::cast(d->klass())->element_klass()); + objArrayOop da = objArrayOop(d); + objArrayOop sa = objArrayOop(s); + int src_end = src_pos + length; + while (src_pos < src_end) { + oop se = sa->obj_at(src_pos); + if (se == NULL) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + // Check exact type per element + if (se->klass() != d_elem_vklass) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + da->obj_at_put(dst_pos, se); // TODO: review with ValueArrayKlass::copy_array and Access API + dst_pos++; + src_pos++; + } + } else if (UseCompressedOops) { size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset(src_pos); size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset(dst_pos); assert(arrayOopDesc::obj_offset_to_raw(s, src_offset, NULL) == @@ -436,7 +444,7 @@ void ObjArrayKlass::print_on(outputStream* st) const { #ifndef PRODUCT Klass::print_on(st); - st->print(" - instance klass: "); + st->print(" - element klass: "); element_klass()->print_value_on(st); st->cr(); #endif //PRODUCT @@ -498,7 +506,8 @@ guarantee(element_klass()->is_klass(), "should be klass"); guarantee(bottom_klass()->is_klass(), "should be klass"); Klass* bk = bottom_klass(); - guarantee(bk->is_instance_klass() || 
bk->is_typeArray_klass(), "invalid bottom klass"); + guarantee(bk->is_instance_klass() || bk->is_typeArray_klass() || bk->is_valueArray_klass(), + "invalid bottom klass"); } void ObjArrayKlass::oop_verify_on(oop obj, outputStream* st) { --- old/src/hotspot/share/oops/objArrayKlass.hpp 2019-03-11 14:26:24.018354908 +0100 +++ new/src/hotspot/share/oops/objArrayKlass.hpp 2019-03-11 14:26:23.802354911 +0100 @@ -41,7 +41,6 @@ private: // If you add a new field that points to any metaspace object, you // must add this field to ObjArrayKlass::metaspace_pointers_do(). - Klass* _element_klass; // The klass of the elements of this array type Klass* _bottom_klass; // The one-dimensional type (InstanceKlass or TypeArrayKlass) // Constructor @@ -51,11 +50,6 @@ // For dummy objects ObjArrayKlass() {} - // Instance variables - Klass* element_klass() const { return _element_klass; } - void set_element_klass(Klass* k) { _element_klass = k; } - Klass** element_klass_addr() { return &_element_klass; } - Klass* bottom_klass() const { return _bottom_klass; } void set_bottom_klass(Klass* k) { _bottom_klass = k; } Klass** bottom_klass_addr() { return &_bottom_klass; } @@ -63,9 +57,6 @@ ModuleEntry* module() const; PackageEntry* package() const; - // Compiler/Interpreter offset - static ByteSize element_klass_offset() { return in_ByteSize(offset_of(ObjArrayKlass, _element_klass)); } - // Dispatched operation bool can_be_primary_super_slow() const; GrowableArray* compute_secondary_supers(int num_extra_slots, --- old/src/hotspot/share/oops/oop.cpp 2019-03-11 14:26:24.486354902 +0100 +++ new/src/hotspot/share/oops/oop.cpp 2019-03-11 14:26:24.274354905 +0100 @@ -94,7 +94,7 @@ ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY HandleMark hm(THREAD); Handle object(THREAD, this); - return ObjectSynchronizer::identity_hash_value_for(object); + return ObjectSynchronizer::FastHashCode(THREAD, object()); } // used only for asserts and guarantees @@ -113,7 +113,7 @@ if (obj->mark_raw() 
!= NULL) { return true; } - return !SafepointSynchronize::is_at_safepoint(); + return !SafepointSynchronize::is_at_safepoint() ; } // used only for asserts and guarantees @@ -150,6 +150,8 @@ bool oopDesc::is_array_noinline() const { return is_array(); } bool oopDesc::is_objArray_noinline() const { return is_objArray(); } bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); } +bool oopDesc::is_value_noinline() const { return is_value(); } +bool oopDesc::is_valueArray_noinline() const { return is_valueArray(); } bool oopDesc::has_klass_gap() { // Only has a klass gap when compressed class pointers are used. --- old/src/hotspot/share/oops/oop.hpp 2019-03-11 14:26:24.910354896 +0100 +++ new/src/hotspot/share/oops/oop.hpp 2019-03-11 14:26:24.698354899 +0100 @@ -116,12 +116,16 @@ inline bool is_array() const; inline bool is_objArray() const; inline bool is_typeArray() const; + inline bool is_value() const; + inline bool is_valueArray() const; // type test operations that don't require inclusion of oop.inline.hpp. 
bool is_instance_noinline() const; bool is_array_noinline() const; bool is_objArray_noinline() const; bool is_typeArray_noinline() const; + bool is_value_noinline() const; + bool is_valueArray_noinline() const; protected: inline oop as_oop() const { return const_cast(this); } --- old/src/hotspot/share/oops/oop.inline.hpp 2019-03-11 14:26:25.338354890 +0100 +++ new/src/hotspot/share/oops/oop.inline.hpp 2019-03-11 14:26:25.126354893 +0100 @@ -276,6 +276,8 @@ bool oopDesc::is_array() const { return klass()->is_array_klass(); } bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); } bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); } +bool oopDesc::is_value() const { return klass()->is_value(); } +bool oopDesc::is_valueArray() const { return klass()->is_valueArray_klass(); } void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast(cast_from_oop(as_oop()) + offset); } void* oopDesc::field_addr(int offset) const { return Access<>::resolve(as_oop())->field_addr_raw(offset); } @@ -329,6 +331,7 @@ return mark()->has_bias_pattern(); } + bool oopDesc::has_bias_pattern_raw() const { return mark_raw()->has_bias_pattern(); } --- old/src/hotspot/share/oops/oopsHierarchy.hpp 2019-03-11 14:26:25.762354884 +0100 +++ new/src/hotspot/share/oops/oopsHierarchy.hpp 2019-03-11 14:26:25.554354887 +0100 @@ -49,6 +49,7 @@ typedef class arrayOopDesc* arrayOop; typedef class objArrayOopDesc* objArrayOop; typedef class typeArrayOopDesc* typeArrayOop; +typedef class valueArrayOopDesc* valueArrayOop; #else @@ -179,6 +180,7 @@ DEF_OOP(array); DEF_OOP(objArray); DEF_OOP(typeArray); +DEF_OOP(valueArray); #endif // CHECK_UNHANDLED_OOPS @@ -216,8 +218,10 @@ class InstanceMirrorKlass; class InstanceClassLoaderKlass; class InstanceRefKlass; +class ValueKlass; class ArrayKlass; class ObjArrayKlass; class TypeArrayKlass; +class ValueArrayKlass; #endif // SHARE_OOPS_OOPSHIERARCHY_HPP --- old/src/hotspot/share/oops/symbol.cpp 2019-03-11 
14:26:26.190354878 +0100 +++ new/src/hotspot/share/oops/symbol.cpp 2019-03-11 14:26:25.982354881 +0100 @@ -87,6 +87,49 @@ return true; } +bool Symbol::is_Q_signature() const { + return utf8_length() > 2 && char_at(0) == 'Q' && char_at(utf8_length() - 1) == ';'; +} + +Symbol* Symbol::fundamental_name(TRAPS) { + if ((char_at(0) == 'Q' || char_at(0) == 'L') && char_at(utf8_length() - 1) == ';') { + return SymbolTable::lookup(this, 1, utf8_length() - 1, CHECK_NULL); + } else { + // reference count is incremented to be consistent with the behavior with + // the SymbolTable::lookup() call above + this->increment_refcount(); + return this; + } +} + +bool Symbol::is_same_fundamental_type(Symbol* s) const { + if (this == s) return true; + if (utf8_length() < 3) return false; + int offset1, offset2, len; + if (char_at(utf8_length() - 1) == ';') { + if (char_at(0) != 'Q' && char_at(0) != 'L') return false; + offset1 = 1; + len = utf8_length() - 2; + } else { + offset1 = 0; + len = utf8_length(); + } + if (s->char_at(s->utf8_length() - 1) == ';') { + if (s->char_at(0) != 'Q' && s->char_at(0) != 'L') return false; + offset2 = 1; + } else { + offset2 = 0; + } + if ((offset2 + len) > s->utf8_length()) return false; + if ((utf8_length() - offset1 * 2) != (s->utf8_length() - offset2 * 2)) + return false; + int l = len; + while (l-- > 0) { + if (char_at(offset1 + l) != s->char_at(offset2 + l)) + return false; + } + return true; +} // ------------------------------------------------------------------ // Symbol::index_of @@ -306,5 +349,17 @@ return os::is_readable_range(bytes, bytes + len); } +void Symbol::print_Qvalue_on(outputStream* st) const { + if (this == NULL) { + st->print("NULL"); + } else { + st->print("'Q"); + for (int i = 0; i < utf8_length(); i++) { + st->print("%c", char_at(i)); + } + st->print(";'"); + } +} + // SymbolTable prints this in its statistics NOT_PRODUCT(size_t Symbol::_total_count = 0;) --- old/src/hotspot/share/oops/symbol.hpp 2019-03-11 14:26:26.618354872 
+0100 +++ new/src/hotspot/share/oops/symbol.hpp 2019-03-11 14:26:26.406354875 +0100 @@ -202,6 +202,9 @@ bool starts_with(const char* prefix) const { return starts_with(prefix, (int) strlen(prefix)); } + bool is_Q_signature() const; + Symbol* fundamental_name(TRAPS); + bool is_same_fundamental_type(Symbol*) const; // Tests if the symbol starts with the given prefix. int index_of_at(int i, const char* str, int len) const; @@ -237,6 +240,7 @@ void print_utf8_on(outputStream* st) const; void print_on(outputStream* st) const; // First level print void print_value_on(outputStream* st) const; // Second level print. + void print_Qvalue_on(outputStream* st) const; // Second level print for Q-types. // printing on default output stream void print() { print_on(tty); } --- old/src/hotspot/share/oops/typeArrayOop.hpp 2019-03-11 14:26:27.466354861 +0100 +++ new/src/hotspot/share/oops/typeArrayOop.hpp 2019-03-11 14:26:27.258354863 +0100 @@ -114,7 +114,7 @@ // Returns the number of words necessary to hold an array of "len" // elements each of the given "byte_size". - private: + static int object_size(int lh, int length) { int instance_header_size = Klass::layout_helper_header_size(lh); int element_shift = Klass::layout_helper_log2_element_size(lh); @@ -130,7 +130,6 @@ return align_object_size((intptr_t)size_in_words); } - public: inline int object_size(); }; --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayKlass.cpp 2019-03-11 14:26:27.682354858 +0100 @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/moduleEntry.hpp" +#include "classfile/packageEntry.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmSymbols.hpp" +#include "gc/shared/collectedHeap.inline.hpp" +#include "memory/iterator.inline.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/metaspaceClosure.hpp" +#include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" +#include "memory/universe.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/objArrayOop.inline.hpp" +#include "oops/oop.inline.hpp" +#include "oops/arrayKlass.hpp" +#include "oops/arrayOop.hpp" +#include "oops/valueKlass.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/valueArrayOop.inline.hpp" +#include "oops/verifyOopClosure.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/copy.hpp" +#include "utilities/macros.hpp" + +#include "oops/valueArrayKlass.hpp" + +// Allocation... 
+ +ValueArrayKlass::ValueArrayKlass(Klass* element_klass, Symbol* name) : ArrayKlass(name, ID) { + assert(element_klass->is_value(), "Expected Value"); + + set_element_klass(ValueKlass::cast(element_klass)); + set_class_loader_data(element_klass->class_loader_data()); + set_layout_helper(array_layout_helper(ValueKlass::cast(element_klass))); + + assert(is_array_klass(), "sanity"); + assert(is_valueArray_klass(), "sanity"); + + CMH("tweak name symbol refcnt ?") +#ifndef PRODUCT + if (PrintValueArrayLayout) { + print(); + } +#endif +} + +ValueKlass* ValueArrayKlass::element_klass() const { + return ValueKlass::cast(_element_klass); +} + +void ValueArrayKlass::set_element_klass(Klass* k) { + _element_klass = k; +} + +ValueArrayKlass* ValueArrayKlass::allocate_klass(Klass* element_klass, + Symbol* name, + TRAPS) { + assert(ValueArrayFlatten, "Flatten array required"); + assert(ValueKlass::cast(element_klass)->is_atomic() || (!ValueArrayAtomicAccess), "Atomic by-default"); + + /* + * MVT->LWorld, now need to allocate secondaries array types, just like objArrayKlass... + * ...so now we are trying out covariant array types, just copy objArrayKlass + * TODO refactor any remaining commonality + */ + + // Eagerly allocate the direct array supertype. + Klass* super_klass = NULL; + if (!Universe::is_bootstrapping() || SystemDictionary::Object_klass_loaded()) { + Klass* element_super = element_klass->super(); + if (element_super != NULL) { + // The element type has a direct super. E.g., String[] has direct super of Object[]. + super_klass = element_super->array_klass_or_null(); + bool supers_exist = super_klass != NULL; + // Also, see if the element has secondary supertypes. + // We need an array type for each. 
+ Array* element_supers = element_klass->secondary_supers(); + for( int i = element_supers->length()-1; i >= 0; i-- ) { + Klass* elem_super = element_supers->at(i); + if (elem_super->array_klass_or_null() == NULL) { + supers_exist = false; + break; + } + } + if (!supers_exist) { + // Oops. Not allocated yet. Back out, allocate it, and retry. + Klass* ek = NULL; + { + MutexUnlocker mu(MultiArray_lock); + super_klass = element_super->array_klass(CHECK_0); + for( int i = element_supers->length()-1; i >= 0; i-- ) { + Klass* elem_super = element_supers->at(i); + elem_super->array_klass(CHECK_0); + } + // Now retry from the beginning + ek = element_klass->array_klass(1, CHECK_0); + } // re-lock + return ValueArrayKlass::cast(ek); + } + } else { + ShouldNotReachHere(); // Value array klass cannot be the object array klass + } + } + + + ClassLoaderData* loader_data = element_klass->class_loader_data(); + int size = ArrayKlass::static_size(ValueArrayKlass::header_size()); + ValueArrayKlass* vak = new (loader_data, size, THREAD) ValueArrayKlass(element_klass, name); + if (vak == NULL) { + return NULL; + } + loader_data->add_class(vak); + + ModuleEntry* module = vak->module(); + assert(module != NULL, "No module entry for array"); + complete_create_array_klass(vak, super_klass, module, CHECK_NULL); + return vak; +} + +ValueArrayKlass* ValueArrayKlass::allocate_klass(Klass* element_klass, TRAPS) { + Symbol* name = ArrayKlass::create_element_klass_array_name(element_klass, CHECK_NULL); + return allocate_klass(element_klass, name, THREAD); +} + +void ValueArrayKlass::initialize(TRAPS) { + element_klass()->initialize(THREAD); +} + +// Oops allocation... 
+valueArrayOop ValueArrayKlass::allocate(int length, TRAPS) { + if (length < 0) { + THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); + } + if (length > max_elements()) { + report_java_out_of_memory("Requested array size exceeds VM limit"); + JvmtiExport::post_array_size_exhausted(); + THROW_OOP_0(Universe::out_of_memory_error_array_size()); + } + + int size = valueArrayOopDesc::object_size(layout_helper(), length); + return (valueArrayOop) Universe::heap()->array_allocate(this, size, length, true, THREAD); +} + + +oop ValueArrayKlass::multi_allocate(int rank, jint* last_size, TRAPS) { + // For valueArrays this is only called for the last dimension + assert(rank == 1, "just checking"); + int length = *last_size; + return allocate(length, THREAD); +} + +jint ValueArrayKlass::array_layout_helper(ValueKlass* vk) { + BasicType etype = T_VALUETYPE; + int atag = _lh_array_tag_vt_value; + int esize = upper_log2(vk->raw_value_byte_size()); + int hsize = arrayOopDesc::base_offset_in_bytes(etype); + + int lh = (atag << _lh_array_tag_shift) + | (hsize << _lh_header_size_shift) + | ((int)etype << _lh_element_type_shift) + | ((esize) << _lh_log2_element_size_shift); + + assert(lh < (int)_lh_neutral_value, "must look like an array layout"); + assert(layout_helper_is_array(lh), "correct kind"); + assert(layout_helper_is_valueArray(lh), "correct kind"); + assert(!layout_helper_is_typeArray(lh), "correct kind"); + assert(!layout_helper_is_objArray(lh), "correct kind"); + assert(layout_helper_header_size(lh) == hsize, "correct decode"); + assert(layout_helper_element_type(lh) == etype, "correct decode"); + assert(layout_helper_log2_element_size(lh) == esize, "correct decode"); + assert((1 << esize) < BytesPerLong || is_aligned(hsize, HeapWordsPerLong), "unaligned base"); + + return lh; +} + +int ValueArrayKlass::oop_size(oop obj) const { + assert(obj->is_valueArray(),"must be a value array"); + valueArrayOop array = valueArrayOop(obj); + return array->object_size(); +} + 
+jint ValueArrayKlass::max_elements() const { + return arrayOopDesc::max_array_length(arrayOopDesc::header_size(T_VALUETYPE), element_byte_size()); +} + +oop ValueArrayKlass::protection_domain() const { + return element_klass()->protection_domain(); +} + +void ValueArrayKlass::copy_array(arrayOop s, int src_pos, + arrayOop d, int dst_pos, int length, TRAPS) { + + assert(s->is_objArray() || s->is_valueArray(), "must be obj or value array"); + + // Check destination + if ((!d->is_valueArray()) && (!d->is_objArray())) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + // Check if all offsets and lengths are non negative + if (src_pos < 0 || dst_pos < 0 || length < 0) { + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + // Check if the ranges are valid + if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) + || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + // Check zero copy + if (length == 0) + return; + + ArrayKlass* sk = ArrayKlass::cast(s->klass()); + ArrayKlass* dk = ArrayKlass::cast(d->klass()); + Klass* d_elem_klass = dk->element_klass(); + Klass* s_elem_klass = sk->element_klass(); + /**** CMH: compare and contrast impl, re-factor once we find edge cases... 
****/ + + if (sk->is_valueArray_klass()) { + assert(sk == this, "Unexpected call to copy_array"); + // Check subtype, all src homogeneous, so just once + if (!s_elem_klass->is_subtype_of(d_elem_klass)) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + valueArrayOop sa = valueArrayOop(s); + ValueKlass* s_elem_vklass = element_klass(); + + // valueArray-to-valueArray + if (dk->is_valueArray_klass()) { + // element types MUST be exact, subtype check would be dangerous + if (dk != this) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + valueArrayOop da = valueArrayOop(d); + address dst = (address) da->value_at_addr(dst_pos, layout_helper()); + address src = (address) sa->value_at_addr(src_pos, layout_helper()); + if (contains_oops()) { + int elem_incr = 1 << log2_element_size(); + address src_end = src + (length << log2_element_size()); + while (src < src_end) { + s_elem_vklass->value_store(src, dst, element_byte_size(), true, false); + src += elem_incr; + dst += elem_incr; + } + } else { + // we are basically a type array...don't bother limiting element copy + // it would have to be a lot wasted space to be worth value_store() calls, need a setting here ? 
+ Copy::conjoint_memory_atomic(src, dst, (size_t)length << log2_element_size()); + } + } + else { // valueArray-to-objArray + assert(dk->is_objArray_klass(), "Expected objArray here"); + // Need to allocate each new src elem payload -> dst oop + objArrayHandle dh(THREAD, (objArrayOop)d); + valueArrayHandle sh(THREAD, sa); + int dst_end = dst_pos + length; + while (dst_pos < dst_end) { + oop o = s_elem_vklass->allocate_instance(CHECK); + s_elem_vklass->value_store(sh->value_at_addr(src_pos, layout_helper()), + s_elem_vklass->data_for_oop(o), true, true); + dh->obj_at_put(dst_pos, o); + dst_pos++; + src_pos++; + } + } + } else { + assert(s->is_objArray(), "Expected objArray"); + objArrayOop sa = objArrayOop(s); + assert(d->is_valueArray(), "Excepted valueArray"); // objArray-to-valueArray + ValueKlass* d_elem_vklass = ValueKlass::cast(d_elem_klass); + valueArrayOop da = valueArrayOop(d); + + int src_end = src_pos + length; + int delem_incr = 1 << dk->log2_element_size(); + address dst = (address) da->value_at_addr(dst_pos, layout_helper()); + while (src_pos < src_end) { + oop se = sa->obj_at(src_pos); + if (se == NULL) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + // Check exact type per element + if (se->klass() != d_elem_klass) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + d_elem_vklass->value_store(d_elem_vklass->data_for_oop(se), dst, true, false); + dst += delem_incr; + src_pos++; + } + } +} + + +Klass* ValueArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) { + + assert(dimension() <= n, "check order of chain"); + int dim = dimension(); + if (dim == n) return this; + + if (higher_dimension() == NULL) { + if (or_null) return NULL; + + ResourceMark rm; + JavaThread *jt = (JavaThread *)THREAD; + { + // Ensure atomic creation of higher dimensions + MutexLocker mu(MultiArray_lock, THREAD); + + // Check if another thread beat us + if (higher_dimension() == NULL) { + + // Create multi-dim klass object and link them together + 
Klass* k = + ObjArrayKlass::allocate_objArray_klass(class_loader_data(), dim + 1, this, CHECK_NULL); + ObjArrayKlass* ak = ObjArrayKlass::cast(k); + ak->set_lower_dimension(this); + OrderAccess::storestore(); + set_higher_dimension(ak); + assert(ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass"); + } + } + } else { + CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); + } + + ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension()); + if (or_null) { + return ak->array_klass_or_null(n); + } + return ak->array_klass(n, THREAD); +} + +Klass* ValueArrayKlass::array_klass_impl(bool or_null, TRAPS) { + return array_klass_impl(or_null, dimension() + 1, THREAD); +} + +ModuleEntry* ValueArrayKlass::module() const { + assert(element_klass() != NULL, "ValueArrayKlass returned unexpected NULL bottom_klass"); + // The array is defined in the module of its bottom class + return element_klass()->module(); +} + +PackageEntry* ValueArrayKlass::package() const { + assert(element_klass() != NULL, "ValuerrayKlass returned unexpected NULL bottom_klass"); + return element_klass()->package(); +} + +bool ValueArrayKlass::can_be_primary_super_slow() const { + return true; +} + +GrowableArray* ValueArrayKlass::compute_secondary_supers(int num_extra_slots, + Array* transitive_interfaces) { + assert(transitive_interfaces == NULL, "sanity"); + // interfaces = { cloneable_klass, serializable_klass, elemSuper[], ... }; + Array* elem_supers = element_klass()->secondary_supers(); + int num_elem_supers = elem_supers == NULL ? 0 : elem_supers->length(); + int num_secondaries = num_extra_slots + 2 + num_elem_supers; + if (num_secondaries == 2) { + // Must share this for correct bootstrapping! 
+ set_secondary_supers(Universe::the_array_interfaces_array()); + return NULL; + } else { + GrowableArray* secondaries = new GrowableArray(num_elem_supers+2); + secondaries->push(SystemDictionary::Cloneable_klass()); + secondaries->push(SystemDictionary::Serializable_klass()); + for (int i = 0; i < num_elem_supers; i++) { + Klass* elem_super = (Klass*) elem_supers->at(i); + Klass* array_super = elem_super->array_klass_or_null(); + assert(array_super != NULL, "must already have been created"); + secondaries->push(array_super); + } + return secondaries; + } +} + +void ValueArrayKlass::print_on(outputStream* st) const { +#ifndef PRODUCT + assert(!is_objArray_klass(), "Unimplemented"); + + st->print("Value Type Array: "); + Klass::print_on(st); + + st->print(" - element klass: "); + element_klass()->print_value_on(st); + st->cr(); + + int elem_size = element_byte_size(); + st->print(" - element size %i ", elem_size); + st->print("aligned layout size %i", 1 << layout_helper_log2_element_size(layout_helper())); + st->cr(); +#endif //PRODUCT +} + +void ValueArrayKlass::print_value_on(outputStream* st) const { + assert(is_klass(), "must be klass"); + + element_klass()->print_value_on(st); + st->print("[]"); +} + + +#ifndef PRODUCT +void ValueArrayKlass::oop_print_on(oop obj, outputStream* st) { + ArrayKlass::oop_print_on(obj, st); + valueArrayOop va = valueArrayOop(obj); + ValueKlass* vk = element_klass(); + int print_len = MIN2((intx) va->length(), MaxElementPrintSize); + for(int index = 0; index < print_len; index++) { + int off = (address) va->value_at_addr(index, layout_helper()) - (address) obj; + st->print_cr(" - Index %3d offset %3d: ", index, off); + oop obj = (oop) ((address)va->value_at_addr(index, layout_helper()) - vk->first_field_offset()); + FieldPrinter print_field(st, obj); + vk->do_nonstatic_fields(&print_field); + st->cr(); + } + int remaining = va->length() - print_len; + if (remaining > 0) { + st->print_cr(" - <%d more elements, increase 
MaxElementPrintSize to print>", remaining); + } +} +#endif //PRODUCT + +void ValueArrayKlass::oop_print_value_on(oop obj, outputStream* st) { + assert(obj->is_valueArray(), "must be valueArray"); + st->print("a "); + element_klass()->print_value_on(st); + int len = valueArrayOop(obj)->length(); + st->print("[%d] ", len); + obj->print_address_on(st); + if (PrintMiscellaneous && (WizardMode || Verbose)) { + int lh = layout_helper(); + st->print("{"); + for (int i = 0; i < len; i++) { + if (i > 4) { + st->print("..."); break; + } + st->print(" " INTPTR_FORMAT, (intptr_t)(void*)valueArrayOop(obj)->value_at_addr(i , lh)); + } + st->print(" }"); + } +} + +// Verification +class VerifyElementClosure: public BasicOopIterateClosure { + public: + virtual void do_oop(oop* p) { VerifyOopClosure::verify_oop.do_oop(p); } + virtual void do_oop(narrowOop* p) { VerifyOopClosure::verify_oop.do_oop(p); } +}; + +void ValueArrayKlass::oop_verify_on(oop obj, outputStream* st) { + ArrayKlass::oop_verify_on(obj, st); + guarantee(obj->is_valueArray(), "must be valueArray"); + + if (contains_oops()) { + valueArrayOop va = valueArrayOop(obj); + VerifyElementClosure ec; + va->oop_iterate(&ec); + } +} + +void ValueArrayKlass::verify_on(outputStream* st) { + ArrayKlass::verify_on(st); + guarantee(element_klass()->is_value(), "should be value type klass"); +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayKlass.hpp 2019-03-11 14:26:28.126354851 +0100 @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_VALUEARRAYKLASS_HPP +#define SHARE_VM_OOPS_VALUEARRAYKLASS_HPP + +#include "classfile/classLoaderData.hpp" +#include "oops/arrayKlass.hpp" +#include "oops/valueKlass.hpp" +#include "utilities/macros.hpp" + +/** + * Array of values, gives a layout of typeArrayOop, but needs oops iterators + */ +class ValueArrayKlass : public ArrayKlass { + friend class VMStructs; + + public: + static const KlassID ID = ValueArrayKlassID; + + private: + // Constructor + ValueArrayKlass(Klass* element_klass, Symbol* name); + + static ValueArrayKlass* allocate_klass(Klass* element_klass, Symbol* name, TRAPS); + protected: + // Returns the ArrayKlass for n'th dimension. + Klass* array_klass_impl(bool or_null, int n, TRAPS); + + // Returns the array class with this class as element type. 
+ Klass* array_klass_impl(bool or_null, TRAPS); + + public: + + ValueArrayKlass() {} + + virtual ValueKlass* element_klass() const; + virtual void set_element_klass(Klass* k); + + // Casting from Klass* + static ValueArrayKlass* cast(Klass* k) { + assert(k->is_valueArray_klass(), "cast to ValueArrayKlass"); + return (ValueArrayKlass*) k; + } + + // klass allocation + static ValueArrayKlass* allocate_klass(Klass* element_klass, TRAPS); + + void initialize(TRAPS); + + ModuleEntry* module() const; + PackageEntry* package() const; + + bool can_be_primary_super_slow() const; + GrowableArray* compute_secondary_supers(int num_extra_slots, + Array* transitive_interfaces); + + int element_byte_size() const { return 1 << layout_helper_log2_element_size(_layout_helper); } + + bool is_valueArray_klass_slow() const { return true; } + + bool contains_oops() { + return element_klass()->contains_oops(); + } + + bool is_atomic() { + return element_klass()->is_atomic(); + } + + oop protection_domain() const; + + static jint array_layout_helper(ValueKlass* vklass); // layout helper for values + + // sizing + static int header_size() { return sizeof(ValueArrayKlass)/HeapWordSize; } + int size() const { return ArrayKlass::static_size(header_size()); } + + jint max_elements() const; + + int oop_size(oop obj) const; + + // Oop Allocation + valueArrayOop allocate(int length, TRAPS); + oop multi_allocate(int rank, jint* sizes, TRAPS); + + // Naming + const char* internal_name() const { return external_name(); } + + // Copying + void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS); + + // GC specific object visitors + // + // Mark Sweep + int oop_ms_adjust_pointers(oop obj); + + + template + inline void oop_oop_iterate(oop obj, OopClosureType* closure); + + template + inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure); + + template + inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr); + + template + inline 
void oop_oop_iterate_elements(valueArrayOop a, OopClosureType* closure); + +private: + template + inline void oop_oop_iterate_elements_specialized(valueArrayOop a, OopClosureType* closure); + + template + inline void oop_oop_iterate_elements_bounded(valueArrayOop a, OopClosureType* closure, MemRegion mr); + + template + inline void oop_oop_iterate_elements_specialized_bounded(valueArrayOop a, OopClosureType* closure, void* low, void* high); + + public: + // Printing + void print_on(outputStream* st) const; + void print_value_on(outputStream* st) const; + + void oop_print_value_on(oop obj, outputStream* st); +#ifndef PRODUCT + void oop_print_on(oop obj, outputStream* st); +#endif + + // Verification + void verify_on(outputStream* st); + void oop_verify_on(oop obj, outputStream* st); +}; + +#endif + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayKlass.inline.hpp 2019-03-11 14:26:28.582354845 +0100 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_OOPS_VALUEARRAYKLASS_INLINE_HPP +#define SHARE_VM_OOPS_VALUEARRAYKLASS_INLINE_HPP + +#include "memory/memRegion.hpp" +#include "memory/iterator.hpp" +#include "oops/arrayKlass.hpp" +#include "oops/klass.hpp" +#include "oops/oop.inline.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/valueArrayOop.inline.hpp" +#include "oops/valueKlass.hpp" +#include "oops/valueKlass.inline.hpp" +#include "utilities/macros.hpp" + +/* + * Warning incomplete: requires embedded oops, not yet enabled, so consider this a "sketch-up" of oop iterators + */ + +template +void ValueArrayKlass::oop_oop_iterate_elements_specialized(valueArrayOop a, + OopClosureType* closure) { + assert(contains_oops(), "Nothing to iterate"); + + const int shift = Klass::layout_helper_log2_element_size(layout_helper()); + const int addr_incr = 1 << shift; + uintptr_t elem_addr = (uintptr_t) a->base(); + const uintptr_t stop_addr = elem_addr + ((uintptr_t)a->length() << shift); + const int oop_offset = element_klass()->first_field_offset(); + + while (elem_addr < stop_addr) { + element_klass()->oop_iterate_specialized((address)(elem_addr - oop_offset), closure); + elem_addr += addr_incr; + } +} + +template +void ValueArrayKlass::oop_oop_iterate_elements_specialized_bounded(valueArrayOop a, + OopClosureType* closure, + void* lo, void* hi) { + assert(contains_oops(), "Nothing to iterate"); + + const int shift = Klass::layout_helper_log2_element_size(layout_helper()); + const int addr_incr = 1 << shift; + uintptr_t elem_addr = (uintptr_t)a->base(); + uintptr_t stop_addr = elem_addr + ((uintptr_t)a->length() << shift); + const int oop_offset = element_klass()->first_field_offset(); + + if (elem_addr < (uintptr_t) lo) { + uintptr_t diff = ((uintptr_t) lo) - elem_addr; + elem_addr 
+= (diff >> shift) << shift; + } + if (stop_addr > (uintptr_t) hi) { + uintptr_t diff = stop_addr - ((uintptr_t) hi); + stop_addr -= (diff >> shift) << shift; + } + + const uintptr_t end = stop_addr; + while (elem_addr < end) { + element_klass()->oop_iterate_specialized_bounded((address)(elem_addr - oop_offset), closure, lo, hi); + elem_addr += addr_incr; + } +} + +template +void ValueArrayKlass::oop_oop_iterate_elements(valueArrayOop a, OopClosureType* closure) { + if (contains_oops()) { + oop_oop_iterate_elements_specialized(a, closure); + } +} + +template +void ValueArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { + assert(obj->is_valueArray(),"must be a value array"); + valueArrayOop a = valueArrayOop(obj); + + if (Devirtualizer::do_metadata(closure)) { + Devirtualizer::do_klass(closure, obj->klass()); + Devirtualizer::do_klass(closure, ValueArrayKlass::cast(obj->klass())->element_klass()); + } + + oop_oop_iterate_elements(a, closure); +} + +template +void ValueArrayKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) { + // TODO + oop_oop_iterate(obj, closure); +} + +template +void ValueArrayKlass::oop_oop_iterate_elements_bounded(valueArrayOop a, OopClosureType* closure, MemRegion mr) { + if (contains_oops()) { + oop_oop_iterate_elements_specialized_bounded(a, closure, mr.start(), mr.end()); + } +} + + +template +void ValueArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) { + valueArrayOop a = valueArrayOop(obj); + if (Devirtualizer::do_metadata(closure)) { + Devirtualizer::do_klass(closure, a->klass()); + Devirtualizer::do_klass(closure, ValueArrayKlass::cast(obj->klass())->element_klass()); + } + oop_oop_iterate_elements_bounded(a, closure, mr); +} + +#endif // SHARE_VM_OOPS_VALUEARRAYKLASS_INLINE_HPP --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayOop.cpp 2019-03-11 14:26:29.034354839 +0100 @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2015, 2018 Oracle and/or 
its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "valueArrayOop.hpp" +#include "valueArrayKlass.inline.hpp" + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayOop.hpp 2019-03-11 14:26:29.486354833 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_VALUEARRAYOOP_HPP +#define SHARE_VM_OOPS_VALUEARRAYOOP_HPP + +#include "oops/arrayOop.hpp" +#include "oops/klass.hpp" +#include "oops/oop.inline.hpp" + +// A valueArrayOop is an array containing value types (may include flatten embedded oop elements). + +class valueArrayOopDesc : public arrayOopDesc { + + public: + void* base() const; + void* value_at_addr(int index, jint lh) const; + // Sizing + static size_t element_size(int lh, int nof_elements) { + return nof_elements << Klass::layout_helper_log2_element_size(lh); + } + + static int object_size(int lh, int length) { + julong size_in_bytes = header_size_in_bytes() + element_size(lh, length); + julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize); + assert(size_in_words <= (julong)max_jint, "no overflow"); + return align_object_size((intptr_t)size_in_words); + } + + int object_size() const; + +}; + +#endif // SHARE_VM_OOPS_VALUEARRAYOOP_HPP --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueArrayOop.inline.hpp 2019-03-11 14:26:29.938354826 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_VALUEARRAYOOP_INLINE_HPP +#define SHARE_VM_OOPS_VALUEARRAYOOP_INLINE_HPP + +#include "oops/access.inline.hpp" +#include "oops/arrayOop.inline.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/globals.hpp" + +inline void* valueArrayOopDesc::base() const { return arrayOopDesc::base(T_VALUETYPE); } + +inline void* valueArrayOopDesc::value_at_addr(int index, jint lh) const { + assert(is_within_bounds(index), "index out of bounds"); + + address addr = (address) base(); + addr += (index << Klass::layout_helper_log2_element_size(lh)); + return (void*) addr; +} + +inline int valueArrayOopDesc::object_size() const { + return object_size(klass()->layout_helper(), length()); +} + + + +#endif // SHARE_VM_OOPS_VALUEARRAYOOP_INLINE_HPP --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueKlass.cpp 2019-03-11 14:26:30.394354820 +0100 @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/collectedHeap.inline.hpp" +#include "gc/shared/gcLocker.inline.hpp" +#include "interpreter/interpreter.hpp" +#include "logging/log.hpp" +#include "memory/metadataFactory.hpp" +#include "oops/access.hpp" +#include "oops/compressedOops.inline.hpp" +#include "oops/fieldStreams.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/method.hpp" +#include "oops/oop.inline.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/valueKlass.hpp" +#include "oops/valueArrayKlass.hpp" +#include "runtime/fieldDescriptor.inline.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/signature.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/copy.hpp" + +int ValueKlass::first_field_offset() const { +#ifdef ASSERT + int first_offset = INT_MAX; + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { + if (fs.offset() < first_offset) first_offset= fs.offset(); + } +#endif + int 
base_offset = instanceOopDesc::base_offset_in_bytes(); + // The first field of value types is aligned on a long boundary + base_offset = align_up(base_offset, BytesPerLong); + assert(base_offset == first_offset, "inconsistent offsets"); + return base_offset; +} + +int ValueKlass::raw_value_byte_size() const { + int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop; + // If bigger than 64 bits or needs oop alignment, then use jlong aligned + // which for values should be jlong aligned, asserts in raw_field_copy otherwise + if (heapOopAlignedSize >= longSize || contains_oops()) { + return heapOopAlignedSize; + } + // Small primitives... + // If a few small basic type fields, return the actual size, i.e. + // 1 byte = 1 + // 2 byte = 2 + // 3 byte = 4, because pow2 needed for element stores + int first_offset = first_field_offset(); + int last_offset = 0; // find the last offset, add basic type size + int last_tsz = 0; + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { + if (fs.access_flags().is_static()) { + continue; + } else if (fs.offset() > last_offset) { + BasicType type = fs.field_descriptor().field_type(); + if (is_java_primitive(type)) { + last_tsz = type2aelembytes(type); + } else if (type == T_VALUETYPE) { + // Not just primitives. Layout aligns embedded value, so use jlong aligned it is + return heapOopAlignedSize; + } else { + guarantee(0, "Unknown type %d", type); + } + assert(last_tsz != 0, "Invariant"); + last_offset = fs.offset(); + } + } + // Assumes VT with no fields are meaningless and illegal + last_offset += last_tsz; + assert(last_offset > first_offset && last_tsz, "Invariant"); + return 1 << upper_log2(last_offset - first_offset); +} + +instanceOop ValueKlass::allocate_instance(TRAPS) { + int size = size_helper(); // Query before forming handle. 
+ + instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL); + assert(oop->mark()->is_always_locked(), "Unlocked value type"); + return oop; +} + +bool ValueKlass::is_atomic() { + return (nonstatic_field_size() * heapOopSize) <= longSize; +} + +int ValueKlass::nonstatic_oop_count() { + int oops = 0; + int map_count = nonstatic_oop_map_count(); + OopMapBlock* block = start_of_nonstatic_oop_maps(); + OopMapBlock* end = block + map_count; + while (block != end) { + oops += block->count(); + block++; + } + return oops; +} + +// Arrays of... + +bool ValueKlass::flatten_array() { + if (!ValueArrayFlatten) { + return false; + } + + int elem_bytes = raw_value_byte_size(); + // Too big + if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) { + return false; + } + // Too many embedded oops + if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) { + return false; + } + + return true; +} + + +Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) { + if (!flatten_array()) { + return InstanceKlass::array_klass_impl(or_null, n, THREAD); + } + + // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass" + if (array_klasses() == NULL) { + if (or_null) return NULL; + + ResourceMark rm; + JavaThread *jt = (JavaThread *)THREAD; + { + // Atomic creation of array_klasses + MutexLocker ma(MultiArray_lock, THREAD); + + // Check if update has already taken place + if (array_klasses() == NULL) { + Klass* ak; + if (is_atomic() || (!ValueArrayAtomicAccess)) { + ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL); + } else { + ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL); + } + set_array_klasses(ak); + } + } + } + // _this will always be set at this point + ArrayKlass* ak = ArrayKlass::cast(array_klasses()); + if (or_null) { + return ak->array_klass_or_null(n); + } + return ak->array_klass(n, THREAD); +} + +Klass* 
ValueKlass::array_klass_impl(bool or_null, TRAPS) { + return array_klass_impl(or_null, 1, THREAD); +} + +void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) { + /* + * Try not to shear fields even if not an atomic store... + * + * First 3 cases handle value array store, otherwise works on the same basis + * as JVM_Clone, at this size data is aligned. The order of primitive types + * is largest to smallest, and it not possible for fields to stradle long + * copy boundaries. + * + * If MT without exclusive access, possible to observe partial value store, + * but not partial primitive and reference field values + */ + switch (raw_byte_size) { + case 1: + *((jbyte*) dst) = *(jbyte*)src; + break; + case 2: + *((jshort*) dst) = *(jshort*)src; + break; + case 4: + *((jint*) dst) = *(jint*) src; + break; + default: + assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size"); + Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong); + } +} + +/* + * Store the value of this klass contained with src into dst. + * + * This operation is appropriate for use from vastore, vaload and putfield (for values) + * + * GC barriers currently can lock with no safepoint check and allocate c-heap, + * so raw point is "safe" for now. + * + * Going forward, look to use machine generated (stub gen or bc) version for most used klass layouts + * + */ +void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) { + if (contains_oops()) { + if (dst_heap) { + // src/dst aren't oops, need offset to adjust oop map offset + const address dst_oop_addr = ((address) dst) - first_field_offset(); + + ModRefBarrierSet* bs = barrier_set_cast(BarrierSet::barrier_set()); + + // Pre-barriers... 
+ OopMapBlock* map = start_of_nonstatic_oop_maps(); + OopMapBlock* const end = map + nonstatic_oop_map_count(); + while (map != end) { + // Shame we can't just use the existing oop iterator...src/dst aren't oop + address doop_address = dst_oop_addr + map->offset(); + // TEMP HACK: barrier code need to migrate to => access API (need own versions of value type ops) + if (UseCompressedOops) { + bs->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized); + } else { + bs->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized); + } + map++; + } + + raw_field_copy(src, dst, raw_byte_size); + + // Post-barriers... + map = start_of_nonstatic_oop_maps(); + while (map != end) { + address doop_address = dst_oop_addr + map->offset(); + bs->write_ref_array((HeapWord*) doop_address, map->count()); + map++; + } + } else { // Buffered value case + raw_field_copy(src, dst, raw_byte_size); + } + } else { // Primitive-only case... + raw_field_copy(src, dst, raw_byte_size); + } +} + +// Value type arguments are not passed by reference, instead each +// field of the value type is passed as an argument. This helper +// function collects the fields of the value types (including embedded +// value type's fields) in a list. Included with the field's type is +// the offset of each field in the value type: i2c and c2i adapters +// need that to load or store fields. Finally, the list of fields is +// sorted in order of increasing offsets: the adapters and the +// compiled code need to agree upon the order of fields. +// +// The list of basic types that is returned starts with a T_VALUETYPE +// and ends with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as +// delimiters. Every entry between the two is a field of the value +// type. If there's an embedded value type in the list, it also starts +// with a T_VALUETYPE and ends with a T_VOID. 
This is so we can +// generate a unique fingerprint for the method's adapters and we can +// generate the list of basic types from the interpreter point of view +// (value types passed as reference: iterate on the list until a +// T_VALUETYPE, drop everything until and including the closing +// T_VOID) or the compiler point of view (each field of the value +// types is an argument: drop all T_VALUETYPE/T_VOID from the list). +int ValueKlass::collect_fields(GrowableArray* sig, int base_off) const { + int count = 0; + SigEntry::add_entry(sig, T_VALUETYPE, base_off); + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { + if (fs.access_flags().is_static()) continue; + int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0); + if (fs.is_flattened()) { + // Resolve klass of flattened value type field and recursively collect fields + Klass* vk = get_value_field_klass(fs.index()); + count += ValueKlass::cast(vk)->collect_fields(sig, offset); + } else { + BasicType bt = FieldType::basic_type(fs.signature()); + if (bt == T_VALUETYPE) { + bt = T_OBJECT; + } + SigEntry::add_entry(sig, bt, offset); + count += type2size[bt]; + } + } + int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0); + SigEntry::add_entry(sig, T_VOID, offset); + if (base_off == 0) { + sig->sort(SigEntry::compare); + } + assert(sig->at(0)._bt == T_VALUETYPE && sig->at(sig->length()-1)._bt == T_VOID, "broken structure"); + return count; +} + +void ValueKlass::initialize_calling_convention(TRAPS) { + // Because the pack and unpack handler addresses need to be loadable from generated code, + // they are stored at a fixed offset in the klass metadata. Since value type klasses do + // not have a vtable, the vtable offset is used to store these addresses. 
+ if (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs) { + ResourceMark rm; + GrowableArray sig_vk; + int nb_fields = collect_fields(&sig_vk); + Array* extended_sig = MetadataFactory::new_array(class_loader_data(), sig_vk.length(), CHECK); + *((Array**)adr_extended_sig()) = extended_sig; + for (int i = 0; i < sig_vk.length(); i++) { + extended_sig->at_put(i, sig_vk.at(i)); + } + + if (ValueTypeReturnedAsFields) { + nb_fields++; + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields); + sig_bt[0] = T_METADATA; + SigEntry::fill_sig_bt(&sig_vk, sig_bt+1); + VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields); + int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields); + + if (total > 0) { + Array* return_regs = MetadataFactory::new_array(class_loader_data(), nb_fields, CHECK); + *((Array**)adr_return_regs()) = return_regs; + for (int i = 0; i < nb_fields; i++) { + return_regs->at_put(i, regs[i]); + } + + BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this); + *((address*)adr_pack_handler()) = buffered_blob->pack_fields(); + *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields(); + assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob"); + } + } + } +} + +void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) { + if (extended_sig() != NULL) { + MetadataFactory::free_array(loader_data, extended_sig()); + } + if (return_regs() != NULL) { + MetadataFactory::free_array(loader_data, return_regs()); + } + cleanup_blobs(); + InstanceKlass::deallocate_contents(loader_data); +} + +void ValueKlass::cleanup(ValueKlass* ik) { + ik->cleanup_blobs(); +} + +void ValueKlass::cleanup_blobs() { + if (pack_handler() != NULL) { + CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler()); + assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type"); + BufferBlob::free((BufferBlob*)buffered_blob); + *((address*)adr_pack_handler()) = NULL; + 
*((address*)adr_unpack_handler()) = NULL; + } +} + +// Can this value type be returned as multiple values? +bool ValueKlass::can_be_returned_as_fields() const { + return return_regs() != NULL; +} + +// Create handles for all oop fields returned in registers that are going to be live across a safepoint +void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray& handles) const { + Thread* thread = Thread::current(); + const Array* sig_vk = extended_sig(); + const Array* regs = return_regs(); + int j = 1; + + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_OBJECT || bt == T_ARRAY) { + VMRegPair pair = regs->at(j); + address loc = reg_map.location(pair.first()); + oop v = *(oop*)loc; + assert(v == NULL || oopDesc::is_oop(v), "not an oop?"); + assert(Universe::heap()->is_in_or_null(v), "must be heap pointer"); + handles.push(Handle(thread, v)); + } + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID && + sig_vk->at(i-1)._bt != T_LONG && + sig_vk->at(i-1)._bt != T_DOUBLE) { + continue; + } + j++; + } + assert(j == regs->length(), "missed a field?"); +} + +// Update oop fields in registers from handles after a safepoint +void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray& handles) const { + assert(ValueTypeReturnedAsFields, "inconsistent"); + const Array* sig_vk = extended_sig(); + const Array* regs = return_regs(); + assert(regs != NULL, "inconsistent"); + + int j = 1; + for (int i = 0, k = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_OBJECT || bt == T_ARRAY) { + VMRegPair pair = regs->at(j); + address loc = reg_map.location(pair.first()); + *(oop*)loc = handles.at(k++)(); + } + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID && + sig_vk->at(i-1)._bt != T_LONG && + sig_vk->at(i-1)._bt != T_DOUBLE) { + continue; + } + j++; + } + assert(j == regs->length(), "missed a field?"); +} + +// Fields are in registers. 
Create an instance of the value type and +// initialize it with the values of the fields. +oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray& handles, TRAPS) { + oop new_vt = allocate_instance(CHECK_NULL); + const Array* sig_vk = extended_sig(); + const Array* regs = return_regs(); + + int j = 1; + int k = 0; + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID) { + if (sig_vk->at(i-1)._bt == T_LONG || + sig_vk->at(i-1)._bt == T_DOUBLE) { + j++; + } + continue; + } + int off = sig_vk->at(i)._offset; + assert(off > 0, "offset in object should be positive"); + VMRegPair pair = regs->at(j); + address loc = reg_map.location(pair.first()); + switch(bt) { + case T_BOOLEAN: { + new_vt->bool_field_put(off, *(jboolean*)loc); + break; + } + case T_CHAR: { + new_vt->char_field_put(off, *(jchar*)loc); + break; + } + case T_BYTE: { + new_vt->byte_field_put(off, *(jbyte*)loc); + break; + } + case T_SHORT: { + new_vt->short_field_put(off, *(jshort*)loc); + break; + } + case T_INT: { + new_vt->int_field_put(off, *(jint*)loc); + break; + } + case T_LONG: { +#ifdef _LP64 + new_vt->double_field_put(off, *(jdouble*)loc); +#else + Unimplemented(); +#endif + break; + } + case T_OBJECT: + case T_ARRAY: { + Handle handle = handles.at(k++); + new_vt->obj_field_put(off, handle()); + break; + } + case T_FLOAT: { + new_vt->float_field_put(off, *(jfloat*)loc); + break; + } + case T_DOUBLE: { + new_vt->double_field_put(off, *(jdouble*)loc); + break; + } + default: + ShouldNotReachHere(); + } + *(intptr_t*)loc = 0xDEAD; + j++; + } + assert(j == regs->length(), "missed a field?"); + assert(k == handles.length(), "missed an oop?"); + return new_vt; +} + +// Check the return register for a ValueKlass oop +ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) { + BasicType bt = T_METADATA; + VMRegPair pair; + int nb = SharedRuntime::java_return_convention(&bt, 
&pair, 1); + assert(nb == 1, "broken"); + + address loc = map.location(pair.first()); + intptr_t ptr = *(intptr_t*)loc; + if (is_set_nth_bit(ptr, 0)) { + // Oop is tagged, must be a ValueKlass oop + clear_nth_bit(ptr, 0); + assert(Metaspace::contains((void*)ptr), "should be klass"); + ValueKlass* vk = (ValueKlass*)ptr; + assert(vk->can_be_returned_as_fields(), "must be able to return as fields"); + return vk; + } +#ifdef ASSERT + // Oop is not tagged, must be a valid oop + if (VerifyOops) { + oopDesc::verify(oop((HeapWord*)ptr)); + } +#endif + return NULL; +} + +void ValueKlass::iterate_over_inside_oops(OopClosure* f, oop value) { + assert(!Universe::heap()->is_in_reserved(value), "This method is used on buffered values"); + + oop* addr_mirror = (oop*)(value)->mark_addr_raw(); + f->do_oop_no_buffering(addr_mirror); + + if (!contains_oops()) return; + + OopMapBlock* map = start_of_nonstatic_oop_maps(); + OopMapBlock* const end_map = map + nonstatic_oop_map_count(); + + if (!UseCompressedOops) { + for (; map < end_map; map++) { + oop* p = (oop*) (((char*)(oopDesc*)value) + map->offset()); + oop* const end = p + map->count(); + for (; p < end; ++p) { + assert(oopDesc::is_oop_or_null(*p), "Sanity check"); + f->do_oop(p); + } + } + } else { + for (; map < end_map; map++) { + narrowOop* p = (narrowOop*) (((char*)(oopDesc*)value) + map->offset()); + narrowOop* const end = p + map->count(); + for (; p < end; ++p) { + oop o = CompressedOops::decode(*p); + assert(Universe::heap()->is_in_reserved_or_null(o), "Sanity check"); + assert(oopDesc::is_oop_or_null(o), "Sanity check"); + f->do_oop(p); + } + } + } +} + +void ValueKlass::verify_on(outputStream* st) { + InstanceKlass::verify_on(st); + guarantee(prototype_header()->is_always_locked(), "Prototype header is not always locked"); +} + +void ValueKlass::oop_verify_on(oop obj, outputStream* st) { + InstanceKlass::oop_verify_on(obj, st); + guarantee(obj->mark()->is_always_locked(), "Header is not always locked"); +} --- 
/dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueKlass.hpp 2019-03-11 14:26:30.906354813 +0100 @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_VALUEKLASS_HPP +#define SHARE_VM_OOPS_VALUEKLASS_HPP + +#include "classfile/javaClasses.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/method.hpp" +#include "oops/oop.inline.hpp" + +// A ValueKlass is a specialized InstanceKlass for value types. 
+ + +class ValueKlass: public InstanceKlass { + friend class VMStructs; + friend class InstanceKlass; + + private: + + // Constructor + ValueKlass(const ClassFileParser& parser) + : InstanceKlass(parser, InstanceKlass::_misc_kind_value_type, InstanceKlass::ID) { + _adr_valueklass_fixed_block = valueklass_static_block(); + // Addresses used for value type calling convention + *((Array**)adr_extended_sig()) = NULL; + *((Array**)adr_return_regs()) = NULL; + *((address*)adr_pack_handler()) = NULL; + *((address*)adr_unpack_handler()) = NULL; + assert(pack_handler() == NULL, "pack handler not null"); + *((int*)adr_default_value_offset()) = 0; + set_prototype_header(markOopDesc::always_locked_prototype()); + } + + ValueKlassFixedBlock* valueklass_static_block() const { + address adr_jf = adr_value_fields_klasses(); + if (adr_jf != NULL) { + return (ValueKlassFixedBlock*)(adr_jf + this->java_fields_count() * sizeof(Klass*)); + } + + address adr_fing = adr_fingerprint(); + if (adr_fing != NULL) { + return (ValueKlassFixedBlock*)(adr_fingerprint() + sizeof(u8)); + } + + InstanceKlass** adr_host = adr_unsafe_anonymous_host(); + if (adr_host != NULL) { + return (ValueKlassFixedBlock*)(adr_host + 1); + } + + Klass* volatile* adr_impl = adr_implementor(); + if (adr_impl != NULL) { + return (ValueKlassFixedBlock*)(adr_impl + 1); + } + + return (ValueKlassFixedBlock*)end_of_nonstatic_oop_maps(); + } + + address adr_extended_sig() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _extended_sig)); + } + + address adr_return_regs() const { + ValueKlassFixedBlock* vkst = valueklass_static_block(); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _return_regs)); + } + + // pack and unpack handlers for value types return + address adr_pack_handler() const { + assert(_adr_valueklass_fixed_block != NULL, "Should 
have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _pack_handler)); + } + + address adr_unpack_handler() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _unpack_handler)); + } + + address pack_handler() const { + return *(address*)adr_pack_handler(); + } + + address unpack_handler() const { + return *(address*)adr_unpack_handler(); + } + + address adr_default_value_offset() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(default_value_offset_offset()); + } + + int collect_fields(GrowableArray* sig, int base_off = 0) const; + + void cleanup_blobs(); + + protected: + // Returns the array class for the n'th dimension + Klass* array_klass_impl(bool or_null, int n, TRAPS); + + // Returns the array class with this class as element type + Klass* array_klass_impl(bool or_null, TRAPS); + + public: + // Type testing + bool is_value_slow() const { return true; } + + oop value_mirror() const { + return java_lang_Class::value_mirror(java_mirror()); + } + + // Casting from Klass* + static ValueKlass* cast(Klass* k) { + assert(k->is_value(), "cast to ValueKlass"); + return (ValueKlass*) k; + } + + // Use this to return the size of an instance in heap words + // Implementation is currently simple because all value types are allocated + // in Java heap like Java objects. 
+ virtual int size_helper() const { + return layout_helper_to_size_helper(layout_helper()); + } + + // allocate_instance() allocates a stand alone value in the Java heap + instanceOop allocate_instance(TRAPS); + + // minimum number of bytes occupied by nonstatic fields, HeapWord aligned or pow2 + int raw_value_byte_size() const; + + int first_field_offset() const; + + address data_for_oop(oop o) const { + return ((address) (void*) o) + first_field_offset(); + } + + oop oop_for_data(address data) const { + oop o = (oop) (data - first_field_offset()); + assert(oopDesc::is_oop(o, false), "Not an oop"); + return o; + } + + // Query if h/w provides atomic load/store + bool is_atomic(); + + bool flatten_array(); + + bool contains_oops() const { return nonstatic_oop_map_count() > 0; } + int nonstatic_oop_count(); + + // Prototype general store methods... + + // copy the fields, with no concern for GC barriers + void raw_field_copy(void* src, void* dst, size_t raw_byte_size); + + void value_store(void* src, void* dst, bool dst_is_heap, bool dst_uninitialized) { + value_store(src, dst, nonstatic_field_size() << LogBytesPerHeapOop, dst_is_heap, dst_uninitialized); + } + + // store the value of this klass contained with src into dst, raw data ptr + void value_store(void* src, void* dst, size_t raw_byte_size, bool dst_is_heap, bool dst_uninitialized); + + // GC support... 
+ + void iterate_over_inside_oops(OopClosure* f, oop value); + + // oop iterate raw value type data pointer (where oop_addr may not be an oop, but backing/array-element) + template + inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure); + + template + inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi); + + // calling convention support + void initialize_calling_convention(TRAPS); + Array* extended_sig() const { + return *((Array**)adr_extended_sig()); + } + Array* return_regs() const { + return *((Array**)adr_return_regs()); + } + bool can_be_returned_as_fields() const; + void save_oop_fields(const RegisterMap& map, GrowableArray& handles) const; + void restore_oop_results(RegisterMap& map, GrowableArray& handles) const; + oop realloc_result(const RegisterMap& reg_map, const GrowableArray& handles, TRAPS); + static ValueKlass* returned_value_klass(const RegisterMap& reg_map); + + // pack and unpack handlers. Need to be loadable from generated code + // so at a fixed offset from the base of the klass pointer. 
+ static ByteSize pack_handler_offset() { + return byte_offset_of(ValueKlassFixedBlock, _pack_handler); + } + + static ByteSize unpack_handler_offset() { + return byte_offset_of(ValueKlassFixedBlock, _unpack_handler); + } + + static ByteSize default_value_offset_offset() { + return byte_offset_of(ValueKlassFixedBlock, _default_value_offset); + } + + void set_default_value_offset(int offset) { + *((int*)adr_default_value_offset()) = offset; + } + + int default_value_offset() { + int offset = *((int*)adr_default_value_offset()); + assert(offset != 0, "must not be called if not initialized"); + return offset; + } + + void set_default_value(oop val) { + java_mirror()->obj_field_put(default_value_offset(), val); + } + + oop default_value() { + oop val = java_mirror()->obj_field_acquire(default_value_offset()); + assert(oopDesc::is_oop(val), "Sanity check"); + assert(val->is_value(), "Sanity check"); + assert(val->klass() == this, "sanity check"); + return val; + } + + void deallocate_contents(ClassLoaderData* loader_data); + static void cleanup(ValueKlass* ik) ; + + // Verification + void verify_on(outputStream* st); + void oop_verify_on(oop obj, outputStream* st); + +}; + +#endif /* SHARE_VM_OOPS_VALUEKLASS_HPP */ --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/oops/valueKlass.inline.hpp 2019-03-11 14:26:31.418354806 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_OOPS_VALUEKLASS_INLINE_HPP +#define SHARE_VM_OOPS_VALUEKLASS_INLINE_HPP + +#include "memory/iterator.hpp" +#include "oops/klass.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" +#include "utilities/macros.hpp" + +template +void ValueKlass::oop_iterate_specialized(const address oop_addr, OopClosureType* closure) { + OopMapBlock* map = start_of_nonstatic_oop_maps(); + OopMapBlock* const end_map = map + nonstatic_oop_map_count(); + + for (; map < end_map; map++) { + T* p = (T*) (oop_addr + map->offset()); + T* const end = p + map->count(); + for (; p < end; ++p) { + Devirtualizer::do_oop(closure, p); + } + } +} + +template +inline void ValueKlass::oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi) { + OopMapBlock* map = start_of_nonstatic_oop_maps(); + OopMapBlock* const end_map = map + nonstatic_oop_map_count(); + + T* const l = (T*) lo; + T* const h = (T*) hi; + + for (; map < end_map; map++) { + T* p = (T*) (oop_addr + map->offset()); + T* end = p + map->count(); + if (p < l) { + p = l; + } + if (end > h) { + end = h; + } + for (; p < end; ++p) { + Devirtualizer::do_oop(closure, p); + } + } +} + + +#endif // SHARE_VM_OOPS_VALUEKLASS_INLINE_HPP --- old/src/hotspot/share/opto/addnode.cpp 2019-03-11 14:26:32.226354795 +0100 +++ new/src/hotspot/share/opto/addnode.cpp 2019-03-11 14:26:31.986354798 +0100 @@ -655,6 
+655,12 @@ if (tx->is_con()) { // Left input is an add of a constant? txoffset = tx->get_con(); } + if (tp->isa_aryptr()) { + // In the case of a flattened value type array, each field has its + // own slice so we need to extract the field being accessed from + // the address computation + return tp->is_aryptr()->add_field_offset_and_offset(txoffset); + } return tp->add_offset(txoffset); } @@ -675,6 +681,12 @@ if (p2->is_con()) { // Left input is an add of a constant? p2offset = p2->get_con(); } + if (p1->isa_aryptr()) { + // In the case of a flattened value type array, each field has its + // own slice so we need to extract the field being accessed from + // the address computation + return p1->is_aryptr()->add_field_offset_and_offset(p2offset); + } return p1->add_offset(p2offset); } --- old/src/hotspot/share/opto/arraycopynode.cpp 2019-03-11 14:26:32.690354788 +0100 +++ new/src/hotspot/share/opto/arraycopynode.cpp 2019-03-11 14:26:32.450354792 +0100 @@ -28,6 +28,7 @@ #include "gc/shared/c2/cardTableBarrierSetC2.hpp" #include "opto/arraycopynode.hpp" #include "opto/graphKit.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/macros.hpp" @@ -112,10 +113,14 @@ } int ArrayCopyNode::get_count(PhaseGVN *phase) const { - Node* src = in(ArrayCopyNode::Src); - const Type* src_type = phase->type(src); - if (is_clonebasic()) { + Node* src = in(ArrayCopyNode::Src); + const Type* src_type = phase->type(src); + + if (src_type == Type::TOP) { + return -1; + } + if (src_type->isa_instptr()) { const TypeInstPtr* inst_src = src_type->is_instptr(); ciInstanceKlass* ik = inst_src->klass()->as_instance_klass(); @@ -136,7 +141,8 @@ // array must be too. 
assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() || - phase->is_IterGVN(), "inconsistent"); + (ValueArrayFlatten && ary_src->elem()->make_oopptr() != NULL && ary_src->elem()->make_oopptr()->can_be_value_type()) || + phase->is_IterGVN() || phase->C->inlining_incrementally(), "inconsistent"); if (ary_src->size()->is_con()) { return ary_src->size()->get_con(); @@ -268,8 +274,14 @@ BasicType src_elem = ary_src->klass()->as_array_klass()->element_type()->basic_type(); BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type(); - if (src_elem == T_ARRAY) src_elem = T_OBJECT; - if (dest_elem == T_ARRAY) dest_elem = T_OBJECT; + if (src_elem == T_ARRAY || + (src_elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) { + src_elem = T_OBJECT; + } + if (dest_elem == T_ARRAY || + (dest_elem == T_VALUETYPE && ary_dest->klass()->is_obj_array_klass())) { + dest_elem = T_OBJECT; + } if (src_elem != dest_elem || dest_elem == T_VOID) { // We don't know if arguments are arrays of the same type @@ -289,6 +301,10 @@ base_dest = dest; uint shift = exact_log2(type2aelembytes(dest_elem)); + if (dest_elem == T_VALUETYPE) { + ciValueArrayKlass* vak = ary_src->klass()->as_value_array_klass(); + shift = vak->log2_element_size(); + } uint header = arrayOopDesc::base_offset_in_bytes(dest_elem); adr_src = src; @@ -300,15 +316,12 @@ Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift))); Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift))); + adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header))); + adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header))); + adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale)); adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale)); - adr_src = new AddPNode(base_src, adr_src, phase->MakeConX(header)); - adr_dest = new AddPNode(base_dest, 
adr_dest, phase->MakeConX(header)); - - adr_src = phase->transform(adr_src); - adr_dest = phase->transform(adr_dest); - copy_type = dest_elem; } else { assert(ary_src != NULL, "should be a clone"); @@ -323,8 +336,17 @@ base_dest = dest->in(AddPNode::Base); assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?"); + + if (ary_src->elem()->make_oopptr() != NULL && + ary_src->elem()->make_oopptr()->can_be_value_type()) { + return false; + } + BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type(); - if (elem == T_ARRAY) elem = T_OBJECT; + if (elem == T_ARRAY || + (elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) { + elem = T_OBJECT; + } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (bs->array_copy_requires_gc_barriers(true, elem, true, BarrierSetC2::Optimization)) { @@ -344,40 +366,117 @@ return true; } -const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) { +const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) { const Type* at = phase->type(n); assert(at != Type::TOP, "unexpected type"); - const TypePtr* atp = at->isa_ptr(); + const TypeAryPtr* atp = at->is_aryptr(); // adjust atp to be the correct array element address type - atp = atp->add_offset(Type::OffsetBot); + atp = atp->add_offset(Type::OffsetBot)->is_aryptr(); return atp; } -void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) { - Node* ctl = in(TypeFunc::Control); +void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) { + Node* ctl = kit.control(); if (!disjoint_bases && count > 1) { + PhaseGVN& gvn = kit.gvn(); Node* src_offset = in(ArrayCopyNode::SrcPos); Node* dest_offset = in(ArrayCopyNode::DestPos); assert(src_offset != NULL && 
dest_offset != NULL, "should be"); - Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset)); - Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt)); + Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset)); + Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt)); IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN); - phase->transform(iff); + gvn.transform(iff); - forward_ctl = phase->transform(new IfFalseNode(iff)); - backward_ctl = phase->transform(new IfTrueNode(iff)); + kit.set_control(gvn.transform(new IfFalseNode(iff))); + backward_ctl = gvn.transform(new IfTrueNode(iff)); + } +} + +void ArrayCopyNode::copy(GraphKit& kit, + const TypeAryPtr* atp_src, + const TypeAryPtr* atp_dest, + int i, + Node* base_src, + Node* base_dest, + Node* adr_src, + Node* adr_dest, + BasicType copy_type, + const Type* value_type) { + if (copy_type == T_VALUETYPE) { + ciValueArrayKlass* vak = atp_src->klass()->as_value_array_klass(); + ciValueKlass* vk = vak->element_klass()->as_value_klass(); + for (int j = 0; j < vk->nof_nonstatic_fields(); j++) { + ciField* field = vk->nonstatic_field_at(j); + int off_in_vt = field->offset() - vk->first_field_offset(); + Node* off = kit.MakeConX(off_in_vt + i * vak->element_byte_size()); + ciType* ft = field->type(); + BasicType bt = type2field[ft->basic_type()]; + assert(!field->is_flattened(), "flattened field encountered"); + if (bt == T_VALUETYPE) { + bt = T_OBJECT; + } + const Type* rt = Type::get_const_type(ft); + const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot); + Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off)); + Node* v = kit.make_load(kit.control(), next_src, rt, bt, adr_type, MemNode::unordered); + + Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off)); + if (is_java_primitive(bt)) { + kit.store_to_memory(kit.control(), next_dest, v, bt, adr_type, MemNode::unordered); + } else { + const 
TypeOopPtr* val_type = Type::get_const_type(ft)->is_oopptr(); + kit.access_store_at(base_dest, next_dest, adr_type, v, + val_type, bt, StoreNode::release_if_reference(T_OBJECT)); + } + } } else { - forward_ctl = ctl; + Node* off = kit.MakeConX(type2aelembytes(copy_type) * i); + Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off)); + Node* v = kit.make_load(kit.control(), next_src, value_type, copy_type, atp_src, MemNode::unordered); + Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off)); + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + if (copy_type == T_OBJECT && (bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization))) { + kit.access_store_at(base_dest, next_dest, atp_dest, v, + value_type->make_ptr()->is_oopptr(), copy_type, + StoreNode::release_if_reference(T_OBJECT)); + } else { + kit.store_to_memory(kit.control(), next_dest, v, copy_type, atp_dest, MemNode::unordered); + } + } +} + + +void ArrayCopyNode::array_copy_forward(GraphKit& kit, + bool can_reshape, + const TypeAryPtr* atp_src, + const TypeAryPtr* atp_dest, + Node* adr_src, + Node* base_src, + Node* adr_dest, + Node* base_dest, + BasicType copy_type, + const Type* value_type, + int count) { + if (!kit.stopped()) { + // copy forward + if (count > 0) { + for (int i = 0; i < count; i++) { + copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type); + } + } else if(can_reshape) { + PhaseGVN& gvn = kit.gvn(); + assert(gvn.is_IterGVN(), ""); + gvn.record_for_igvn(adr_src); + gvn.record_for_igvn(adr_dest); + } } } -Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase, +void ArrayCopyNode::array_copy_backward(GraphKit& kit, bool can_reshape, - Node*& forward_ctl, - MergeMemNode* mm, - const TypePtr* atp_src, - const TypePtr* atp_dest, + const TypeAryPtr* atp_src, + const TypeAryPtr* atp_dest, Node* adr_src, Node* base_src, Node* adr_dest, @@ -385,69 +484,21 @@ BasicType 
copy_type, const Type* value_type, int count) { - if (!forward_ctl->is_top()) { - // copy forward - mm = mm->clone()->as_MergeMem(); - - if (count > 0) { - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type); - store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type); - for (int i = 1; i < count; i++) { - Node* off = phase->MakeConX(type2aelembytes(copy_type) * i); - Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off)); - Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off)); - v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type); - store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type); - } - } else if(can_reshape) { - PhaseIterGVN* igvn = phase->is_IterGVN(); - igvn->_worklist.push(adr_src); - igvn->_worklist.push(adr_dest); - } - return mm; - } - return phase->C->top(); -} - -Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase, - bool can_reshape, - Node*& backward_ctl, - MergeMemNode* mm, - const TypePtr* atp_src, - const TypePtr* atp_dest, - Node* adr_src, - Node* base_src, - Node* adr_dest, - Node* base_dest, - BasicType copy_type, - const Type* value_type, - int count) { - if (!backward_ctl->is_top()) { + if (!kit.stopped()) { // copy backward - mm = mm->clone()->as_MergeMem(); - - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays"); + PhaseGVN& gvn = kit.gvn(); if (count > 0) { - for (int i = count-1; i >= 1; i--) { - Node* off = phase->MakeConX(type2aelembytes(copy_type) * i); - Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off)); - Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off)); - Node* v = load(bs, phase, 
backward_ctl, mm, next_src, atp_src, value_type, copy_type); - store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type); + for (int i = count-1; i >= 0; i--) { + copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type); } - Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type); - store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type); } else if(can_reshape) { - PhaseIterGVN* igvn = phase->is_IterGVN(); - igvn->_worklist.push(adr_src); - igvn->_worklist.push(adr_dest); + PhaseGVN& gvn = kit.gvn(); + assert(gvn.is_IterGVN(), ""); + gvn.record_for_igvn(adr_src); + gvn.record_for_igvn(adr_dest); } - return phase->transform(mm); } - return phase->C->top(); } bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape, @@ -472,17 +523,16 @@ } else { // replace fallthrough projections of the ArrayCopyNode by the // new memory, control and the input IO. - CallProjections callprojs; - extract_projections(&callprojs, true, false); + CallProjections* callprojs = extract_projections(true, false); - if (callprojs.fallthrough_ioproj != NULL) { - igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O)); + if (callprojs->fallthrough_ioproj != NULL) { + igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O)); } - if (callprojs.fallthrough_memproj != NULL) { - igvn->replace_node(callprojs.fallthrough_memproj, mem); + if (callprojs->fallthrough_memproj != NULL) { + igvn->replace_node(callprojs->fallthrough_memproj, mem); } - if (callprojs.fallthrough_catchproj != NULL) { - igvn->replace_node(callprojs.fallthrough_catchproj, ctl); + if (callprojs->fallthrough_catchproj != NULL) { + igvn->replace_node(callprojs->fallthrough_catchproj, ctl); } // The ArrayCopyNode is not disconnected. 
It still has the @@ -497,6 +547,15 @@ } else { if (in(TypeFunc::Control) != ctl) { // we can't return new memory and control from Ideal at parse time +#ifdef ASSERT + Node* src = in(ArrayCopyNode::Src); + const Type* src_type = phase->type(src); + const TypeAryPtr* ary_src = src_type->isa_aryptr(); + BasicType elem = ary_src != NULL ? ary_src->klass()->as_array_klass()->element_type()->basic_type() : T_CONFLICT; + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + assert(!is_clonebasic() || bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Optimization) || + (ary_src != NULL && elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass()), "added control for clone?"); +#endif assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?"); phase->record_for_igvn(this); return false; @@ -507,7 +566,11 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (remove_dead_region(phase, can_reshape)) return this; + // Perform any generic optimizations first + Node* result = SafePointNode::Ideal(phase, can_reshape); + if (result != NULL) { + return result; + } if (StressArrayCopyMacroNode && !can_reshape) { phase->record_for_igvn(this); @@ -549,6 +612,17 @@ return NULL; } + Node* src = in(ArrayCopyNode::Src); + Node* dest = in(ArrayCopyNode::Dest); + const Type* src_type = phase->type(src); + const Type* dest_type = phase->type(dest); + + if (src_type->isa_aryptr() && dest_type->isa_instptr()) { + // clone used for load of unknown value type can't be optimized at + // this point + return NULL; + } + Node* mem = try_clone_instance(phase, can_reshape, count); if (mem != NULL) { return (mem == NodeSentinel) ? 
NULL : mem; @@ -568,63 +642,78 @@ return NULL; } - Node* src = in(ArrayCopyNode::Src); - Node* dest = in(ArrayCopyNode::Dest); - const TypePtr* atp_src = get_address_type(phase, src); - const TypePtr* atp_dest = get_address_type(phase, dest); - - Node *in_mem = in(TypeFunc::Memory); - if (!in_mem->is_MergeMem()) { - in_mem = MergeMemNode::make(in_mem); - } - + JVMState* new_jvms = NULL; + SafePointNode* new_map = NULL; + if (!is_clonebasic()) { + new_jvms = jvms()->clone_shallow(phase->C); + new_map = new SafePointNode(req(), new_jvms); + for (uint i = TypeFunc::FramePtr; i < req(); i++) { + new_map->init_req(i, in(i)); + } + new_jvms->set_map(new_map); + } else { + new_jvms = new (phase->C) JVMState(0); + new_map = new SafePointNode(TypeFunc::Parms, new_jvms); + new_jvms->set_map(new_map); + } + new_map->set_control(in(TypeFunc::Control)); + new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory))); + new_map->set_i_o(in(TypeFunc::I_O)); + + const TypeAryPtr* atp_src = get_address_type(phase, src); + const TypeAryPtr* atp_dest = get_address_type(phase, dest); + uint alias_idx_src = phase->C->get_alias_index(atp_src); + uint alias_idx_dest = phase->C->get_alias_index(atp_dest); if (can_reshape) { assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms"); phase->is_IterGVN()->set_delay_transform(true); } + GraphKit kit(new_jvms, phase); + + SafePointNode* backward_map = NULL; + SafePointNode* forward_map = NULL; Node* backward_ctl = phase->C->top(); - Node* forward_ctl = phase->C->top(); - array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl); - Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl, - in_mem->as_MergeMem(), - atp_src, atp_dest, - adr_src, base_src, adr_dest, base_dest, - copy_type, value_type, count); - - Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl, - in_mem->as_MergeMem(), - atp_src, atp_dest, - adr_src, base_src, adr_dest, base_dest, - 
copy_type, value_type, count); - - Node* ctl = NULL; - if (!forward_ctl->is_top() && !backward_ctl->is_top()) { - ctl = new RegionNode(3); - ctl->init_req(1, forward_ctl); - ctl->init_req(2, backward_ctl); - ctl = phase->transform(ctl); - MergeMemNode* forward_mm = forward_mem->as_MergeMem(); - MergeMemNode* backward_mm = backward_mem->as_MergeMem(); - for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) { - if (mms.memory() != mms.memory2()) { - Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx())); - phi->init_req(1, mms.memory()); - phi->init_req(2, mms.memory2()); - phi = phase->transform(phi); - mms.set_memory(phi); - } - } - mem = forward_mem; - } else if (!forward_ctl->is_top()) { - ctl = forward_ctl; - mem = forward_mem; + array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl); + + { + PreserveJVMState pjvms(&kit); + + array_copy_forward(kit, can_reshape, + atp_src, atp_dest, + adr_src, base_src, adr_dest, base_dest, + copy_type, value_type, count); + + forward_map = kit.stop(); + } + + kit.set_control(backward_ctl); + array_copy_backward(kit, can_reshape, + atp_src, atp_dest, + adr_src, base_src, adr_dest, base_dest, + copy_type, value_type, count); + + backward_map = kit.stop(); + + if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) { + assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?"); + Node* ctl = new RegionNode(3); + Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM); + kit.set_map(forward_map); + ctl->init_req(1, kit.control()); + mem->init_req(1, kit.reset_memory()); + kit.set_map(backward_map); + ctl->init_req(2, kit.control()); + mem->init_req(2, kit.reset_memory()); + kit.set_control(phase->transform(ctl)); + kit.set_all_memory(phase->transform(mem)); + } else if (!forward_map->control()->is_top()) { + kit.set_map(forward_map); } else { - assert(!backward_ctl->is_top(), "no copy?"); - ctl = backward_ctl; - mem = backward_mem; + 
assert(!backward_map->control()->is_top(), "no copy?"); + kit.set_map(backward_map); } if (can_reshape) { @@ -632,7 +721,11 @@ phase->is_IterGVN()->set_delay_transform(false); } - if (!finish_transform(phase, can_reshape, ctl, mem)) { + mem = kit.map()->memory(); + if (!finish_transform(phase, can_reshape, kit.control(), mem)) { + if (!can_reshape) { + phase->record_for_igvn(this); + } return NULL; } --- old/src/hotspot/share/opto/arraycopynode.hpp 2019-03-11 14:26:33.118354782 +0100 +++ new/src/hotspot/share/opto/arraycopynode.hpp 2019-03-11 14:26:32.910354785 +0100 @@ -88,27 +88,29 @@ intptr_t get_length_if_constant(PhaseGVN *phase) const; int get_count(PhaseGVN *phase) const; - static const TypePtr* get_address_type(PhaseGVN *phase, Node* n); + static const TypeAryPtr* get_address_type(PhaseGVN *phase, Node* n); Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count); bool prepare_array_copy(PhaseGVN *phase, bool can_reshape, Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest, BasicType& copy_type, const Type*& value_type, bool& disjoint_bases); - void array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, + void array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, - Node*& forward_ctl, Node*& backward_ctl); - Node* array_copy_forward(PhaseGVN *phase, bool can_reshape, Node*& ctl, - MergeMemNode* mm, - const TypePtr* atp_src, const TypePtr* atp_dest, + Node*& backward_ctl); + void array_copy_forward(GraphKit& kit, bool can_reshape, + const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest, + Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest, + BasicType copy_type, const Type* value_type, int count); + void array_copy_backward(GraphKit& kit, bool can_reshape, + const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest, Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest, BasicType copy_type, const Type* value_type, int count); - Node* array_copy_backward(PhaseGVN *phase, bool can_reshape, 
Node*& ctl, - MergeMemNode* mm, - const TypePtr* atp_src, const TypePtr* atp_dest, - Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest, - BasicType copy_type, const Type* value_type, int count); bool finish_transform(PhaseGVN *phase, bool can_reshape, Node* ctl, Node *mem); + void copy(GraphKit& kit, const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest, int i, + Node* base_src, Node* base_dest, Node* adr_src, Node* adr_dest, + BasicType copy_type, const Type* value_type); + static bool may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call); static Node* load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, const Type *type, BasicType bt); --- old/src/hotspot/share/opto/buildOopMap.cpp 2019-03-11 14:26:33.546354776 +0100 +++ new/src/hotspot/share/opto/buildOopMap.cpp 2019-03-11 14:26:33.334354779 +0100 @@ -257,12 +257,12 @@ regalloc->C->record_method_not_compilable("illegal oopMap register name"); continue; } - if( t->is_ptr()->_offset == 0 ) { // Not derived? + if (t->is_ptr()->offset() == 0) { // Not derived? if( mcall ) { // Outgoing argument GC mask responsibility belongs to the callee, // not the caller. Inspect the inputs to the call, to see if // this live-range is one of them. - uint cnt = mcall->tf()->domain()->cnt(); + uint cnt = mcall->tf()->domain_cc()->cnt(); uint j; for( j = TypeFunc::Parms; j < cnt; j++) if( mcall->in(j) == def ) @@ -332,7 +332,7 @@ // Outgoing argument GC mask responsibility belongs to the callee, // not the caller. Inspect the inputs to the call, to see if // this live-range is one of them. 
- uint cnt = mcall->tf()->domain()->cnt(); + uint cnt = mcall->tf()->domain_cc()->cnt(); uint j; for( j = TypeFunc::Parms; j < cnt; j++) if( mcall->in(j) == def ) --- old/src/hotspot/share/opto/c2compiler.cpp 2019-03-11 14:26:33.966354771 +0100 +++ new/src/hotspot/share/opto/c2compiler.cpp 2019-03-11 14:26:33.762354773 +0100 @@ -480,6 +480,8 @@ case vmIntrinsics::_getCharsStringU: case vmIntrinsics::_getCharStringU: case vmIntrinsics::_putCharStringU: + case vmIntrinsics::_makePrivateBuffer: + case vmIntrinsics::_finishPrivateBuffer: case vmIntrinsics::_getReference: case vmIntrinsics::_getBoolean: case vmIntrinsics::_getByte: @@ -489,6 +491,7 @@ case vmIntrinsics::_getLong: case vmIntrinsics::_getFloat: case vmIntrinsics::_getDouble: + case vmIntrinsics::_getValue: case vmIntrinsics::_putReference: case vmIntrinsics::_putBoolean: case vmIntrinsics::_putByte: @@ -498,6 +501,7 @@ case vmIntrinsics::_putLong: case vmIntrinsics::_putFloat: case vmIntrinsics::_putDouble: + case vmIntrinsics::_putValue: case vmIntrinsics::_getReferenceVolatile: case vmIntrinsics::_getBooleanVolatile: case vmIntrinsics::_getByteVolatile: --- old/src/hotspot/share/opto/callGenerator.cpp 2019-03-11 14:26:34.390354765 +0100 +++ new/src/hotspot/share/opto/callGenerator.cpp 2019-03-11 14:26:34.182354768 +0100 @@ -39,6 +39,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/sharedRuntime.hpp" // Utility function. @@ -123,14 +124,23 @@ private: CallStaticJavaNode* _call_node; // Force separate memory and I/O projections for the exceptional - // paths to facilitate late inlinig. + // paths to facilitate late inlining. 
bool _separate_io_proj; public: DirectCallGenerator(ciMethod* method, bool separate_io_proj) : CallGenerator(method), + _call_node(NULL), _separate_io_proj(separate_io_proj) { + if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) { + // If that call has not been optimized by the time optimizations are over, + // we'll need to add a call to create a value type instance from the klass + // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return). + // Separating memory and I/O projections for exceptions is required to + // perform that graph transformation. + _separate_io_proj = true; + } } virtual JVMState* generate(JVMState* jvms); @@ -140,6 +150,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); kit.C->print_inlining_update(this); + PhaseGVN& gvn = kit.gvn(); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() : SharedRuntime::get_resolve_opt_virtual_call_stub(); @@ -172,7 +183,10 @@ call->set_method_handle_invoke(true); } } - kit.set_arguments_for_java_call(call); + kit.set_arguments_for_java_call(call, is_late_inline()); + if (kit.stopped()) { + return kit.transfer_exceptions_into_jvms(); + } kit.set_edges_for_java_call(call, false, _separate_io_proj); Node* ret = kit.set_results_for_java_call(call, _separate_io_proj); kit.push_node(method()->return_type()->basic_type(), ret); @@ -198,7 +212,6 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); Node* receiver = kit.argument(0); - kit.C->print_inlining_update(this); if (kit.C->log() != NULL) { @@ -210,7 +223,7 @@ // correctly, but may bail out in final_graph_reshaping, because // the call instruction will have a seemingly deficient out-count. // (The bailout says something misleading about an "infinite loop".) 
- if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) { + if (!receiver->is_ValueType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) { assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc())); ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci()); int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc()); @@ -256,6 +269,9 @@ call->set_override_symbolic_info(true); } kit.set_arguments_for_java_call(call); + if (kit.stopped()) { + return kit.transfer_exceptions_into_jvms(); + } kit.set_edges_for_java_call(call); Node* ret = kit.set_results_for_java_call(call); kit.push_node(method()->return_type()->basic_type(), ret); @@ -356,7 +372,7 @@ return; } - const TypeTuple *r = call->tf()->domain(); + const TypeTuple *r = call->tf()->domain_cc(); for (int i1 = 0; i1 < method()->arg_size(); i1++) { if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); @@ -370,18 +386,27 @@ } // check for unreachable loop - CallProjections callprojs; - call->extract_projections(&callprojs, true); - if (callprojs.fallthrough_catchproj == call->in(0) || - callprojs.catchall_catchproj == call->in(0) || - callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) || - callprojs.catchall_memproj == call->in(TypeFunc::Memory) || - callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) || - callprojs.catchall_ioproj == call->in(TypeFunc::I_O) || - (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) || - (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) { + CallProjections* callprojs = call->extract_projections(true); + if (callprojs->fallthrough_catchproj == call->in(0) || + callprojs->catchall_catchproj == call->in(0) || + callprojs->fallthrough_memproj == call->in(TypeFunc::Memory) || + callprojs->catchall_memproj == 
call->in(TypeFunc::Memory) || + callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O) || + callprojs->catchall_ioproj == call->in(TypeFunc::I_O) || + (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) { return; } + bool result_not_used = true; + for (uint i = 0; i < callprojs->nb_resproj; i++) { + if (callprojs->resproj[i] != NULL) { + if (callprojs->resproj[i]->outcnt() != 0) { + result_not_used = false; + } + if (call->find_edge(callprojs->resproj[i]) != -1) { + return; + } + } + } Compile* C = Compile::current(); // Remove inlined methods from Compiler's lists. @@ -389,7 +414,6 @@ C->remove_macro_node(call); } - bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0); if (_is_pure_call && result_not_used) { // The call is marked as pure (no important side effects), but result isn't used. // It's safe to remove the call. @@ -405,26 +429,47 @@ map->init_req(i1, call->in(i1)); } + PhaseGVN& gvn = *C->initial_gvn(); // Make sure the state is a MergeMem for parsing. if (!map->in(TypeFunc::Memory)->is_MergeMem()) { Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory)); - C->initial_gvn()->set_type_bottom(mem); + gvn.set_type_bottom(mem); map->set_req(TypeFunc::Memory, mem); } - uint nargs = method()->arg_size(); // blow away old call arguments Node* top = C->top(); - for (uint i1 = 0; i1 < nargs; i1++) { - map->set_req(TypeFunc::Parms + i1, top); + for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) { + map->set_req(i1, top); } jvms->set_map(map); // Make enough space in the expression stack to transfer // the incoming arguments and return value. 
map->ensure_stack(jvms, jvms->method()->max_stack()); + const TypeTuple *domain_sig = call->_tf->domain_sig(); + ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter()); + uint nargs = method()->arg_size(); + assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature"); + + uint j = TypeFunc::Parms; for (uint i1 = 0; i1 < nargs; i1++) { - map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1)); + const Type* t = domain_sig->field_at(TypeFunc::Parms + i1); + if (method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) { + // Value type arguments are not passed by reference: we get an argument per + // field of the value type. Build ValueTypeNodes from the value type arguments. + GraphKit arg_kit(jvms, &gvn); + arg_kit.set_control(map->control()); + ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, sig_cc, t->value_klass(), j, true); + map->set_control(arg_kit.control()); + map->set_argument(jvms, i1, vt); + } else { + map->set_argument(jvms, i1, call->in(j++)); + BasicType bt = t->basic_type(); + while (SigEntry::next_is_reserved(sig_cc, bt, true)) { + j += type2size[bt]; // Skip reserved arguments + } + } } C->print_inlining_assert_ready(); @@ -467,6 +512,31 @@ C->env()->notice_inlined_method(_inline_cg->method()); C->set_inlining_progress(true); C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup + + // Handle value type returns + bool returned_as_fields = call->tf()->returns_value_type_as_fields(); + if (result->is_ValueType()) { + ValueTypeNode* vt = result->as_ValueType(); + if (returned_as_fields) { + // Return of multiple values (the fields of a value type) + vt->replace_call_results(&kit, call, C); + if (vt->is_allocated(&gvn) && !StressValueTypeReturnedAsFields) { + result = vt->get_oop(); + } else { + result = vt->tagged_klass(gvn); + } + } else { + result = ValueTypePtrNode::make_from_value_type(&kit, vt); + } + } else if 
(gvn.type(result)->is_valuetypeptr() && returned_as_fields) { + const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms); + Node* cast = new CheckCastPPNode(NULL, result, vt_t); + gvn.record_for_igvn(cast); + ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast)); + vtptr->replace_call_results(&kit, call, C); + result = cast; + } + kit.replace_call(call, result, true); } } @@ -506,7 +576,7 @@ bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) { - CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const); + CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline); Compile::current()->print_inlining_update_delayed(this); @@ -514,7 +584,7 @@ _attempt++; } - if (cg != NULL && cg->is_inline()) { + if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) { assert(!cg->is_late_inline(), "we're doing late inlining"); _inline_cg = cg; Compile::current()->dec_number_of_mh_late_inlines(); @@ -784,6 +854,28 @@ return kit.transfer_exceptions_into_jvms(); } + // Allocate value types if they are merged with objects (similar to Parse::merge_common()) + uint tos = kit.jvms()->stkoff() + kit.sp(); + uint limit = slow_map->req(); + for (uint i = TypeFunc::Parms; i < limit; i++) { + Node* m = kit.map()->in(i); + Node* n = slow_map->in(i); + const Type* t = gvn.type(m)->meet_speculative(gvn.type(n)); + if (m->is_ValueType() && !t->isa_valuetype()) { + // Allocate value type in fast path + m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType()); + kit.map()->set_req(i, m); + } + if (n->is_ValueType() && !t->isa_valuetype()) { + // Allocate value type in slow path + PreserveJVMState pjvms(&kit); + kit.set_map(slow_map); + n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType()); + kit.map()->set_req(i, n); + slow_map = kit.stop(); + } + } + // There are 2 branches and the replaced 
nodes are only valid on // one: restore the replaced nodes to what they were before the // branch. @@ -807,8 +899,6 @@ mms.set_memory(gvn.transform(phi)); } } - uint tos = kit.jvms()->stkoff() + kit.sp(); - uint limit = slow_map->req(); for (uint i = TypeFunc::Parms; i < limit; i++) { // Skip unused stack slots; fast forward to monoff(); if (i == tos) { @@ -831,7 +921,7 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) { assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch"); bool input_not_const; - CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const); + CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false); Compile* C = Compile::current(); if (cg != NULL) { if (!delayed_forbidden && AlwaysIncrementalInline) { @@ -844,8 +934,8 @@ ciCallProfile profile = caller->call_profile_at_bci(bci); int call_site_count = caller->scale_count(profile.count()); - if (IncrementalInline && call_site_count > 0 && - (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) { + if (IncrementalInline && (AlwaysIncrementalInline || + (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) { return CallGenerator::for_mh_late_inline(caller, callee, input_not_const); } else { // Out-of-line call. 
@@ -853,7 +943,24 @@ } } -CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) { +static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) { + PhaseGVN& gvn = kit.gvn(); + Node* arg = kit.argument(arg_nb); + const Type* arg_type = arg->bottom_type(); + const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); + if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) { + const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part + arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type)); + kit.set_argument(arg_nb, arg); + } + if (sig_type->is_valuetypeptr() && !arg->is_ValueType() && + !kit.gvn().type(arg)->maybe_null() && t->as_value_klass()->is_scalarizable()) { + arg = ValueTypeNode::make_from_oop(&kit, arg, t->as_value_klass()); + kit.set_argument(arg_nb, arg); + } +} + +CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); Compile* C = kit.C; @@ -880,7 +987,10 @@ false /* call_does_dispatch */, jvms, true /* allow_inline */, - PROB_ALWAYS); + PROB_ALWAYS, + NULL, + true, + delayed_forbidden); return cg; } else { print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), @@ -894,8 +1004,9 @@ case vmIntrinsics::_linkToSpecial: case vmIntrinsics::_linkToInterface: { + int nargs = callee->arg_size(); // Get MemberName argument: - Node* member_name = kit.argument(callee->arg_size() - 1); + Node* member_name = kit.argument(nargs - 1); if (member_name->Opcode() == Op_ConP) { input_not_const = false; const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr(); @@ -915,27 +1026,13 @@ const int receiver_skip = target->is_static() ? 0 : 1; // Cast receiver to its type. 
if (!target->is_static()) { - Node* arg = kit.argument(0); - const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); - const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { - const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part - Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type)); - kit.set_argument(0, cast_obj); - } + cast_argument(nargs, 0, signature->accessing_klass(), kit); } // Cast reference arguments to its type. for (int i = 0, j = 0; i < signature->count(); i++) { ciType* t = signature->type_at(i); if (t->is_klass()) { - Node* arg = kit.argument(receiver_skip + j); - const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); - const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { - const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part - Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type)); - kit.set_argument(receiver_skip + j, cast_obj); - } + cast_argument(nargs, receiver_skip + j, t, kit); } j += t->size(); // long and double take two slots } @@ -966,7 +1063,9 @@ CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, !StressMethodHandleLinkerInlining /* allow_inline */, PROB_ALWAYS, - speculative_receiver_type); + speculative_receiver_type, + true, + delayed_forbidden); return cg; } else { print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), --- old/src/hotspot/share/opto/callGenerator.hpp 2019-03-11 14:26:34.822354759 +0100 +++ new/src/hotspot/share/opto/callGenerator.hpp 2019-03-11 14:26:34.614354762 +0100 @@ -125,7 +125,7 @@ static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface static CallGenerator* for_method_handle_call( JVMState* jvms, ciMethod* 
caller, ciMethod* callee, bool delayed_forbidden); - static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const); + static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden); // How to generate a replace a direct call with an inline version static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg); --- old/src/hotspot/share/opto/callnode.cpp 2019-03-11 14:26:35.250354753 +0100 +++ new/src/hotspot/share/opto/callnode.cpp 2019-03-11 14:26:35.034354756 +0100 @@ -42,6 +42,8 @@ #include "opto/regmask.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" +#include "opto/valuetypenode.hpp" +#include "runtime/sharedRuntime.hpp" // Portions of code courtesy of Clifford Click @@ -75,7 +77,7 @@ //------------------------------match------------------------------------------ // Construct projections for incoming parameters, and their RegMask info -Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { +Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { switch (proj->_con) { case TypeFunc::Control: case TypeFunc::I_O: @@ -479,6 +481,14 @@ while (ndim-- > 0) { st->print("[]"); } + } else if (cik->is_value_array_klass()) { + ciKlass* cie = cik->as_value_array_klass()->base_element_klass(); + cie->print_name_on(st); + st->print("[%d]", spobj->n_fields()); + int ndim = cik->as_array_klass()->dimension() - 1; + while (ndim-- > 0) { + st->print("[]"); + } } st->print("={"); uint nf = spobj->n_fields(); @@ -688,14 +698,23 @@ } #endif -const Type *CallNode::bottom_type() const { return tf()->range(); } +const Type *CallNode::bottom_type() const { return tf()->range_cc(); } const Type* CallNode::Value(PhaseGVN* phase) const { - if (phase->type(in(0)) == Type::TOP) return Type::TOP; - return tf()->range(); + if (!in(0) || phase->type(in(0)) == Type::TOP) { + 
return Type::TOP; + } + return tf()->range_cc(); } //------------------------------calling_convention----------------------------- -void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { +void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const { + if (_entry_point == StubRoutines::store_value_type_fields_to_buf()) { + // The call to that stub is a special case: its inputs are + // multiple values returned from a call and so it should follow + // the return convention. + SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt); + return; + } // Use the standard compiler calling convention Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); } @@ -704,29 +723,39 @@ //------------------------------match------------------------------------------ // Construct projections for control, I/O, memory-fields, ..., and // return result(s) along with their RegMask info -Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { - switch (proj->_con) { +Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { + uint con = proj->_con; + const TypeTuple *range_cc = tf()->range_cc(); + if (con >= TypeFunc::Parms) { + if (is_CallRuntime()) { + if (con == TypeFunc::Parms) { + uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg(); + OptoRegPair regs = match->c_return_value(ideal_reg,true); + RegMask rm = RegMask(regs.first()); + if (OptoReg::is_valid(regs.second())) { + rm.Insert(regs.second()); + } + return new MachProjNode(this,con,rm,ideal_reg); + } else { + assert(con == TypeFunc::Parms+1, "only one return value"); + assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, ""); + return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad); + } + } else { + // The Call may return multiple values (value type fields): we + // create one projection per returned values. 
+ assert(con <= TypeFunc::Parms+1 || ValueTypeReturnedAsFields, "only for multi value return"); + uint ideal_reg = range_cc->field_at(con)->ideal_reg(); + return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg); + } + } + + switch (con) { case TypeFunc::Control: case TypeFunc::I_O: case TypeFunc::Memory: return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); - case TypeFunc::Parms+1: // For LONG & DOUBLE returns - assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, ""); - // 2nd half of doubles and longs - return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); - - case TypeFunc::Parms: { // Normal returns - uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg(); - OptoRegPair regs = is_CallRuntime() - ? match->c_return_value(ideal_reg,true) // Calls into C runtime - : match-> return_value(ideal_reg,true); // Calls into compiled Java code - RegMask rm = RegMask(regs.first()); - if( OptoReg::is_valid(regs.second()) ) - rm.Insert( regs.second() ); - return new MachProjNode(this,proj->_con,rm,ideal_reg); - } - case TypeFunc::ReturnAdr: case TypeFunc::FramePtr: default: @@ -747,7 +776,7 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { assert((t_oop != NULL), "sanity"); if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) { - const TypeTuple* args = _tf->domain(); + const TypeTuple* args = _tf->domain_sig(); Node* dest = NULL; // Stubs that can be called once an ArrayCopyNode is expanded have // different signatures. 
Look for the second pointer argument, @@ -796,7 +825,7 @@ return true; } } - const TypeTuple* d = tf()->domain(); + const TypeTuple* d = tf()->domain_cc(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); if ((inst_t != NULL) && (!inst_t->klass_is_exact() || @@ -812,7 +841,7 @@ // Does this call have a direct reference to n other than debug information? bool CallNode::has_non_debug_use(Node *n) { - const TypeTuple * d = tf()->domain(); + const TypeTuple * d = tf()->domain_cc(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { Node *arg = in(i); if (arg == n) { @@ -822,6 +851,17 @@ return false; } +bool CallNode::has_debug_use(Node *n) { + assert(jvms() != NULL, "jvms should not be null"); + for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) { + Node *arg = in(i); + if (arg == n) { + return true; + } + } + return false; +} + // Returns the unique CheckCastPP of a call // or 'this' if there are several CheckCastPP or unexpected uses // or returns NULL if there is no one. 
@@ -853,16 +893,21 @@ } -void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) { - projs->fallthrough_proj = NULL; - projs->fallthrough_catchproj = NULL; - projs->fallthrough_ioproj = NULL; - projs->catchall_ioproj = NULL; - projs->catchall_catchproj = NULL; - projs->fallthrough_memproj = NULL; - projs->catchall_memproj = NULL; - projs->resproj = NULL; - projs->exobj = NULL; +CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) { + uint max_res = TypeFunc::Parms-1; + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + ProjNode *pn = fast_out(i)->as_Proj(); + max_res = MAX2(max_res, pn->_con); + } + + assert(max_res < _tf->range_cc()->cnt(), "result out of bounds"); + + uint projs_size = sizeof(CallProjections); + if (max_res > TypeFunc::Parms) { + projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*); + } + char* projs_storage = resource_allocate_bytes(projs_size); + CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1); for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { ProjNode *pn = fast_out(i)->as_Proj(); @@ -909,10 +954,12 @@ projs->fallthrough_memproj = pn; break; case TypeFunc::Parms: - projs->resproj = pn; + projs->resproj[0] = pn; break; default: - assert(false, "unexpected projection from allocation node."); + assert(pn->_con <= max_res, "unexpected projection from allocation node."); + projs->resproj[pn->_con-TypeFunc::Parms] = pn; + break; } } @@ -929,6 +976,7 @@ assert(!do_asserts || projs->catchall_memproj != NULL, "must be found"); assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found"); } + return projs; } Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) { @@ -972,6 +1020,10 @@ if (method() == NULL) { return true; // call into runtime or uncommon trap } + Bytecodes::Code bc = jvms()->method()->java_code_at_bci(_bci); + if (ACmpOnValues == 3 && (bc == Bytecodes::_if_acmpeq || bc == 
Bytecodes::_if_acmpne)) { + return true; + } ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci); ciMethod* callee = method(); if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) { @@ -1083,6 +1135,13 @@ //------------------------------calling_convention----------------------------- void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { + if (_entry_point == NULL) { + // The call to that stub is a special case: its inputs are + // multiple values returned from a call and so it should follow + // the return convention. + SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt); + return; + } Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); } @@ -1099,6 +1158,12 @@ } #endif +uint CallLeafNoFPNode::match_edge(uint idx) const { + // Null entry point is a special case for which the target is in a + // register. Need to match that edge. + return entry_point() == NULL && idx == TypeFunc::Parms; +} + //============================================================================= void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { @@ -1359,7 +1424,9 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, - Node *size, Node *klass_node, Node *initial_test) + Node *size, Node *klass_node, + Node* initial_test, + ValueTypeBaseNode* value_node) : CallNode(atype, NULL, TypeRawPtr::BOTTOM) { init_class_id(Class_Allocate); @@ -1367,6 +1434,7 @@ _is_scalar_replaceable = false; _is_non_escaping = false; _is_allocation_MemBar_redundant = false; + _larval = false; Node *topnode = C->top(); init_req( TypeFunc::Control , ctrl ); @@ -1378,6 +1446,9 @@ init_req( KlassNode , klass_node); init_req( InitialTest , initial_test); init_req( ALength , topnode); + init_req( ValueNode , value_node); + // DefaultValue defaults to NULL + // RawDefaultValue defaults to NULL C->add_macro_node(this); } @@ -1398,9 +1469,78 
@@ } } +Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) { + // Check for unused value type allocation + if (can_reshape && in(AllocateNode::ValueNode) != NULL && + outcnt() != 0 && result_cast() == NULL) { + // Remove allocation by replacing the projection nodes with its inputs + InitializeNode* init = initialization(); + PhaseIterGVN* igvn = phase->is_IterGVN(); + CallProjections* projs = extract_projections(true, false); + assert(projs->nb_resproj <= 1, "unexpected number of results"); + if (projs->fallthrough_catchproj != NULL) { + igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control)); + } + if (projs->fallthrough_memproj != NULL) { + igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory)); + } + if (projs->catchall_memproj != NULL) { + igvn->replace_node(projs->catchall_memproj, phase->C->top()); + } + if (projs->fallthrough_ioproj != NULL) { + igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O)); + } + if (projs->catchall_ioproj != NULL) { + igvn->replace_node(projs->catchall_ioproj, phase->C->top()); + } + if (projs->catchall_catchproj != NULL) { + igvn->replace_node(projs->catchall_catchproj, phase->C->top()); + } + if (projs->resproj[0] != NULL) { + igvn->replace_node(projs->resproj[0], phase->C->top()); + } + igvn->replace_node(this, phase->C->top()); + if (init != NULL) { + Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control); + Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory); + if (ctrl_proj != NULL) { + igvn->replace_node(ctrl_proj, init->in(TypeFunc::Control)); + } + if (mem_proj != NULL) { + igvn->replace_node(mem_proj, init->in(TypeFunc::Memory)); + } + } + return NULL; + } + + return CallNode::Ideal(phase, can_reshape); +} + +Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node) { + Node* mark_node = NULL; + // For now only enable fast locking for non-array types + if ((EnableValhalla || UseBiasedLocking) && Opcode() == 
Op_Allocate) { + if (klass_node == NULL) { + Node* k_adr = phase->transform(new AddPNode(obj, obj, phase->MakeConX(oopDesc::klass_offset_in_bytes()))); + klass_node = phase->transform(LoadKlassNode::make(*phase, NULL, phase->C->immutable_memory(), k_adr, phase->type(k_adr)->is_ptr())); + } + Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); + mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + } else { + mark_node = phase->MakeConX((intptr_t)markOopDesc::prototype()); + } + mark_node = phase->transform(mark_node); + // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal + return new OrXNode(mark_node, phase->MakeConX(_larval ? markOopDesc::larval_state_pattern : 0)); +} + + //============================================================================= Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (remove_dead_region(phase, can_reshape)) return this; + Node* res = SafePointNode::Ideal(phase, can_reshape); + if (res != NULL) { + return res; + } // Don't bother trying to transform a dead node if (in(0) && in(0)->is_top()) return NULL; @@ -2075,7 +2215,8 @@ return true; } - dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr(); + dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr(); + t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot); uint dest_alias = phase->C->get_alias_index(dest_t); uint t_oop_alias = phase->C->get_alias_index(t_oop); --- old/src/hotspot/share/opto/callnode.hpp 2019-03-11 14:26:35.754354746 +0100 +++ new/src/hotspot/share/opto/callnode.hpp 2019-03-11 14:26:35.514354749 +0100 @@ -80,7 +80,7 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const; virtual 
const RegMask &in_RegMask(uint) const; - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); virtual uint ideal_reg() const { return 0; } #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; @@ -542,7 +542,7 @@ // Simple container for the outgoing projections of a call. Useful // for serious surgery on calls. -class CallProjections : public StackObj { +class CallProjections { public: Node* fallthrough_proj; Node* fallthrough_catchproj; @@ -551,8 +551,26 @@ Node* catchall_catchproj; Node* catchall_memproj; Node* catchall_ioproj; - Node* resproj; Node* exobj; + uint nb_resproj; + Node* resproj[1]; // at least one projection + + CallProjections(uint nbres) { + fallthrough_proj = NULL; + fallthrough_catchproj = NULL; + fallthrough_memproj = NULL; + fallthrough_ioproj = NULL; + catchall_catchproj = NULL; + catchall_memproj = NULL; + catchall_ioproj = NULL; + exobj = NULL; + nb_resproj = nbres; + resproj[0] = NULL; + for (uint i = 1; i < nb_resproj; i++) { + resproj[i] = NULL; + } + } + }; class CallGenerator; @@ -574,7 +592,7 @@ const char *_name; // Printable name, if _method is NULL CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type) - : SafePointNode(tf->domain()->cnt(), NULL, adr_type), + : SafePointNode(tf->domain_cc()->cnt(), NULL, adr_type), _tf(tf), _entry_point(addr), _cnt(COUNT_UNKNOWN), @@ -601,7 +619,7 @@ virtual uint cmp( const Node &n ) const; virtual uint size_of() const = 0; virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const; - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); virtual uint ideal_reg() const { return NotAMachineReg; } // Are we guaranteed that this node is a safepoint? Not true for leaf calls and // for some macro nodes whose expansion does not have a safepoint on the fast path. 
@@ -620,21 +638,23 @@ virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase); // Does this node have a use of n other than in debug information? bool has_non_debug_use(Node *n); + bool has_debug_use(Node *n); // Returns the unique CheckCastPP of a call // or result projection is there are several CheckCastPP // or returns NULL if there is no one. Node *result_cast(); // Does this node returns pointer? bool returns_pointer() const { - const TypeTuple *r = tf()->range(); - return (r->cnt() > TypeFunc::Parms && + const TypeTuple *r = tf()->range_sig(); + return (!tf()->returns_value_type_as_fields() && + r->cnt() > TypeFunc::Parms && r->field_at(TypeFunc::Parms)->isa_ptr()); } // Collect all the interesting edges from a call for use in // replacing the call by something else. Used by macro expansion // and the late inlining support. - void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true); + CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true); virtual uint match_edge(uint idx) const; @@ -706,6 +726,18 @@ init_flags(Flag_is_macro); C->add_macro_node(this); } + const TypeTuple *r = tf->range_sig(); + if (ValueTypeReturnedAsFields && + method != NULL && + method->is_method_handle_intrinsic() && + r->cnt() > TypeFunc::Parms && + r->field_at(TypeFunc::Parms)->isa_oopptr() && + r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_value_type()) { + // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return + init_flags(Flag_is_macro); + C->add_macro_node(this); + } + _is_scalar_replaceable = false; _is_non_escaping = false; } @@ -815,6 +847,7 @@ { } virtual int Opcode() const; + virtual uint match_edge(uint idx) const; }; @@ -838,6 +871,9 @@ KlassNode, // type (maybe dynamic) of the obj. 
InitialTest, // slow-path test (may be constant) ALength, // array length (or TOP if none) + ValueNode, + DefaultValue, // default value in case of non flattened value array + RawDefaultValue, // same as above but as raw machine word ParmLimit }; @@ -847,6 +883,9 @@ fields[KlassNode] = TypeInstPtr::NOTNULL; fields[InitialTest] = TypeInt::BOOL; fields[ALength] = t; // length (can be a bad length) + fields[ValueNode] = Type::BOTTOM; + fields[DefaultValue] = TypeInstPtr::NOTNULL; + fields[RawDefaultValue] = TypeX_X; const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); @@ -864,10 +903,12 @@ bool _is_non_escaping; // True when MemBar for new is redundant with MemBar at initialzer exit bool _is_allocation_MemBar_redundant; + bool _larval; virtual uint size_of() const; // Size is bigger AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, - Node *size, Node *klass_node, Node *initial_test); + Node *size, Node *klass_node, Node *initial_test, + ValueTypeBaseNode* value_node = NULL); // Expansion modifies the JVMState, so we need to clone it virtual void clone_jvms(Compile* C) { if (jvms() != NULL) { @@ -879,6 +920,8 @@ virtual uint ideal_reg() const { return Op_RegP; } virtual bool guaranteed_safepoint() { return false; } + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); + // allocations do not modify their arguments virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;} @@ -936,6 +979,8 @@ // allocation node. 
void compute_MemBar_redundancy(ciMethod* initializer); bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; } + + Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node); }; //------------------------------AllocateArray--------------------------------- @@ -946,13 +991,15 @@ public: AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node* size, Node* klass_node, Node* initial_test, - Node* count_val + Node* count_val, Node* default_value, Node* raw_default_value ) : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test) { init_class_id(Class_AllocateArray); set_req(AllocateNode::ALength, count_val); + init_req(AllocateNode::DefaultValue, default_value); + init_req(AllocateNode::RawDefaultValue, raw_default_value); } virtual int Opcode() const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); @@ -1072,7 +1119,7 @@ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } virtual int Opcode() const; --- old/src/hotspot/share/opto/castnode.cpp 2019-03-11 14:26:36.194354740 +0100 +++ new/src/hotspot/share/opto/castnode.cpp 2019-03-11 14:26:35.986354743 +0100 @@ -27,10 +27,13 @@ #include "opto/callnode.hpp" #include "opto/castnode.hpp" #include "opto/connode.hpp" +#include "opto/graphKit.hpp" #include "opto/matcher.hpp" #include "opto/phaseX.hpp" +#include "opto/rootnode.hpp" #include "opto/subnode.hpp" #include "opto/type.hpp" +#include "opto/valuetypenode.hpp" //============================================================================= // If input is already higher or equal to cast type, then this is an identity. 
@@ -462,6 +465,22 @@ uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con(); return TypeX::make(bits); } + + if (t->is_zero_type() || !t->maybe_null()) { + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + Node* u = fast_out(i); + if (u->Opcode() == Op_OrL) { + for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { + Node* cmp = u->fast_out(j); + if (cmp->Opcode() == Op_CmpL) { + // Give CmpL a chance to get optimized + phase->record_for_igvn(cmp); + } + } + } + } + } + return CastP2XNode::bottom_type(); } --- old/src/hotspot/share/opto/cfgnode.cpp 2019-03-11 14:26:37.046354728 +0100 +++ new/src/hotspot/share/opto/cfgnode.cpp 2019-03-11 14:26:36.838354731 +0100 @@ -43,6 +43,7 @@ #include "opto/regmask.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "utilities/vmError.hpp" // Portions of code courtesy of Clifford Click @@ -372,7 +373,7 @@ return true; // The Region node is unreachable - it is dead. } -bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) { +Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) { // Incremental inlining + PhaseStringOpts sometimes produce: // // cmpP with 1 top input @@ -392,27 +393,26 @@ // code below replaces the Phi with the MergeMem so that the Region // is simplified. - PhiNode* phi = has_unique_phi(); - if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) { + if (type() == Type::MEMORY && is_diamond_phi(true)) { MergeMemNode* m = NULL; - assert(phi->req() == 3, "same as region"); + assert(req() == 3, "same as region"); + Node* r = in(0); for (uint i = 1; i < 3; ++i) { - Node *mem = phi->in(i); - if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) { + Node *mem = in(i); + if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) { // Nothing is control-dependent on path #i except the region itself. 
m = mem->as_MergeMem(); uint j = 3 - i; - Node* other = phi->in(j); + Node* other = in(j); if (other && other == m->base_memory()) { // m is a successor memory to other, and is not pinned inside the diamond, so push it out. // This will allow the diamond to collapse completely. - phase->is_IterGVN()->replace_node(phi, m); - return true; + return m; } } } } - return false; + return NULL; } //------------------------------Ideal------------------------------------------ @@ -427,8 +427,15 @@ bool has_phis = false; if (can_reshape) { // Need DU info to check for Phi users has_phis = (has_phi() != NULL); // Cache result - if (has_phis && try_clean_mem_phi(phase)) { - has_phis = false; + if (has_phis) { + PhiNode* phi = has_unique_phi(); + if (phi != NULL) { + Node* m = phi->try_clean_mem_phi(phase); + if (m != NULL) { + phase->is_IterGVN()->replace_node(phi, m); + has_phis = false; + } + } } if (!has_phis) { // No Phi users? Nothing merging? @@ -1106,15 +1113,10 @@ const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL; const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL; bool is_intf = false; - if (ttip != NULL) { - ciKlass* k = ttip->klass(); - if (k->is_loaded() && k->is_interface()) - is_intf = true; - } - if (ttkp != NULL) { - ciKlass* k = ttkp->klass(); - if (k->is_loaded() && k->is_interface()) - is_intf = true; + if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) { + is_intf = true; + } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) { + is_intf = true; } // Default case: merge all inputs @@ -1171,9 +1173,9 @@ // be 'I' or 'j/l/O'. Thus we'll pick 'j/l/O'. If this then flows // into a Phi which "knows" it's an Interface type we'll have to // uplift the type. 
- if (!t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface()) { + if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) { assert(ft == _type, ""); // Uplift to interface - } else if (!t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface()) { + } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) { assert(ft == _type, ""); // Uplift to interface } else { // We also have to handle 'evil cases' of interface- vs. class-arrays @@ -1335,6 +1337,14 @@ if (id != NULL) return id; } + if (phase->is_IterGVN()) { + Node* m = try_clean_mem_phi(phase); + if (m != NULL) { + return m; + } + } + + return this; // No identity } @@ -1796,6 +1806,24 @@ if( phase->type_or_null(r) == Type::TOP ) // Dead code? return NULL; // No change + // If all inputs are value types of the same type, push the value type node down + // through the phi because value type nodes should be merged through their input values. + if (req() > 2 && in(1) != NULL && in(1)->is_ValueTypeBase() && (can_reshape || in(1)->is_ValueType())) { + int opcode = in(1)->Opcode(); + uint i = 2; + // Check if inputs are values of the same type + for (; i < req() && in(i) && in(i)->is_ValueTypeBase() && in(i)->cmp(*in(1)); i++) { + assert(in(i)->Opcode() == opcode, "mixing pointers and values?"); + } + if (i == req()) { + ValueTypeBaseNode* vt = in(1)->as_ValueTypeBase()->clone_with_phis(phase, in(0)); + for (uint i = 2; i < req(); ++i) { + vt->merge_with(phase, in(i)->as_ValueTypeBase(), i, i == (req()-1)); + } + return vt; + } + } + Node *top = phase->C->top(); bool new_phi = (outcnt() == 0); // transforming new Phi // No change for igvn if new phi is not hooked @@ -2503,6 +2531,12 @@ // We only come from CatchProj, unless the CatchProj goes away. // If the CatchProj is optimized away, then we just carry the // exception oop through. 
+ + // CheckCastPPNode::Ideal() for value types reuses the exception + // paths of a call to perform an allocation: we can see a Phi here. + if (in(1)->is_Phi()) { + return this; + } CallNode *call = in(1)->in(0)->as_Call(); return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) ) --- old/src/hotspot/share/opto/cfgnode.hpp 2019-03-11 14:26:37.490354722 +0100 +++ new/src/hotspot/share/opto/cfgnode.hpp 2019-03-11 14:26:37.282354725 +0100 @@ -95,7 +95,6 @@ virtual Node* Identity(PhaseGVN* phase); virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual const RegMask &out_RegMask() const; - bool try_clean_mem_phi(PhaseGVN *phase); bool optimize_trichotomy(PhaseIterGVN* igvn); }; @@ -215,6 +214,7 @@ inst_offset() == offset && type()->higher_equal(tp); } + Node* try_clean_mem_phi(PhaseGVN *phase); virtual const Type* Value(PhaseGVN* phase) const; virtual Node* Identity(PhaseGVN* phase); --- old/src/hotspot/share/opto/chaitin.cpp 2019-03-11 14:26:37.914354716 +0100 +++ new/src/hotspot/share/opto/chaitin.cpp 2019-03-11 14:26:37.706354719 +0100 @@ -1668,10 +1668,10 @@ // can't happen at run-time but the optimizer cannot deduce it so // we have to handle it gracefully. assert(!derived->bottom_type()->isa_narrowoop() || - derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); + derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity"); const TypePtr *tj = derived->bottom_type()->isa_ptr(); // If its an OOP with a non-zero offset, then it is derived. 
- if( tj == NULL || tj->_offset == 0 ) { + if (tj == NULL || tj->offset() == 0) { derived_base_map[derived->_idx] = derived; return derived; } @@ -1837,9 +1837,9 @@ Node *derived = lrgs(neighbor)._def; const TypePtr *tj = derived->bottom_type()->isa_ptr(); assert(!derived->bottom_type()->isa_narrowoop() || - derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); + derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity"); // If its an OOP with a non-zero offset, then it is derived. - if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) { + if (tj && tj->offset() != 0 && tj->isa_oop_ptr()) { Node *base = find_base_for_derived(derived_base_map, derived, maxlrg); assert(base->_idx < _lrg_map.size(), ""); // Add reaching DEFs of derived pointer and base pointer as a @@ -2131,7 +2131,7 @@ void PhaseChaitin::dump_frame() const { const char *fp = OptoReg::regname(OptoReg::c_frame_pointer); - const TypeTuple *domain = C->tf()->domain(); + const TypeTuple *domain = C->tf()->domain_cc(); const int argcnt = domain->cnt() - TypeFunc::Parms; // Incoming arguments in registers dump @@ -2168,6 +2168,11 @@ _matcher._parm_regs[j].second() == reg ) { tty->print("parm %d: ",j); domain->field_at(j + TypeFunc::Parms)->dump(); + if (!C->FIRST_STACK_mask().Member(reg)) { + // Reserved entry in the argument stack area that is not used because + // it may hold the return address (see Matcher::init_first_stack_mask()). 
+ tty->print(" [RESERVED] "); + } tty->cr(); break; } --- old/src/hotspot/share/opto/classes.cpp 2019-03-11 14:26:38.350354710 +0100 +++ new/src/hotspot/share/opto/classes.cpp 2019-03-11 14:26:38.138354713 +0100 @@ -46,6 +46,7 @@ #include "opto/opaquenode.hpp" #include "opto/rootnode.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "opto/vectornode.hpp" #include "utilities/macros.hpp" #if INCLUDE_ZGC --- old/src/hotspot/share/opto/classes.hpp 2019-03-11 14:26:38.774354704 +0100 +++ new/src/hotspot/share/opto/classes.hpp 2019-03-11 14:26:38.566354707 +0100 @@ -320,6 +320,8 @@ macro(URShiftL) macro(XorI) macro(XorL) +macro(ValueType) +macro(ValueTypePtr) macro(Vector) macro(AddVB) macro(AddVS) --- old/src/hotspot/share/opto/compile.cpp 2019-03-11 14:26:39.238354698 +0100 +++ new/src/hotspot/share/opto/compile.cpp 2019-03-11 14:26:38.994354701 +0100 @@ -67,6 +67,7 @@ #include "opto/runtime.hpp" #include "opto/stringopts.hpp" #include "opto/type.hpp" +#include "opto/valuetypenode.hpp" #include "opto/vectornode.hpp" #include "runtime/arguments.hpp" #include "runtime/sharedRuntime.hpp" @@ -417,6 +418,10 @@ remove_opaque4_node(opaq); } } + // Remove useless value type nodes + if (_value_type_nodes != NULL) { + _value_type_nodes->remove_useless_nodes(useful.member_set()); + } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); bs->eliminate_useless_gc_barriers(useful, this); // clean up the late inline lists @@ -542,6 +547,12 @@ ResourceMark rm; _scratch_const_size = const_size; int size = C2Compiler::initial_code_buffer_size(const_size); +#ifdef ASSERT + if (C->has_scalarized_args()) { + // Oop verification for loading object fields from scalarized value types in the new entry point requires lots of space + size += 5120; + } +#endif blob = BufferBlob::create("Compile::scratch_buffer", size); // Record the buffer blob for next time. 
set_scratch_buffer_blob(blob); @@ -606,14 +617,21 @@ masm.bind(fakeL); n->as_MachBranch()->save_label(&saveL, &save_bnum); n->as_MachBranch()->label_set(&fakeL, 0); + } else if (n->is_MachProlog()) { + saveL = ((MachPrologNode*)n)->_verified_entry; + ((MachPrologNode*)n)->_verified_entry = &fakeL; } n->emit(buf, this->regalloc()); // Emitting into the scratch buffer should not fail assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason()); - if (is_branch) // Restore label. + // Restore label. + if (is_branch) { n->as_MachBranch()->label_set(saveL, save_bnum); + } else if (n->is_MachProlog()) { + ((MachPrologNode*)n)->_verified_entry = saveL; + } // End scratch_emit_size section. set_in_scratch_emit_size(false); @@ -646,6 +664,8 @@ _max_node_limit(MaxNodeLimit), _orig_pc_slot(0), _orig_pc_slot_offset_in_bytes(0), + _sp_inc_slot(0), + _sp_inc_slot_offset_in_bytes(0), _inlining_progress(false), _inlining_incrementally(false), _do_cleanup(false), @@ -781,7 +801,7 @@ } else { // Normal case. init_tf(TypeFunc::make(method())); - StartNode* s = new StartNode(root(), tf()->domain()); + StartNode* s = new StartNode(root(), tf()->domain_cc()); initial_gvn()->set_type_bottom(s); init_start(s); if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) { @@ -906,8 +926,15 @@ // Now that we know the size of all the monitors we can add a fixed slot // for the original deopt pc. - _orig_pc_slot = fixed_slots(); + _orig_pc_slot = fixed_slots(); int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size); + + if (needs_stack_repair()) { + // One extra slot for the special stack increment value + _sp_inc_slot = next_slot; + next_slot += 2; + } + set_fixed_slots(next_slot); // Compute when to use implicit null checks. 
Used by matching trap based @@ -933,6 +960,15 @@ _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size); } else { _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size); + if (_code_offsets.value(CodeOffsets::Verified_Value_Entry) == -1) { + _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, _first_block_size); + } + if (_code_offsets.value(CodeOffsets::Verified_Value_Entry_RO) == -1) { + _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, _first_block_size); + } + if (_code_offsets.value(CodeOffsets::Entry) == -1) { + _code_offsets.set_value(CodeOffsets::Entry, _first_block_size); + } _code_offsets.set_value(CodeOffsets::OSR_Entry, 0); } @@ -978,6 +1014,8 @@ _max_node_limit(MaxNodeLimit), _orig_pc_slot(0), _orig_pc_slot_offset_in_bytes(0), + _sp_inc_slot(0), + _sp_inc_slot_offset_in_bytes(0), _inlining_progress(false), _inlining_incrementally(false), _has_reserved_stack_access(false), @@ -1204,6 +1242,7 @@ _expensive_nodes = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); _range_check_casts = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); _opaque4_nodes = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); + _value_type_nodes = new (comp_arena()) Unique_Node_List(comp_arena()); register_library_intrinsics(); } @@ -1428,7 +1467,8 @@ // Process weird unsafe references. 
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) { - assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops"); + bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass(); + assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops"); assert(!is_known_inst, "scalarizable allocation should not have unsafe references"); tj = TypeOopPtr::BOTTOM; ptr = tj->ptr(); @@ -1445,16 +1485,18 @@ if ( offset != Type::OffsetBot && offset > arrayOopDesc::length_offset_in_bytes() ) { offset = Type::OffsetBot; // Flatten constant access into array body only - tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id()); } } else if( ta && _AliasLevel >= 2 ) { // For arrays indexed by constant indices, we flatten the alias // space to include all of the array body. Only the header, klass // and array length can be accessed un-aliased. + // For flattened value type array, each field has its own slice so + // we must include the field offset. if( offset != Type::OffsetBot ) { if( ta->const_oop() ) { // MethodData* or Method* offset = Type::OffsetBot; // Flatten constant access into array body - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset()); } else if( offset == arrayOopDesc::length_offset_in_bytes() ) { // range is OK as-is. 
tj = ta = TypeAryPtr::RANGE; @@ -1470,35 +1512,35 @@ ta = tj->isa_aryptr(); } else { // Random constant offset into array body offset = Type::OffsetBot; // Flatten constant access into array body - tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset()); } } // Arrays of fixed size alias with arrays of unknown size. if (ta->size() != TypeInt::POS) { const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset()); } // Arrays of known objects become arrays of unknown objects. if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset()); } if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset()); } // Arrays of bytes and of booleans both use 'bastore' and 'baload' so // cannot be distinguished by bytecode alone. if (ta->elem() == TypeInt::BOOL) { const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size()); ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset()); } // During the 2nd round of IterGVN, NotNull castings are removed. 
// Make sure the Bottom and NotNull variants alias the same. // Also, make sure exact and non-exact variants alias the same. if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) { - tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); + tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset()); } } @@ -1512,7 +1554,7 @@ // No constant oop pointers (such as Strings); they alias with // unknown strings. assert(!is_known_inst, "not scalarizable allocation"); - tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); + tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset)); } } else if( is_known_inst ) { tj = to; // Keep NotNull and klass_is_exact for instance type @@ -1520,17 +1562,17 @@ // During the 2nd round of IterGVN, NotNull castings are removed. // Make sure the Bottom and NotNull variants alias the same. // Also, make sure exact and non-exact variants alias the same. - tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); + tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset)); } if (to->speculative() != NULL) { - tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id()); + tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->instance_id()); } // Canonicalize the holder of this field if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { // First handle header references such as a LoadKlassNode, even if the // object's klass is unloaded at compile time (4965979). 
if (!is_known_inst) { // Do it only for non-instance types - tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); + tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset)); } } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) { to = tj->is_instptr(); @@ -1546,9 +1588,9 @@ ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); if (!k->equals(canonical_holder) || tj->offset() != offset) { if( is_known_inst ) { - tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), to->instance_id()); } else { - tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset)); } } } @@ -1565,15 +1607,15 @@ tj = tk = TypeKlassPtr::make(TypePtr::NotNull, TypeKlassPtr::OBJECT->klass(), - offset); + Type::Offset(offset)); } ciKlass* klass = tk->klass(); - if( klass->is_obj_array_klass() ) { + if (klass != NULL && klass->is_obj_array_klass()) { ciKlass* k = TypeAryPtr::OOPS->klass(); if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs k = TypeInstPtr::BOTTOM->klass(); - tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset ); + tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset)); } // Check for precise loads from the primary supertype array and force them @@ -1589,7 +1631,7 @@ offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) || offset == (int)in_bytes(Klass::secondary_super_cache_offset())) { offset = in_bytes(Klass::secondary_super_cache_offset()); - tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset ); + tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset)); } } @@ -1784,14 +1826,22 @@ && 
flat->is_instptr()->klass() == env()->Class_klass()) alias_type(idx)->set_rewritable(false); } + ciField* field = NULL; if (flat->isa_aryptr()) { #ifdef ASSERT const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); // (T_BYTE has the weakest alignment and size restrictions...) assert(flat->offset() < header_size_min, "array body reference must be OffsetBot"); #endif + const Type* elemtype = flat->is_aryptr()->elem(); if (flat->offset() == TypePtr::OffsetBot) { - alias_type(idx)->set_element(flat->is_aryptr()->elem()); + alias_type(idx)->set_element(elemtype); + } + int field_offset = flat->is_aryptr()->field_offset().get(); + if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) { + ciValueKlass* vk = elemtype->is_valuetype()->value_klass(); + field_offset += vk->first_field_offset(); + field = vk->get_field_by_offset(field_offset, false); } } if (flat->isa_klassptr()) { @@ -1811,25 +1861,28 @@ // Check for final fields. const TypeInstPtr* tinst = flat->isa_instptr(); if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { - ciField* field; if (tinst->const_oop() != NULL && tinst->klass() == ciEnv::current()->Class_klass() && tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) { // static field ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); field = k->get_field_by_offset(tinst->offset(), true); + } else if (tinst->klass()->is_valuetype()) { + // Value type field + ciValueKlass* vk = tinst->value_klass(); + field = vk->get_field_by_offset(tinst->offset(), false); } else { - ciInstanceKlass *k = tinst->klass()->as_instance_klass(); + ciInstanceKlass* k = tinst->klass()->as_instance_klass(); field = k->get_field_by_offset(tinst->offset(), false); } - assert(field == NULL || - original_field == NULL || - (field->holder() == original_field->holder() && - field->offset() == original_field->offset() && - field->is_static() == 
original_field->is_static()), "wrong field?"); - // Set field() and is_rewritable() attributes. - if (field != NULL) alias_type(idx)->set_field(field); } + assert(field == NULL || + original_field == NULL || + (field->holder() == original_field->holder() && + field->offset() == original_field->offset() && + field->is_static() == original_field->is_static()), "wrong field?"); + // Set field() and is_rewritable() attributes. + if (field != NULL) alias_type(idx)->set_field(field); } // Fill the cache for next time. @@ -2003,6 +2056,37 @@ assert(opaque4_count() == 0, "should be empty"); } +void Compile::add_value_type(Node* n) { + assert(n->is_ValueTypeBase(), "unexpected node"); + if (_value_type_nodes != NULL) { + _value_type_nodes->push(n); + } +} + +void Compile::remove_value_type(Node* n) { + assert(n->is_ValueTypeBase(), "unexpected node"); + if (_value_type_nodes != NULL) { + _value_type_nodes->remove(n); + } +} + +void Compile::process_value_types(PhaseIterGVN &igvn) { + // Make value types scalar in safepoints + while (_value_type_nodes->size() != 0) { + ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase(); + vt->make_scalar_in_safepoints(&igvn); + if (vt->is_ValueTypePtr()) { + igvn.replace_node(vt, vt->get_oop()); + } else { + if (vt->outcnt() == 0) { + igvn.remove_dead_node(vt); + } + } + } + _value_type_nodes = NULL; + igvn.optimize(); +} + // StringOpts and late inlining of string methods void Compile::inline_string_calls(bool parse_time) { { @@ -2277,6 +2361,11 @@ igvn.optimize(); } + if (_value_type_nodes->size() > 0) { + // Do this once all inlining is over to avoid getting inconsistent debug info + process_value_types(igvn); + } + // Perform escape analysis if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) { if (has_loops()) { @@ -2442,7 +2531,6 @@ print_method(PHASE_OPTIMIZE_FINISHED, 2); } - //------------------------------Code_Gen--------------------------------------- // Given a graph, generate code for it void 
Compile::Code_Gen() { @@ -2757,6 +2845,7 @@ } } + //------------------------------final_graph_reshaping_impl---------------------- // Implement items 1-5 from final_graph_reshaping below. void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) { @@ -3491,6 +3580,14 @@ } break; } +#ifdef ASSERT + case Op_ValueTypePtr: + case Op_ValueType: { + n->dump(-1); + assert(false, "value type node was not removed"); + break; + } +#endif default: assert(!n->is_Call(), ""); assert(!n->is_Mem(), ""); @@ -4151,7 +4248,7 @@ // (2) subklass does not overlap with superklass => always fail // (3) superklass has NO subtypes and we can check with a simple compare. int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) { - if (StressReflectiveCode) { + if (StressReflectiveCode || superk == NULL || subk == NULL) { return SSC_full_test; // Let caller generate the general case. } @@ -4600,6 +4697,27 @@ } } +Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) { + const TypeInstPtr* ta = phase->type(a)->isa_instptr(); + const TypeInstPtr* tb = phase->type(b)->isa_instptr(); + if (!EnableValhalla || ta == NULL || tb == NULL || + ta->is_zero_type() || tb->is_zero_type() || + !ta->can_be_value_type() || !tb->can_be_value_type()) { + // Use old acmp if one operand is null or not a value type + return new CmpPNode(a, b); + } else if (ta->is_valuetypeptr() || tb->is_valuetypeptr()) { + // We know that one operand is a value type. Therefore, + // new acmp will only return true if both operands are NULL. + // Check if both operands are null by or'ing the oops. + a = phase->transform(new CastP2XNode(NULL, a)); + b = phase->transform(new CastP2XNode(NULL, b)); + a = phase->transform(new OrXNode(a, b)); + return new CmpXNode(a, phase->MakeConX(0)); + } + // Use new acmp + return NULL; +} + // Auxiliary method to support randomized stressing/fuzzing. 
// // This method can be called the arbitrary number of times, with current count --- old/src/hotspot/share/opto/compile.hpp 2019-03-11 14:26:39.718354691 +0100 +++ new/src/hotspot/share/opto/compile.hpp 2019-03-11 14:26:39.486354694 +0100 @@ -48,6 +48,7 @@ class AddPNode; class Block; class Bundle; +class CallNode; class C2Compiler; class CallGenerator; class CloneMap; @@ -84,6 +85,7 @@ class TypePtr; class TypeOopPtr; class TypeFunc; +class ValueTypeBaseNode; class Unique_Node_List; class nmethod; class WarmCallInfo; @@ -380,6 +382,10 @@ int _orig_pc_slot; int _orig_pc_slot_offset_in_bytes; + // For value type calling convention + int _sp_inc_slot; + int _sp_inc_slot_offset_in_bytes; + int _major_progress; // Count of something big happening bool _inlining_progress; // progress doing incremental inlining? bool _inlining_incrementally;// Are we doing incremental inlining (post parse) @@ -430,6 +436,7 @@ GrowableArray* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common GrowableArray* _range_check_casts; // List of CastII nodes with a range check dependency GrowableArray* _opaque4_nodes; // List of Opaque4 nodes that have a default value + Unique_Node_List* _value_type_nodes; // List of ValueType nodes ConnectionGraph* _congraph; #ifndef PRODUCT IdealGraphPrinter* _printer; @@ -715,6 +722,11 @@ uint max_node_limit() const { return (uint)_max_node_limit; } void set_max_node_limit(uint n) { _max_node_limit = n; } + // Support for scalarized value type calling convention + bool has_scalarized_args() const { return _method != NULL && _method->has_scalarized_args(); } + bool needs_stack_repair() const { return _method != NULL && _method->get_Method()->needs_stack_repair(); } + int sp_inc_offset() const { return _sp_inc_slot_offset_in_bytes; } + // check the CompilerOracle for special behaviours for this compile bool method_has_option(const char * option) { return method() != NULL && 
method()->has_option(option); @@ -836,6 +848,12 @@ int opaque4_count() const { return _opaque4_nodes->length(); } void remove_opaque4_nodes(PhaseIterGVN &igvn); + // Keep track of value type nodes for later processing + void add_value_type(Node* n); + void remove_value_type(Node* n); + void process_value_types(PhaseIterGVN &igvn); + bool can_add_value_type() const { return _value_type_nodes != NULL; } + // remove the opaque nodes that protect the predicates so that the unused checks and // uncommon traps will be eliminated from the graph. void cleanup_loop_predicates(PhaseIterGVN &igvn); @@ -1368,6 +1386,8 @@ // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check) static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl); + Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b); + // Auxiliary method for randomized fuzzing/stressing static bool randomized_select(int count); --- old/src/hotspot/share/opto/divnode.cpp 2019-03-11 14:26:40.762354677 +0100 +++ new/src/hotspot/share/opto/divnode.cpp 2019-03-11 14:26:40.486354681 +0100 @@ -1311,7 +1311,7 @@ //------------------------------match------------------------------------------ // return result(s) along with their RegMask info -Node *DivModINode::match( const ProjNode *proj, const Matcher *match ) { +Node *DivModINode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { @@ -1326,7 +1326,7 @@ //------------------------------match------------------------------------------ // return result(s) along with their RegMask info -Node *DivModLNode::match( const ProjNode *proj, const Matcher *match ) { +Node *DivModLNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { --- old/src/hotspot/share/opto/divnode.hpp 2019-03-11 14:26:41.250354670 
+0100 +++ new/src/hotspot/share/opto/divnode.hpp 2019-03-11 14:26:41.014354673 +0100 @@ -165,7 +165,7 @@ DivModINode( Node *c, Node *dividend, Node *divisor ) : DivModNode(c, dividend, divisor) {} virtual int Opcode() const; virtual const Type *bottom_type() const { return TypeTuple::INT_PAIR; } - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); // Make a divmod and associated projections from a div or mod. static DivModINode* make(Node* div_or_mod); @@ -178,7 +178,7 @@ DivModLNode( Node *c, Node *dividend, Node *divisor ) : DivModNode(c, dividend, divisor) {} virtual int Opcode() const; virtual const Type *bottom_type() const { return TypeTuple::LONG_PAIR; } - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); // Make a divmod and associated projections from a div or mod. static DivModLNode* make(Node* div_or_mod); --- old/src/hotspot/share/opto/doCall.cpp 2019-03-11 14:26:41.730354663 +0100 +++ new/src/hotspot/share/opto/doCall.cpp 2019-03-11 14:26:41.474354667 +0100 @@ -38,6 +38,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "prims/nativeLookup.hpp" #include "runtime/sharedRuntime.hpp" @@ -537,7 +538,12 @@ ciKlass* speculative_receiver_type = NULL; if (is_virtual_or_interface) { Node* receiver_node = stack(sp() - nargs); - const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); + const TypeOopPtr* receiver_type = NULL; + if (receiver_node->is_ValueType()) { + receiver_type = TypeInstPtr::make(TypePtr::NotNull, _gvn.type(receiver_node)->is_valuetype()->value_klass()); + } else { + receiver_type = _gvn.type(receiver_node)->isa_oopptr(); + } // call_does_dispatch and vtable_index are out-parameters. They might be changed. // For arrays, klass below is Object. 
When vtable calls are used, // resolving the call with Object would allow an illegal call to @@ -624,7 +630,7 @@ Node* receiver = has_receiver ? argument(0) : NULL; // The extra CheckCastPPs for speculative types mess with PhaseStringOpts - if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) { + if (receiver != NULL && !receiver->is_ValueType() && !call_does_dispatch && !cg->is_string_late_inline()) { // Feed profiling data for a single receiver to the type system so // it can propagate it as a speculative type receiver = record_profiled_receiver_for_speculation(receiver); @@ -693,19 +699,23 @@ BasicType rt = rtype->basic_type(); BasicType ct = ctype->basic_type(); if (ct == T_VOID) { - // It's OK for a method to return a value that is discarded. + // It's OK for a method to return a value that is discarded. // The discarding does not require any special action from the caller. // The Java code knows this, at VerifyType.isNullConversion. pop_node(rt); // whatever it was, pop it } else if (rt == T_INT || is_subword_type(rt)) { // Nothing. These cases are handled in lambda form bytecode. 
assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct)); - } else if (rt == T_OBJECT || rt == T_ARRAY) { - assert(ct == T_OBJECT || ct == T_ARRAY, "rt=%s, ct=%s", type2name(rt), type2name(ct)); + } else if (rt == T_OBJECT || rt == T_ARRAY || rt == T_VALUETYPE) { + assert(ct == T_OBJECT || ct == T_ARRAY || ct == T_VALUETYPE, "rt=%s, ct=%s", type2name(rt), type2name(ct)); if (ctype->is_loaded()) { const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { + if (declared_signature->returns_never_null()) { + assert(ct == T_VALUETYPE, "should be a value type"); + sig_type = sig_type->join_speculative(TypePtr::NOTNULL); + } + if (arg_type != NULL && !arg_type->higher_equal(sig_type) && !peek()->is_ValueType()) { Node* retnode = pop(); Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); push(cast_obj); @@ -731,6 +741,14 @@ "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()); } + if (rtype->basic_type() == T_VALUETYPE && !peek()->is_ValueType()) { + Node* retnode = pop(); + if (!gvn().type(retnode)->maybe_null() && rtype->as_value_klass()->is_scalarizable()) { + retnode = ValueTypeNode::make_from_oop(this, retnode, rtype->as_value_klass()); + } + push_node(T_VALUETYPE, retnode); + } + // If the return type of the method is not loaded, assert that the // value we got is a null. Otherwise, we need to recompile. 
if (!rtype->is_loaded()) { --- old/src/hotspot/share/opto/escape.cpp 2019-03-11 14:26:42.230354656 +0100 +++ new/src/hotspot/share/opto/escape.cpp 2019-03-11 14:26:41.978354660 +0100 @@ -141,6 +141,16 @@ java_objects_worklist.append(phantom_obj); for( uint next = 0; next < ideal_nodes.size(); ++next ) { Node* n = ideal_nodes.at(next); + if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) && + !n->in(MemNode::Address)->is_AddP() && + _igvn->type(n->in(MemNode::Address))->isa_oopptr()) { + // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA + Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0)); + _igvn->register_new_node_with_optimizer(addp); + _igvn->replace_input_of(n, MemNode::Address, addp); + ideal_nodes.push(addp); + _nodes.at_put_grow(addp->_idx, NULL, NULL); + } // Create PointsTo nodes and add them to Connection Graph. Called // only once per ideal node since ideal_nodes is Unique_Node list. add_node_to_connection_graph(n, &delayed_worklist); @@ -163,7 +173,8 @@ // scalar replaceable objects in split_unique_types(). _mergemem_worklist.append(n->as_MergeMem()); } else if (OptimizePtrCompare && n->is_Cmp() && - (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { + ((n->Opcode() == Op_CmpP && !(((CmpPNode*)n)->has_perturbed_operand() != NULL)) || + n->Opcode() == Op_CmpN)) { // Collect compare pointers nodes. 
ptr_cmp_worklist.append(n); } else if (n->is_MemBarStoreStore()) { @@ -373,6 +384,17 @@ (n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method())) { add_call_node(n->as_Call()); + } else if (n->as_Call()->tf()->returns_value_type_as_fields()) { + bool returns_oop = false; + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) { + ProjNode* pn = n->fast_out(i)->as_Proj(); + if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) { + returns_oop = true; + } + } + if (returns_oop) { + add_call_node(n->as_Call()); + } } } return; @@ -481,8 +503,10 @@ } case Op_Proj: { // we are only interested in the oop result projection from a call - if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && - n->in(0)->as_Call()->returns_pointer()) { + if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && + (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) { + assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) || + n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?"); add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist); } @@ -637,8 +661,10 @@ } case Op_Proj: { // we are only interested in the oop result projection from a call - if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && - n->in(0)->as_Call()->returns_pointer()) { + if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && + (n->in(0)->as_Call()->returns_pointer()|| n->bottom_type()->isa_ptr())) { + assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) || + n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?"); add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); break; } @@ -800,7 +826,7 @@ } void ConnectionGraph::add_call_node(CallNode* call) { - assert(call->returns_pointer(), "only for call which returns 
pointer"); + assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for call which returns pointer"); uint call_idx = call->_idx; if (call->is_Allocate()) { Node* k = call->in(AllocateNode::KlassNode); @@ -887,7 +913,7 @@ ptnode_adr(call_idx)->set_scalar_replaceable(false); } else { // Determine whether any arguments are returned. - const TypeTuple* d = call->tf()->domain(); + const TypeTuple* d = call->tf()->domain_cc(); bool ret_arg = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { if (d->field_at(i)->isa_ptr() != NULL && @@ -934,7 +960,7 @@ case Op_CallLeaf: { // Stub calls, objects do not escape but they are not scale replaceable. // Adjust escape state for outgoing arguments. - const TypeTuple * d = call->tf()->domain(); + const TypeTuple * d = call->tf()->domain_sig(); bool src_has_oops = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); @@ -964,7 +990,10 @@ aat->isa_ptr() != NULL, "expecting an Ptr"); bool arg_has_oops = aat->isa_oopptr() && (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || - (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); + (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()) || + (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL && + aat->isa_aryptr()->elem()->isa_valuetype() && + aat->isa_aryptr()->elem()->isa_valuetype()->value_klass()->contains_oops())); if (i == TypeFunc::Parms) { src_has_oops = arg_has_oops; } @@ -1003,7 +1032,9 @@ strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || - strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0) + strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || + strcmp(call->as_CallLeaf()->_name, "load_unknown_value") == 0 || + strcmp(call->as_CallLeaf()->_name, "store_unknown_value") == 0) ))) { call->dump(); 
fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); @@ -1062,7 +1093,7 @@ // fall-through if not a Java method or no analyzer information if (call_analyzer != NULL) { PointsToNode* call_ptn = ptnode_adr(call->_idx); - const TypeTuple* d = call->tf()->domain(); + const TypeTuple* d = call->tf()->domain_cc(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); int k = i - TypeFunc::Parms; @@ -1106,7 +1137,7 @@ // Fall-through here if not a Java method or no analyzer information // or some other type of call, assume the worst case: all arguments // globally escape. - const TypeTuple* d = call->tf()->domain(); + const TypeTuple* d = call->tf()->domain_cc(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); if (at->isa_oopptr() != NULL) { @@ -1627,9 +1658,9 @@ if (missed_obj != NULL) { tty->print_cr("----------field---------------------------------"); field->dump(); - tty->print_cr("----------missed referernce to object-----------"); + tty->print_cr("----------missed reference to object------------"); missed_obj->dump(); - tty->print_cr("----------object referernced by init store -----"); + tty->print_cr("----------object referenced by init store-------"); store->dump(); val->dump(); assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); @@ -2065,8 +2096,9 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { const Type* adr_type = n->as_AddP()->bottom_type(); + int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot; BasicType bt = T_INT; - if (offset == Type::OffsetBot) { + if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) { // Check only oop fields. 
if (!adr_type->isa_aryptr() || (adr_type->isa_aryptr()->klass() == NULL) || @@ -2078,7 +2110,7 @@ } } else if (offset != oopDesc::klass_offset_in_bytes()) { if (adr_type->isa_instptr()) { - ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); + ciField* field = _compile->alias_type(adr_type->is_ptr())->field(); if (field != NULL) { bt = field->layout_type(); } else { @@ -2098,7 +2130,13 @@ // Ignore first AddP. } else { const Type* elemtype = adr_type->isa_aryptr()->elem(); - bt = elemtype->array_element_basic_type(); + if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) { + ciValueKlass* vk = elemtype->is_valuetype()->value_klass(); + field_offset += vk->first_field_offset(); + bt = vk->get_field_by_offset(field_offset, false)->layout_type(); + } else { + bt = elemtype->array_element_basic_type(); + } } } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { // Allocation initialization, ThreadLocal field access, unsafe access @@ -2110,12 +2148,12 @@ } } } - return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); + return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_NARROWOOP || bt == T_ARRAY); } // Returns unique pointed java object or NULL. JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { - assert(!_collecting, "should not call when contructed graph"); + assert(!_collecting, "should not call when constructed graph"); // If the node was created after the escape computation we can't answer. 
uint idx = n->_idx; if (idx >= nodes_size()) { @@ -2252,9 +2290,7 @@ "offset must be a constant or it is initialization of array"); return offs; } - const TypePtr *t_ptr = adr_type->isa_ptr(); - assert(t_ptr != NULL, "must be a pointer type"); - return t_ptr->offset(); + return adr_type->is_ptr()->flattened_offset(); } Node* ConnectionGraph::get_addp_base(Node *addp) { @@ -2409,9 +2445,16 @@ assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); assert(offs != Type::OffsetBot, "offset must be a constant"); - t = base_t->add_offset(offs)->is_oopptr(); + if (base_t->isa_aryptr() != NULL) { + // In the case of a flattened value type array, each field has its + // own slice so we need to extract the field being accessed from + // the address computation + t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr(); + } else { + t = base_t->add_offset(offs)->is_oopptr(); + } } - int inst_id = base_t->instance_id(); + int inst_id = base_t->instance_id(); assert(!t->is_known_instance() || t->instance_id() == inst_id, "old type must be non-instance or match new type"); @@ -2425,7 +2468,7 @@ // of the allocation type was not propagated to the subclass type check. // // Or the type 't' could be not related to 'base_t' at all. - // It could happened when CHA type is different from MDO type on a dead path + // It could happen when CHA type is different from MDO type on a dead path // (for example, from instanceof check) which is not collapsed during parsing. 
// // Do nothing for such AddP node and don't process its users since @@ -2435,7 +2478,13 @@ !base_t->klass()->is_subtype_of(t->klass())) { return false; // bail out } - const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); + const TypePtr* tinst = base_t->add_offset(t->offset()); + if (tinst->isa_aryptr() && t->isa_aryptr()) { + // In the case of a flattened value type array, each field has its + // own slice so we need to keep track of the field being accessed. + tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get()); + } + // Do NOT remove the next line: ensure a new alias index is allocated // for the instance type. Note: C++ will not remove it since the call // has side effect. @@ -3139,7 +3188,7 @@ // push allocation's users on appropriate worklist for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node *use = n->fast_out(i); - if(use->is_Mem() && use->in(MemNode::Address) == n) { + if (use->is_Mem() && use->in(MemNode::Address) == n) { // Load/store to instance's field memnode_worklist.append_if_missing(use); } else if (use->is_MemBar()) { @@ -3176,6 +3225,14 @@ // EncodeISOArray overwrites destination array memnode_worklist.append_if_missing(use); } + } else if (use->Opcode() == Op_Return) { + assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type"); + // Get ValueKlass by removing the tag bit from the metadata pointer + Node* klass = use->in(TypeFunc::Parms); + intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con(); + clear_nth_bit(ptr, 0); + assert(Metaspace::contains((void*)ptr), "should be klass"); + assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field"); } else { uint op = use->Opcode(); if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) && @@ -3187,7 +3244,8 @@ op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == 
Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || - BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { + BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) || + op == Op_ValueType)) { n->dump(); use->dump(); assert(false, "EA: missing allocation reference path"); @@ -3254,6 +3312,9 @@ // get the memory projection n = n->find_out_with(Op_SCMemProj); assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); + } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL && + strcmp(n->as_CallLeaf()->_name, "store_unknown_value") == 0) { + n = n->as_CallLeaf()->proj_out(TypeFunc::Memory); } else { assert(n->is_Mem(), "memory node required."); Node *addr = n->in(MemNode::Address); @@ -3294,7 +3355,7 @@ memnode_worklist.append_if_missing(use); } #ifdef ASSERT - } else if(use->is_Mem()) { + } else if (use->is_Mem()) { assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); } else if (use->is_MergeMem()) { assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); @@ -3303,6 +3364,10 @@ // EncodeISOArray overwrites destination array memnode_worklist.append_if_missing(use); } + } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL && + strcmp(use->as_CallLeaf()->_name, "store_unknown_value") == 0) { + // store_unknown_value overwrites destination array + memnode_worklist.append_if_missing(use); } else { uint op = use->Opcode(); if ((use->in(MemNode::Memory) == n) && @@ -3324,7 +3389,7 @@ // Phase 3: Process MergeMem nodes from mergemem_worklist. // Walk each memory slice moving the first node encountered of each - // instance type to the the input corresponding to its alias index. + // instance type to the input corresponding to its alias index. 
uint length = _mergemem_worklist.length(); for( uint next = 0; next < length; ++next ) { MergeMemNode* nmm = _mergemem_worklist.at(next); @@ -3396,8 +3461,8 @@ // the Memory input of memnodes // First update the inputs of any non-instance Phi's from // which we split out an instance Phi. Note we don't have - // to recursively process Phi's encounted on the input memory - // chains as is done in split_memory_phi() since they will + // to recursively process Phi's encountered on the input memory + // chains as is done in split_memory_phi() since they will // also be processed here. for (int j = 0; j < orig_phis.length(); j++) { PhiNode *phi = orig_phis.at(j); --- old/src/hotspot/share/opto/generateOptoStub.cpp 2019-03-11 14:26:42.726354650 +0100 +++ new/src/hotspot/share/opto/generateOptoStub.cpp 2019-03-11 14:26:42.478354653 +0100 @@ -47,8 +47,8 @@ bool return_pc) { ResourceMark rm; - const TypeTuple *jdomain = C->tf()->domain(); - const TypeTuple *jrange = C->tf()->range(); + const TypeTuple *jdomain = C->tf()->domain_sig(); + const TypeTuple *jrange = C->tf()->range_sig(); // The procedure start StartNode* start = new StartNode(root(), jdomain); @@ -286,7 +286,7 @@ exit_memory, frameptr(), returnadr()); - if (C->tf()->range()->cnt() > TypeFunc::Parms) + if (C->tf()->range_sig()->cnt() > TypeFunc::Parms) ret->add_req( map()->in(TypeFunc::Parms) ); break; case 1: // This is a fancy tail-call jump. Jump to computed address. 
--- old/src/hotspot/share/opto/graphKit.cpp 2019-03-11 14:26:43.210354643 +0100 +++ new/src/hotspot/share/opto/graphKit.cpp 2019-03-11 14:26:42.962354646 +0100 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciUtilities.hpp" #include "compiler/compileLog.hpp" +#include "ci/ciValueKlass.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/c2/barrierSetC2.hpp" #include "interpreter/interpreter.hpp" @@ -37,24 +38,34 @@ #include "opto/intrinsicnode.hpp" #include "opto/locknode.hpp" #include "opto/machnode.hpp" +#include "opto/narrowptrnode.hpp" #include "opto/opaquenode.hpp" #include "opto/parse.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/deoptimization.hpp" #include "runtime/sharedRuntime.hpp" //----------------------------GraphKit----------------------------------------- // Main utility constructor. -GraphKit::GraphKit(JVMState* jvms) +GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn) : Phase(Phase::Parser), _env(C->env()), - _gvn(*C->initial_gvn()), + _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()), _barrier_set(BarrierSet::barrier_set()->barrier_set_c2()) { + assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled"); _exceptions = jvms->map()->next_exception(); if (_exceptions != NULL) jvms->map()->set_next_exception(NULL); set_jvms(jvms); +#ifdef ASSERT + if (_gvn.is_IterGVN() != NULL) { + assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used"); + // Save the initial size of _for_igvn worklist for verification (see ~GraphKit) + _worklist_size = _gvn.C->for_igvn()->size(); + } +#endif } // Private constructor for parser. 
@@ -823,7 +834,7 @@ if (cur_method != NULL && cur_bci != InvocationEntryBci) { Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); return Interpreter::bytecode_should_reexecute(code) || - (is_anewarray && code == Bytecodes::_multianewarray); + (is_anewarray && (code == Bytecodes::_multianewarray)); // Reexecute _multianewarray bytecode which was replaced with // sequence of [a]newarray. See Parse::do_multianewarray(). // @@ -831,8 +842,9 @@ // is limited by dimensions and guarded by flag so in some cases // multianewarray() runtime calls will be generated and // the bytecode should not be reexecutes (stack will not be reset). - } else + } else { return false; + } } // Helper function for adding JVMState and debug information to node @@ -1076,6 +1088,15 @@ } break; + case Bytecodes::_withfield: { + bool ignored_will_link; + ciField* field = method()->get_field_at_bci(bci(), ignored_will_link); + int size = field->type()->size(); + inputs = size+1; + depth = rsize - inputs; + break; + } + case Bytecodes::_ireturn: case Bytecodes::_lreturn: case Bytecodes::_freturn: @@ -1201,6 +1222,7 @@ switch(type) { case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break; case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break; + case T_VALUETYPE : // fall through case T_ARRAY : // fall through type = T_OBJECT; // simplify further tests case T_OBJECT : { @@ -1372,10 +1394,28 @@ return value; } +Node* GraphKit::null2default(Node* value, ciValueKlass* vk) { + Node* null_ctl = top(); + value = null_check_oop(value, &null_ctl); + if (!null_ctl->is_top()) { + // Return default value if oop is null + Node* region = new RegionNode(3); + region->init_req(1, control()); + region->init_req(2, null_ctl); + value = PhiNode::make(region, value, TypeInstPtr::make(TypePtr::BotPTR, vk)); + value->set_req(2, ValueTypeNode::default_oop(gvn(), vk)); + set_control(gvn().transform(region)); + value = gvn().transform(value); + } + return value; +} 
//------------------------------cast_not_null---------------------------------- // Cast obj to not-null on this path Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { + if (obj->is_ValueType()) { + return obj; + } const Type *t = _gvn.type(obj); const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL); // Object is already not-null? @@ -1504,7 +1544,8 @@ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe); } ld = _gvn.transform(ld); - if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) { + + if (((bt == T_OBJECT || bt == T_VALUETYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) { // Improve graph before escape analysis and boxing elimination. record_for_igvn(ld); } @@ -1555,7 +1596,8 @@ Node* val, const Type* val_type, BasicType bt, - DecoratorSet decorators) { + DecoratorSet decorators, + bool deoptimize_on_exception) { // Transformation of a value which could be NULL pointer (CastPP #NULL) // could be delayed during Parse (for example, in adjust_map_after_if()). // Execute transformation here to avoid barrier generation in such case. 
@@ -1568,6 +1610,10 @@ } assert(val != NULL, "not dead path"); + if (val->is_ValueType()) { + // Allocate value type and get oop + val = val->as_ValueType()->allocate(this, deoptimize_on_exception)->get_oop(); + } C2AccessValuePtr addr(adr, adr_type); C2AccessValue value(val, val_type); @@ -1688,8 +1734,8 @@ } } -void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) { - return _barrier_set->clone(this, src, dst, size, is_array); +void GraphKit::access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array) { + return _barrier_set->clone(this, src_base, dst_base, countx, is_array); } Node* GraphKit::access_resolve(Node* n, DecoratorSet decorators) { @@ -1704,6 +1750,11 @@ Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, const TypeInt* sizetype, Node* ctrl) { uint shift = exact_log2(type2aelembytes(elembt)); + ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass(); + if (arytype_klass->is_value_array_klass()) { + ciValueArrayKlass* vak = arytype_klass->as_value_array_klass(); + shift = vak->log2_element_size(); + } uint header = arrayOopDesc::base_offset_in_bytes(elembt); // short-circuit a common case (saves lots of confusing waste motion) @@ -1724,6 +1775,7 @@ Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { const Type* elemtype = arytype->elem(); BasicType elembt = elemtype->array_element_basic_type(); + assert(elembt != T_VALUETYPE, "value types are not supported by this method"); Node* adr = array_element_address(ary, idx, elembt, arytype->size()); if (elembt == T_NARROWOOP) { elembt = T_OBJECT; // To satisfy switch in LoadNode::make() @@ -1734,12 +1786,45 @@ //-------------------------set_arguments_for_java_call------------------------- // Arguments (pre-popped from the stack) are taken from the JVMS. 
-void GraphKit::set_arguments_for_java_call(CallJavaNode* call) { +void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining) { // Add the call arguments: - uint nargs = call->method()->arg_size(); - for (uint i = 0; i < nargs; i++) { - Node* arg = argument(i); - call->init_req(i + TypeFunc::Parms, arg); + const TypeTuple* domain = call->tf()->domain_sig(); + ExtendedSignature sig_cc = ExtendedSignature(call->method()->get_sig_cc(), SigEntryFilter()); + uint nargs = domain->cnt(); + for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) { + Node* arg = argument(i-TypeFunc::Parms); + const Type* t = domain->field_at(i); + if (call->method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) { + // We don't pass value type arguments by reference but instead + // pass each field of the value type + ValueTypeNode* vt = arg->isa_ValueType(); + if (vt == NULL) { + // TODO why is that?? Shouldn't we always see a valuetype node here? + vt = ValueTypeNode::make_from_oop(this, arg, t->value_klass()); + } + vt->pass_fields(this, call, sig_cc, idx); + // If a value type argument is passed as fields, attach the Method* to the call site + // to be able to access the extended signature later via attached_method_before_pc(). + // For example, see CompiledMethod::preserve_callee_argument_oops(). 
+ call->set_override_symbolic_info(true); + continue; + } else if (arg->is_ValueType()) { + // Pass value type argument via oop to callee + if (!incremental_inlining) { + arg = arg->as_ValueType()->allocate(this)->get_oop(); + } else { + arg = ValueTypePtrNode::make_from_value_type(this, arg->as_ValueType(), false); + } + } + call->init_req(idx++, arg); + // Skip reserved arguments + BasicType bt = t->basic_type(); + while (SigEntry::next_is_reserved(sig_cc, bt, true)) { + call->init_req(idx++, top()); + if (type2size[bt] == 2) { + call->init_req(idx++, top()); + } + } } } @@ -1777,13 +1862,6 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) { if (stopped()) return top(); // maybe the call folded up? - // Capture the return value, if any. - Node* ret; - if (call->method() == NULL || - call->method()->return_type()->basic_type() == T_VOID) - ret = top(); - else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); - // Note: Since any out-of-line call can produce an exception, // we always insert an I_O projection from the call into the result. @@ -1796,6 +1874,25 @@ set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); } + + // Capture the return value, if any. + Node* ret; + if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) { + ret = top(); + } else if (call->tf()->returns_value_type_as_fields()) { + // Return of multiple values (value type fields): we create a + // ValueType node, each field is a projection from the call. 
+ ciValueKlass* vk = call->method()->return_type()->as_value_klass(); + const Array* sig_array = vk->extended_sig(); + GrowableArray sig = GrowableArray(sig_array->length()); + sig.appendAll(sig_array); + ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter()); + uint base_input = TypeFunc::Parms + 1; + ret = ValueTypeNode::make_from_multi(this, call, sig_cc, vk, base_input, false); + } else { + ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); + } + return ret; } @@ -1874,8 +1971,7 @@ SafePointNode* final_state = stop(); // Find all the needed outputs of this call - CallProjections callprojs; - call->extract_projections(&callprojs, true); + CallProjections* callprojs = call->extract_projections(true); Node* init_mem = call->in(TypeFunc::Memory); Node* final_mem = final_state->in(TypeFunc::Memory); @@ -1883,39 +1979,40 @@ Node* final_io = final_state->in(TypeFunc::I_O); // Replace all the old call edges with the edges from the inlining result - if (callprojs.fallthrough_catchproj != NULL) { - C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl); + if (callprojs->fallthrough_catchproj != NULL) { + C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl); } - if (callprojs.fallthrough_memproj != NULL) { + if (callprojs->fallthrough_memproj != NULL) { if (final_mem->is_MergeMem()) { // Parser's exits MergeMem was not transformed but may be optimized final_mem = _gvn.transform(final_mem); } - C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem); + C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem); } - if (callprojs.fallthrough_ioproj != NULL) { - C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io); + if (callprojs->fallthrough_ioproj != NULL) { + C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io); } // Replace the result with the new result if it exists and is used - if (callprojs.resproj != NULL && result != NULL) { - C->gvn_replace_by(callprojs.resproj, result); + if (callprojs->resproj[0] != NULL 
&& result != NULL) { + assert(callprojs->nb_resproj == 1, "unexpected number of results"); + C->gvn_replace_by(callprojs->resproj[0], result); } if (ejvms == NULL) { // No exception edges to simply kill off those paths - if (callprojs.catchall_catchproj != NULL) { - C->gvn_replace_by(callprojs.catchall_catchproj, C->top()); + if (callprojs->catchall_catchproj != NULL) { + C->gvn_replace_by(callprojs->catchall_catchproj, C->top()); } - if (callprojs.catchall_memproj != NULL) { - C->gvn_replace_by(callprojs.catchall_memproj, C->top()); + if (callprojs->catchall_memproj != NULL) { + C->gvn_replace_by(callprojs->catchall_memproj, C->top()); } - if (callprojs.catchall_ioproj != NULL) { - C->gvn_replace_by(callprojs.catchall_ioproj, C->top()); + if (callprojs->catchall_ioproj != NULL) { + C->gvn_replace_by(callprojs->catchall_ioproj, C->top()); } // Replace the old exception object with top - if (callprojs.exobj != NULL) { - C->gvn_replace_by(callprojs.exobj, C->top()); + if (callprojs->exobj != NULL) { + C->gvn_replace_by(callprojs->exobj, C->top()); } } else { GraphKit ekit(ejvms); @@ -1926,20 +2023,20 @@ Node* ex_oop = ekit.use_exception_state(ex_map); - if (callprojs.catchall_catchproj != NULL) { - C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control()); + if (callprojs->catchall_catchproj != NULL) { + C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control()); ex_ctl = ekit.control(); } - if (callprojs.catchall_memproj != NULL) { - C->gvn_replace_by(callprojs.catchall_memproj, ekit.reset_memory()); + if (callprojs->catchall_memproj != NULL) { + C->gvn_replace_by(callprojs->catchall_memproj, ekit.reset_memory()); } - if (callprojs.catchall_ioproj != NULL) { - C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o()); + if (callprojs->catchall_ioproj != NULL) { + C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o()); } // Replace the old exception object with the newly created one - if (callprojs.exobj != NULL) { - C->gvn_replace_by(callprojs.exobj, 
ex_oop); + if (callprojs->exobj != NULL) { + C->gvn_replace_by(callprojs->exobj, ex_oop); } } @@ -1962,7 +2059,7 @@ } } - if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { + if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { replaced_nodes.apply(C, final_ctl); } if (!ex_ctl->is_top() && do_replaced_nodes) { @@ -2139,9 +2236,9 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) { // (Note: TypeFunc::make has a cache that makes this fast.) const TypeFunc* tf = TypeFunc::make(dest_method); - int nargs = tf->domain()->cnt() - TypeFunc::Parms; + int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms; for (int j = 0; j < nargs; j++) { - const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); + const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms); if( targ->basic_type() == T_DOUBLE ) { // If any parameters are doubles, they must be rounded before // the call, dstore_rounding does gvn.transform @@ -2198,7 +2295,7 @@ if (speculative != current_type->speculative()) { // Build a type with a speculative type (what we think we know // about the type but will need a guard when we use it) - const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative); + const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative); // We're changing the type, we need a new CheckCast node to carry // the new type. The new type depends on the control: what // profiling tells us is only valid from here as far as we can @@ -2263,11 +2360,11 @@ return; } const TypeFunc* tf = TypeFunc::make(dest_method); - int nargs = tf->domain()->cnt() - TypeFunc::Parms; + int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms; int skip = Bytecodes::has_receiver(bc) ? 
1 : 0; for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) { - const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); - if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) { + const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms); + if (targ->isa_oopptr()) { ProfilePtrKind ptr_kind = ProfileMaybeNull; ciKlass* better_type = NULL; if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) { @@ -2780,25 +2877,36 @@ Node* *casted_receiver) { const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); Node* recv_klass = load_object_klass(receiver); - Node* want_klass = makecon(tklass); - Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass) ); - Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) ); - IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); - set_control( _gvn.transform( new IfTrueNode (iff) )); - Node* fail = _gvn.transform( new IfFalseNode(iff) ); - + Node* fail = type_check(recv_klass, tklass, prob); const TypeOopPtr* recv_xtype = tklass->as_instance_type(); assert(recv_xtype->klass_is_exact(), ""); // Subsume downstream occurrences of receiver with a cast to // recv_xtype, since now we know what the type will be. Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype); - (*casted_receiver) = _gvn.transform(cast); + Node* res = _gvn.transform(cast); + if (recv_xtype->is_valuetypeptr() && recv_xtype->value_klass()->is_scalarizable()) { + assert(!gvn().type(res)->maybe_null(), "receiver should never be null"); + res = ValueTypeNode::make_from_oop(this, res, recv_xtype->value_klass()); + } + + (*casted_receiver) = res; // (User must make the replace_in_map call.) 
return fail; } +Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass, + float prob) { + Node* want_klass = makecon(tklass); + Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass)); + Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) ); + IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); + set_control( _gvn.transform( new IfTrueNode (iff))); + Node* fail = _gvn.transform( new IfFalseNode(iff)); + return fail; +} + //------------------------------subtype_check_receiver------------------------- Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, Node** casted_receiver) { @@ -2969,10 +3077,11 @@ bool speculative_not_null = false; bool never_see_null = (ProfileDynamicTypes // aggressive use of profile && seems_never_null(obj, data, speculative_not_null)); + bool is_value = obj->is_ValueType(); // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); - Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); + Node* not_null_obj = is_value ? obj : null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? @@ -2990,35 +3099,45 @@ } // Do we know the type check always succeed? 
- bool known_statically = false; - if (_gvn.type(superklass)->singleton()) { - ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); - ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass(); - if (subk != NULL && subk->is_loaded()) { - int static_res = C->static_subtype_check(superk, subk); - known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); + if (!is_value) { + bool known_statically = false; + if (_gvn.type(superklass)->singleton()) { + ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); + ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass(); + if (subk != NULL && subk->is_loaded()) { + int static_res = C->static_subtype_check(superk, subk); + known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); + } } - } - if (!known_statically) { - const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); - // We may not have profiling here or it may not help us. If we - // have a speculative type use it to perform an exact cast. - ciKlass* spec_obj_type = obj_type->speculative_type(); - if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) { - Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace); - if (stopped()) { // Profile disagrees with this path. - set_control(null_ctl); // Null is the only remaining possibility. - return intcon(0); - } - if (cast_obj != NULL) { - not_null_obj = cast_obj; + if (!known_statically) { + const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); + // We may not have profiling here or it may not help us. If we + // have a speculative type use it to perform an exact cast. + ciKlass* spec_obj_type = obj_type->speculative_type(); + if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) { + Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace); + if (stopped()) { // Profile disagrees with this path. 
+ set_control(null_ctl); // Null is the only remaining possibility. + return intcon(0); + } + if (cast_obj != NULL && + // A value that's sometimes null is not something we can optimize well + !(cast_obj->is_ValueType() && null_ctl != top())) { + not_null_obj = cast_obj; + is_value = not_null_obj->is_ValueType(); + } } } } // Load the object's klass - Node* obj_klass = load_object_klass(not_null_obj); + Node* obj_klass = NULL; + if (is_value) { + obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->is_valuetype()->value_klass())); + } else { + obj_klass = load_object_klass(not_null_obj); + } // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass); @@ -3038,7 +3157,7 @@ // If we know the type check always succeeds then we don't use the // profiling data at this bytecode. Don't lose it, feed it to the // type system as a speculative type. - if (safe_for_replace) { + if (safe_for_replace && !is_value) { Node* casted_obj = record_profiled_receiver_for_speculation(obj); replace_in_map(obj, casted_obj); } @@ -3053,11 +3172,12 @@ // If failure_control is supplied and not null, it is filled in with // the control edge for the cast failure. Otherwise, an appropriate // uncommon trap or exception is thrown. -Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, - Node* *failure_control) { +Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool never_null) { kill_dead_locals(); // Benefit all the uncommon traps - const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr(); - const Type *toop = TypeOopPtr::make_from_klass(tk->klass()); + const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr(); + const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass()); + assert(!never_null || toop->is_valuetypeptr(), "must be a value type pointer"); + bool is_value = obj->is_ValueType(); // Fast cutout: Check the case that the cast is vacuously true. 
// This detects the common cases where the test will short-circuit @@ -3066,18 +3186,43 @@ // want a residual null check left around. (Causes a slowdown, // for example, in some objArray manipulations, such as a[i]=a[j].) if (tk->singleton()) { - const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); - if (objtp != NULL && objtp->klass() != NULL) { - switch (C->static_subtype_check(tk->klass(), objtp->klass())) { + ciKlass* klass = NULL; + if (is_value) { + klass = _gvn.type(obj)->is_valuetype()->value_klass(); + } else { + const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); + if (objtp != NULL) { + klass = objtp->klass(); + } + } + if (klass != NULL) { + switch (C->static_subtype_check(tk->klass(), klass)) { case Compile::SSC_always_true: // If we know the type check always succeed then we don't use // the profiling data at this bytecode. Don't lose it, feed it // to the type system as a speculative type. - return record_profiled_receiver_for_speculation(obj); + if (!is_value) { + obj = record_profiled_receiver_for_speculation(obj); + if (never_null) { + obj = null_check(obj); + } + if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(obj)->maybe_null()) { + obj = ValueTypeNode::make_from_oop(this, obj, toop->value_klass()); + } + } + return obj; case Compile::SSC_always_false: - // It needs a null check because a null will *pass* the cast check. - // A non-null value will always produce an exception. - return null_assert(obj); + if (is_value || never_null) { + if (!is_value) { + null_check(obj); + } + // Value type is never null. Always throw an exception. + builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(klass))); + return top(); + } else { + // It needs a null check because a null will *pass* the cast check. 
+ return null_assert(obj); + } } } } @@ -3105,7 +3250,14 @@ // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); - Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); + Node* not_null_obj = NULL; + if (is_value) { + not_null_obj = obj; + } else if (never_null) { + not_null_obj = null_check(obj); + } else { + not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); + } // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? @@ -3123,7 +3275,7 @@ } Node* cast_obj = NULL; - if (tk->klass_is_exact()) { + if (!is_value && tk->klass_is_exact()) { // The following optimization tries to statically cast the speculative type of the object // (for example obtained during profiling) to the type of the superklass and then do a // dynamic check that the type of the object is what we expect. To work correctly @@ -3134,6 +3286,13 @@ ciKlass* spec_obj_type = obj_type->speculative_type(); if (spec_obj_type != NULL || data != NULL) { cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); + if (cast_obj != NULL && cast_obj->is_ValueType()) { + if (null_ctl != top()) { + cast_obj = NULL; // A value that's sometimes null is not something we can optimize well + } else { + return cast_obj; + } + } if (cast_obj != NULL) { if (failure_control != NULL) // failure is now impossible (*failure_control) = top(); @@ -3145,13 +3304,18 @@ if (cast_obj == NULL) { // Load the object's klass - Node* obj_klass = load_object_klass(not_null_obj); + Node* obj_klass = NULL; + if (is_value) { + obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->is_valuetype()->value_klass())); + } else { + obj_klass = load_object_klass(not_null_obj); + } // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass ); // Plug in success path into the merge - 
cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); + cast_obj = is_value ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); // Failure path ends in uncommon trap (or may be dead - failure impossible) if (failure_control == NULL) { if (not_subtype_ctrl != top()) { // If failure is possible @@ -3184,9 +3348,104 @@ set_control( _gvn.transform(region) ); record_for_igvn(region); - return record_profiled_receiver_for_speculation(res); + if (!is_value) { + res = record_profiled_receiver_for_speculation(res); + if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(res)->maybe_null()) { + res = ValueTypeNode::make_from_oop(this, res, toop->value_klass()); + } + } + return res; +} + +Node* GraphKit::is_always_locked(Node* obj) { + Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); + Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); + return _gvn.transform(new AndXNode(mark, value_mask)); +} + +Node* GraphKit::gen_value_type_test(Node* kls) { + Node* flags_addr = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); + Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered); + Node* is_value = _gvn.transform(new AndINode(flags, intcon(JVM_ACC_VALUE))); + Node* cmp = _gvn.transform(new CmpINode(is_value, intcon(0))); + return cmp; +} + +// Deoptimize if 'obj' is a value type +void GraphKit::gen_value_type_guard(Node* obj, int nargs) { + assert(EnableValhalla, "should only be used if value types are enabled"); + Node* bol = NULL; + if (obj->is_ValueTypeBase()) { + bol = intcon(0); + } else { + Node* is_value = is_always_locked(obj); + Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); + Node* cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); + bol = _gvn.transform(new BoolNode(cmp, 
BoolTest::ne)); + } + { BuildCutout unless(this, bol, PROB_MAX); + inc_sp(nargs); + uncommon_trap(Deoptimization::Reason_class_check, + Deoptimization::Action_none); + } +} + +// Deoptimize if 'ary' is flattened or if 'obj' is null and 'ary' is a value type array +void GraphKit::gen_value_type_array_guard(Node* ary, Node* obj, int nargs) { + assert(EnableValhalla, "should only be used if value types are enabled"); + // Load array element klass + Node* kls = load_object_klass(ary); + Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset())); + Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); + // Check if element is a value type + Node* flags_addr = basic_plus_adr(elem_klass, in_bytes(Klass::access_flags_offset())); + Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered); + Node* is_value_elem = _gvn.transform(new AndINode(flags, intcon(JVM_ACC_VALUE))); + + const Type* objtype = _gvn.type(obj); + if (objtype == TypePtr::NULL_PTR) { + // Object is always null, check if array is a value type array + Node* cmp = _gvn.transform(new CmpINode(is_value_elem, intcon(0))); + Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); + { BuildCutout unless(this, bol, PROB_MAX); + // TODO just deoptimize for now if we store null to a value type array + inc_sp(nargs); + uncommon_trap(Deoptimization::Reason_array_check, + Deoptimization::Action_none); + } + } else { + // Check if (is_value_elem && obj_is_null) <=> (!is_value_elem | !obj_is_null == 0) + // TODO what if we later figure out that obj is never null? 
+ Node* not_value = _gvn.transform(new XorINode(is_value_elem, intcon(JVM_ACC_VALUE))); + not_value = _gvn.transform(new ConvI2LNode(not_value)); + Node* not_null = _gvn.transform(new CastP2XNode(NULL, obj)); + Node* both = _gvn.transform(new OrLNode(not_null, not_value)); + Node* cmp = _gvn.transform(new CmpLNode(both, longcon(0))); + Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne)); + { BuildCutout unless(this, bol, PROB_MAX); + // TODO just deoptimize for now if we store null to a value type array + inc_sp(nargs); + uncommon_trap(Deoptimization::Reason_array_check, + Deoptimization::Action_none); + } + } +} + +Node* GraphKit::load_lh_array_tag(Node* kls) { + Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); + Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); + return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); } + +Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) { + Node* layout_val = load_lh_array_tag(kls); + Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value))); + return cmp; +} + + //------------------------------next_monitor----------------------------------- // What number should be given to the next monitor? int GraphKit::next_monitor() { @@ -3252,6 +3511,13 @@ if( !GenerateSynchronizationCode ) return NULL; // Not locking things? + + // We cannot lock on a value type + const TypeOopPtr* objptr = _gvn.type(obj)->make_oopptr(); + if (objptr->can_be_value_type()) { + gen_value_type_guard(obj, 1); + } + if (stopped()) // Dead monitor? 
return NULL; @@ -3326,6 +3592,7 @@ map()->pop_monitor(); // Kill monitor from debug info return; } + assert(!obj->is_ValueTypeBase(), "should not unlock on value type"); // Memory barrier to avoid floating things down past the locked region insert_mem_bar(Op_MemBarReleaseLock); @@ -3366,8 +3633,14 @@ const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); if (!StressReflectiveCode && inst_klass != NULL) { ciKlass* klass = inst_klass->klass(); + assert(klass != NULL, "klass should not be NULL"); bool xklass = inst_klass->klass_is_exact(); - if (xklass || klass->is_array_klass()) { + bool can_be_value_array = false; + if (klass->is_array_klass() && EnableValhalla && ValueArrayFlatten) { + ciKlass* elem = klass->as_array_klass()->element_klass(); + can_be_value_array = elem != NULL && (elem->is_java_lang_Object() || elem->is_interface()); + } + if (xklass || (klass->is_array_klass() && !can_be_value_array)) { jint lhelper = klass->layout_helper(); if (lhelper != Klass::_lh_neutral_value) { constant_value = lhelper; @@ -3429,6 +3702,7 @@ MergeMemNode* minit_in = MergeMemNode::make(malloc); init->set_req(InitializeNode::Memory, minit_in); record_for_igvn(minit_in); // fold it up later, if possible + _gvn.set_type(minit_in, Type::MEMORY); Node* minit_out = memory(rawidx); assert(minit_out->is_Proj() && minit_out->in(0) == init, ""); // Add an edge in the MergeMem for the header fields so an access @@ -3436,10 +3710,26 @@ set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); if (oop_type->isa_aryptr()) { - const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); - int elemidx = C->get_alias_index(telemref); - hook_memory_on_init(*this, elemidx, minit_in, minit_out); + const TypeAryPtr* arytype = oop_type->is_aryptr(); + if (arytype->klass()->is_value_array_klass()) { + ciValueArrayKlass* vak = 
arytype->klass()->as_value_array_klass(); + ciValueKlass* vk = vak->element_klass()->as_value_klass(); + for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) { + ciField* field = vk->nonstatic_field_at(i); + if (field->offset() >= TrackedInitializationLimit * HeapWordSize) + continue; // do not bother to track really large numbers of fields + int off_in_vt = field->offset() - vk->first_field_offset(); + const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot); + int fieldidx = C->get_alias_index(adr_type); + hook_memory_on_init(*this, fieldidx, minit_in, minit_out); + } + } else { + const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); + int elemidx = C->get_alias_index(telemref); + hook_memory_on_init(*this, elemidx, minit_in, minit_out); + } } else if (oop_type->isa_instptr()) { + set_memory(minit_out, C->get_alias_index(oop_type)); // mark word ciInstanceKlass* ik = oop_type->klass()->as_instance_klass(); for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { ciField* field = ik->nonstatic_field_at(i); @@ -3490,14 +3780,15 @@ Node* GraphKit::new_instance(Node* klass_node, Node* extra_slow_test, Node* *return_size_val, - bool deoptimize_on_exception) { + bool deoptimize_on_exception, + ValueTypeBaseNode* value_node) { // Compute size in doublewords // The size is always an integral number of doublewords, represented // as a positive bytewise size stored in the klass's layout_helper. // The layout_helper also encodes (in a low bit) the need for a slow path. jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); - int layout_is_con = (layout_val == NULL); + bool layout_is_con = (layout_val == NULL); if (extra_slow_test == NULL) extra_slow_test = intcon(0); // Generate the initial go-slow test. 
It's either ALWAYS (return a @@ -3548,20 +3839,28 @@ // Now generate allocation code // The entire memory state is needed for slow path of the allocation - // since GC and deoptimization can happened. + // since GC and deoptimization can happen. Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), control(), mem, i_o(), size, klass_node, - initial_slow_test); + initial_slow_test, value_node); return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); } +// With compressed oops, the 64 bit init value for non flattened value +// arrays is built from 2 32 bit compressed oops +static Node* raw_default_for_coops(Node* default_value, GraphKit& kit) { + Node* lower = kit.gvn().transform(new CastP2XNode(kit.control(), default_value)); + Node* upper = kit.gvn().transform(new LShiftLNode(lower, kit.intcon(32))); + return kit.gvn().transform(new OrLNode(lower, upper)); +} + //-------------------------------new_array------------------------------------- -// helper for both newarray and anewarray +// helper for newarray and anewarray // The 'length' parameter is (obviously) the length of the array. // See comments on new_instance for the meaning of the other arguments. Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) @@ -3571,7 +3870,7 @@ bool deoptimize_on_exception) { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); - int layout_is_con = (layout_val == NULL); + bool layout_is_con = (layout_val == NULL); if (!layout_is_con && !StressReflectiveCode && !too_many_traps(Deoptimization::Reason_class_check)) { @@ -3601,7 +3900,7 @@ assert(!StressReflectiveCode, "stress mode does not use these paths"); // Increase the size limit if we have exact knowledge of array type. 
int log2_esize = Klass::layout_helper_log2_element_size(layout_con); - fast_size_limit <<= (LogBytesPerLong - log2_esize); + fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0); } Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); @@ -3620,9 +3919,10 @@ int hsize = Klass::layout_helper_header_size(layout_con); int eshift = Klass::layout_helper_log2_element_size(layout_con); BasicType etype = Klass::layout_helper_element_type(layout_con); + bool is_value_array = Klass::layout_helper_is_valueArray(layout_con); if ((round_mask & ~right_n_bits(eshift)) == 0) round_mask = 0; // strength-reduce it if it goes away completely - assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); + assert(is_value_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); assert(header_size_min <= hsize, "generic minimum is smallest"); header_size_min = hsize; header_size = intcon(hsize + round_mask); @@ -3706,7 +4006,7 @@ // Now generate allocation code // The entire memory state is needed for slow path of the allocation - // since GC and deoptimization can happened. + // since GC and deoptimization can happen. 
Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state @@ -3715,20 +4015,93 @@ initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); } + const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); + const TypeAryPtr* ary_ptr = ary_type->isa_aryptr(); + const Type* elem = NULL; + ciKlass* elem_klass = NULL; + if (ary_ptr != NULL) { + elem = ary_ptr->elem(); + elem_klass = ary_ptr->klass()->as_array_klass()->element_klass(); + } + Node* default_value = NULL; + Node* raw_default_value = NULL; + if (elem != NULL && elem->make_ptr()) { + if (elem_klass != NULL && elem_klass->is_valuetype()) { + ciValueKlass* vk = elem_klass->as_value_klass(); + if (!vk->flatten_array()) { + default_value = ValueTypeNode::default_oop(gvn(), vk); + if (elem->isa_narrowoop()) { + default_value = _gvn.transform(new EncodePNode(default_value, elem)); + raw_default_value = raw_default_for_coops(default_value, *this); + } else { + raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value)); + } + } + } + } + + if (EnableValhalla && (elem == NULL || (elem_klass != NULL && elem_klass->is_java_lang_Object() && !ary_type->klass_is_exact()))) { + assert(raw_default_value == NULL, "shouldn't be set yet"); + + // unknown array type, could be a non flattened value array that's + // initialized to a non zero default value + + Node* r = new RegionNode(4); + Node* phi = new PhiNode(r, TypeX_X); + + Node* cmp = gen_lh_array_test(klass_node, Klass::_lh_array_tag_obj_value); + Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); + IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN); + r->init_req(1, _gvn.transform(new IfFalseNode(iff))); + phi->init_req(1, MakeConX(0)); + set_control(_gvn.transform(new IfTrueNode(iff))); + Node* k_adr = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset())); + Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, control(),
immutable_memory(), k_adr, TypeInstPtr::KLASS)); + cmp = gen_value_type_test(elem_klass); + bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); + iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN); + r->init_req(2, _gvn.transform(new IfTrueNode(iff))); + phi->init_req(2, MakeConX(0)); + set_control(_gvn.transform(new IfFalseNode(iff))); + + Node* adr_fixed_block_addr = basic_plus_adr(elem_klass, in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset())); + Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); + + Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(ValueKlass::default_value_offset_offset())); + Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered); + + Node* elem_mirror = load_mirror_from_klass(elem_klass); + + Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset)); + const TypePtr* adr_type = _gvn.type(default_value_addr)->is_ptr(); + Node* val = access_load_at(elem_mirror, default_value_addr, adr_type, TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP); + + if (UseCompressedOops) { + val = _gvn.transform(new EncodePNode(val, elem)); + val = raw_default_for_coops(val, *this); + } else { + val = _gvn.transform(new CastP2XNode(control(), val)); + } + r->init_req(3, control()); + phi->init_req(3, val); + set_control(_gvn.transform(r)); + raw_default_value = _gvn.transform(phi); + } + // Create the AllocateArrayNode and its result projections AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), control(), mem, i_o(), size, klass_node, initial_slow_test, - length); + length, default_value, + raw_default_value); // Cast to correct type. Note that the klass_node may be constant or not, // and in the latter case the actual array type will be inexact also. 
// (This happens via a non-constant argument to inline_native_newArray.) // In any case, the value of klass_node provides the desired array type. const TypeInt* length_type = _gvn.find_int_type(length); - const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); if (ary_type->isa_aryptr() && length_type != NULL) { // Try to get a better type than POS for the size ary_type = ary_type->is_aryptr()->cast_to_size(length_type); @@ -3883,11 +4256,11 @@ Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, NULL, Type::Offset(0)); const TypePtr* value_field_type = string_type->add_offset(value_offset); const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::BYTE, TypeInt::POS), - ciTypeArrayKlass::make(T_BYTE), true, 0); + ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0)); Node* p = basic_plus_adr(str, str, value_offset); Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, IN_HEAP | (set_ctrl ? 
C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); @@ -3900,7 +4273,7 @@ } int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, NULL, Type::Offset(0)); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); Node* p = basic_plus_adr(str, str, coder_offset); @@ -3912,7 +4285,7 @@ void GraphKit::store_String_value(Node* str, Node* value) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, NULL, Type::Offset(0)); const TypePtr* value_field_type = string_type->add_offset(value_offset); access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, @@ -3922,7 +4295,7 @@ void GraphKit::store_String_coder(Node* str, Node* value) { int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, NULL, Type::Offset(0)); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, @@ -4035,7 +4408,22 @@ const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), /*is_unsigned_load=*/false); if (con_type != NULL) { - return makecon(con_type); + Node* con = makecon(con_type); + if (field->layout_type() == T_VALUETYPE && field->type()->as_value_klass()->is_scalarizable()) { + // Load value type from constant oop + assert(!con_type->maybe_null(), "should never be null"); + con = ValueTypeNode::make_from_oop(this, con, field->type()->as_value_klass()); + } + return con; } return NULL; } + +//---------------------------load_mirror_from_klass---------------------------- +// Given a klass oop, load its java mirror (a java.lang.Class oop). 
+Node* GraphKit::load_mirror_from_klass(Node* klass) { + Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); + Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); + // mirror = ((OopHandle)mirror)->resolve(); + return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); +} --- old/src/hotspot/share/opto/graphKit.hpp 2019-03-11 14:26:43.698354636 +0100 +++ new/src/hotspot/share/opto/graphKit.hpp 2019-03-11 14:26:43.466354639 +0100 @@ -66,6 +66,9 @@ int _bci; // JVM Bytecode Pointer ciMethod* _method; // JVM Current Method BarrierSetC2* _barrier_set; +#ifdef ASSERT + uint _worklist_size; +#endif private: int _sp; // JVM Expression Stack Pointer; don't modify directly! @@ -78,11 +81,16 @@ public: GraphKit(); // empty constructor - GraphKit(JVMState* jvms); // the JVM state on which to operate + GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL); // the JVM state on which to operate #ifdef ASSERT ~GraphKit() { assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms"); + // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN + // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies, + // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit. 
+ assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size), + "GraphKit should not modify _for_igvn worklist after parsing"); } #endif @@ -93,7 +101,7 @@ PhaseGVN& gvn() const { return _gvn; } void* barrier_set_state() const { return C->barrier_set_state(); } - void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile + void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); } // Handy well-known nodes: Node* null() const { return zerocon(T_OBJECT); } @@ -369,6 +377,8 @@ return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null()); } + Node* null2default(Node* value, ciValueKlass* vk = NULL); + // Check if value is null and abort if it is Node* must_be_not_null(Node* value, bool do_replace_in_map); @@ -580,7 +590,8 @@ Node* val, const Type* val_type, BasicType bt, - DecoratorSet decorators); + DecoratorSet decorators, + bool deoptimize_on_exception = false); Node* access_load_at(Node* obj, // containing obj Node* adr, // actual adress to load val at @@ -632,7 +643,7 @@ BasicType bt, DecoratorSet decorators); - void access_clone(Node* src, Node* dst, Node* size, bool is_array); + void access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array); Node* access_resolve(Node* n, DecoratorSet decorators); @@ -669,6 +680,9 @@ // callee (with all arguments still on the stack). Node* null_check_receiver_before_call(ciMethod* callee) { assert(!callee->is_static(), "must be a virtual method"); + if (argument(0)->is_ValueType()) { + return argument(0); + } // Callsite signature can be different from actual method being called (i.e _linkTo* sites). // Use callsite signature always. ciMethod* declared_method = method()->get_method_at_bci(bci()); @@ -681,7 +695,7 @@ // Fill in argument edges for the call from argument(0), argument(1), ... // (The next step is to call set_edges_for_java_call.) 
- void set_arguments_for_java_call(CallJavaNode* call); + void set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining = false); // Fill in non-argument edges for the call. // Transform the call, and update the basics: control, i_o, memory. @@ -819,8 +833,14 @@ // Generate a check-cast idiom. Used by both the check-cast bytecode // and the array-store bytecode - Node* gen_checkcast( Node *subobj, Node* superkls, - Node* *failure_control = NULL ); + Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool never_null = false); + + Node* is_always_locked(Node* obj); + Node* gen_value_type_test(Node* kls); + void gen_value_type_guard(Node* obj, int nargs = 0); + void gen_value_type_array_guard(Node* ary, Node* obj, int nargs); + Node* load_lh_array_tag(Node* kls); + Node* gen_lh_array_test(Node* kls, unsigned int lh_value); Node* gen_subtype_check(Node* subklass, Node* superklass) { MergeMemNode* mem = merged_memory(); @@ -835,6 +855,7 @@ // (Caller is responsible for doing replace_in_map.) Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob, Node* *casted_receiver); + Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob); // Inexact type check used for predicted calls. Node* subtype_check_receiver(Node* receiver, ciKlass* klass, @@ -848,7 +869,8 @@ Node* new_instance(Node* klass_node, Node* slow_test = NULL, Node* *return_size_val = NULL, - bool deoptimize_on_exception = false); + bool deoptimize_on_exception = false, + ValueTypeBaseNode* value_node = NULL); Node* new_array(Node* klass_node, Node* count_val, int nargs, Node* *return_size_val = NULL, bool deoptimize_on_exception = false); @@ -886,6 +908,8 @@ void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs); Node* make_constant_from_field(ciField* field, Node* obj); + + Node* load_mirror_from_klass(Node* klass); }; // Helper class to support building of control flow branches. 
Upon --- old/src/hotspot/share/opto/idealKit.cpp 2019-03-11 14:26:44.126354630 +0100 +++ new/src/hotspot/share/opto/idealKit.cpp 2019-03-11 14:26:43.918354633 +0100 @@ -48,7 +48,6 @@ _cvstate = NULL; // We can go memory state free or else we need the entire memory state assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); - assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase"); int init_size = 5; _pending_cvstates = new (C->node_arena()) GrowableArray(C->node_arena(), init_size, 0, 0); DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray(C->node_arena(), init_size, 0, 0)); @@ -296,7 +295,7 @@ return delay_transform(n); } else { n = gvn().transform(n); - C->record_for_igvn(n); + gvn().record_for_igvn(n); return n; } } @@ -305,7 +304,7 @@ Node* IdealKit::delay_transform(Node* n) { // Delay transform until IterativeGVN gvn().set_type(n, n->bottom_type()); - C->record_for_igvn(n); + gvn().record_for_igvn(n); return n; } @@ -533,8 +532,8 @@ assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), "call node must be constructed correctly"); Node* res = NULL; - if (slow_call_type->range()->cnt() > TypeFunc::Parms) { - assert(slow_call_type->range()->cnt() == TypeFunc::Parms+1, "only one return value"); + if (slow_call_type->range_sig()->cnt() > TypeFunc::Parms) { + assert(slow_call_type->range_sig()->cnt() == TypeFunc::Parms+1, "only one return value"); res = transform(new ProjNode(call, TypeFunc::Parms)); } return res; --- old/src/hotspot/share/opto/lcm.cpp 2019-03-11 14:26:44.554354624 +0100 +++ new/src/hotspot/share/opto/lcm.cpp 2019-03-11 14:26:44.342354627 +0100 @@ -276,9 +276,9 @@ tptr = base->bottom_type()->is_ptr(); } // Give up if offset is not a compile-time constant. 
- if (offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot) + if (offset == Type::OffsetBot || tptr->offset() == Type::OffsetBot) continue; - offset += tptr->_offset; // correct if base is offseted + offset += tptr->offset(); // correct if base is offseted // Give up if reference is beyond page size. if (MacroAssembler::needs_explicit_null_check(offset)) continue; @@ -843,7 +843,7 @@ regs.Insert(_matcher.c_frame_pointer()); // Set all registers killed and not already defined by the call. - uint r_cnt = mcall->tf()->range()->cnt(); + uint r_cnt = mcall->tf()->range_cc()->cnt(); int op = mcall->ideal_Opcode(); MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); map_node_to_block(proj, block); --- old/src/hotspot/share/opto/library_call.cpp 2019-03-11 14:26:44.982354618 +0100 +++ new/src/hotspot/share/opto/library_call.cpp 2019-03-11 14:26:44.770354621 +0100 @@ -52,6 +52,7 @@ #include "opto/runtime.hpp" #include "opto/rootnode.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "prims/nativeLookup.hpp" #include "prims/unsafe.hpp" #include "runtime/objectMonitor.hpp" @@ -163,7 +164,6 @@ void generate_string_range_check(Node* array, Node* offset, Node* length, bool char_count); Node* generate_current_thread(Node* &tls_output); - Node* load_mirror_from_klass(Node* klass); Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, RegionNode* region, int null_path, int offset); @@ -185,20 +185,36 @@ int modifier_mask, int modifier_bits, RegionNode* region); Node* generate_interface_guard(Node* kls, RegionNode* region); + Node* generate_value_guard(Node* kls, RegionNode* region); + + enum ArrayKind { + AnyArray, + NonArray, + ObjectArray, + NonObjectArray, + TypeArray, + ValueArray + }; + Node* generate_array_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, false, false); + return generate_array_guard_common(kls, region, AnyArray); } Node* 
generate_non_array_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, false, true); + return generate_array_guard_common(kls, region, NonArray); } Node* generate_objArray_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, true, false); + return generate_array_guard_common(kls, region, ObjectArray); } Node* generate_non_objArray_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, true, true); + return generate_array_guard_common(kls, region, NonObjectArray); } - Node* generate_array_guard_common(Node* kls, RegionNode* region, - bool obj_array, bool not_array); + Node* generate_typeArray_guard(Node* kls, RegionNode* region) { + return generate_array_guard_common(kls, region, TypeArray); + } + Node* generate_valueArray_guard(Node* kls, RegionNode* region) { + return generate_array_guard_common(kls, region, ValueArray); + } + Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind); Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region); CallJavaNode* generate_method_call(vmIntrinsics::ID method_id, bool is_virtual = false, bool is_static = false); @@ -253,6 +269,8 @@ bool inline_unsafe_allocate(); bool inline_unsafe_newArray(bool uninitialized); bool inline_unsafe_copyMemory(); + bool inline_unsafe_make_private_buffer(); + bool inline_unsafe_finish_private_buffer(); bool inline_native_currentThread(); bool inline_native_time_funcs(address method, const char* funcName); @@ -589,6 +607,8 @@ case vmIntrinsics::_inflateStringC: case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress); + case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer(); + case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer(); case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false); case vmIntrinsics::_getBoolean: return 
inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false); case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false); @@ -598,6 +618,7 @@ case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false); case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false); case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false); + case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_VALUETYPE,Relaxed, false); case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false); case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false); @@ -608,6 +629,7 @@ case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false); case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false); case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false); + case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_VALUETYPE,Relaxed, false); case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false); case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false); @@ -2360,18 +2382,18 @@ if (!is_store) { // Object getReference(Object base, int/long offset), etc. 
BasicType rtype = sig->return_type()->basic_type(); - assert(rtype == type, "getter must return the expected value"); - assert(sig->count() == 2, "oop getter has 2 arguments"); + assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value"); + assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments"); assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object"); assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct"); } else { // void putReference(Object base, int/long offset, Object x), etc. assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value"); - assert(sig->count() == 3, "oop putter has 3 arguments"); + assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 arguments"); assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object"); assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct"); BasicType vtype = sig->type_at(sig->count()-1)->basic_type(); - assert(vtype == type, "putter must accept the expected value"); + assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value"); } #endif // ASSERT } @@ -2396,13 +2418,73 @@ // by oopDesc::field_addr. 
assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); + + ciValueKlass* value_klass = NULL; + if (type == T_VALUETYPE) { + Node* cls = null_check(argument(4)); + if (stopped()) { + return true; + } + Node* kls = load_klass_from_mirror(cls, false, NULL, 0); + const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr(); + if (!kls_t->klass_is_exact()) { + return false; + } + ciKlass* klass = kls_t->klass(); + if (!klass->is_valuetype()) { + return false; + } + value_klass = klass->as_value_klass(); + } + + receiver = null_check(receiver); + if (stopped()) { + return true; + } + + if (base->is_ValueType()) { + ValueTypeNode* vt = base->as_ValueType(); + + if (is_store) { + if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) { + return false; + } + base = vt->get_oop(); + } else { + if (offset->is_Con()) { + long off = find_long_con(offset, 0); + ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass(); + if ((long)(int)off != off || !vk->contains_field_offset(off)) { + return false; + } + + ciField* f = vk->get_non_flattened_field_by_offset((int)off); + + if (f != NULL) { + BasicType bt = f->layout_type(); + if (bt == T_ARRAY || bt == T_NARROWOOP) { + bt = T_OBJECT; + } + if (bt == type) { + if (bt != T_VALUETYPE || f->type() == value_klass) { + set_result(vt->field_value_by_offset((int)off, false)); + return true; + } + } + } + } + vt = vt->allocate(this)->as_ValueType(); + base = vt->get_oop(); + } + } + // 32-bit machines ignore the high half! offset = ConvL2X(offset); adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed); if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) { heap_base_oop = base; - } else if (type == T_OBJECT) { + } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) { return false; // off-heap oop accesses are not supported } @@ -2413,7 +2495,7 @@ decorators |= IN_HEAP; } - val = is_store ? 
argument(4) : NULL; + val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL; const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); @@ -2427,7 +2509,31 @@ } bool mismatched = false; - BasicType bt = alias_type->basic_type(); + BasicType bt = T_ILLEGAL; + ciField* field = NULL; + if (adr_type->isa_instptr()) { + const TypeInstPtr* instptr = adr_type->is_instptr(); + ciInstanceKlass* k = instptr->klass()->as_instance_klass(); + int off = instptr->offset(); + if (instptr->const_oop() != NULL && + instptr->klass() == ciEnv::current()->Class_klass() && + instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) { + k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); + field = k->get_field_by_offset(off, true); + } else { + field = k->get_non_flattened_field_by_offset(off); + } + if (field != NULL) { + bt = field->layout_type(); + } + assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match"); + if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) { + bt = T_OBJECT; + } + } else { + bt = alias_type->basic_type(); + } + if (bt != T_ILLEGAL) { assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access"); if (bt == T_BYTE && adr_type->isa_aryptr()) { @@ -2448,6 +2554,28 @@ mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched } + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr()) { + if (field == NULL || field->type() != value_klass) { + mismatched = true; + } + } else if (adr_type->isa_aryptr()) { + const Type* elem = adr_type->is_aryptr()->elem(); + if (!elem->isa_valuetype()) { + mismatched = true; + } else if (elem->is_valuetype()->value_klass() != value_klass) { + mismatched = true; + } + } + if (is_store) { + const Type* val_t = _gvn.type(val); + if (!val_t->isa_valuetype() || + val_t->is_valuetype()->value_klass() != value_klass) { + return false; + } + } + } + assert(!mismatched || 
alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched"); if (mismatched) { @@ -2460,17 +2588,17 @@ // Figure out the memory ordering. decorators |= mo_decorator_for_access_kind(kind); - if (!is_store && type == T_OBJECT) { - const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); - if (tjp != NULL) { - value_type = tjp; + if (!is_store) { + if (type == T_OBJECT) { + const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); + if (tjp != NULL) { + value_type = tjp; + } + } else if (type == T_VALUETYPE) { + value_type = NULL; } } - receiver = null_check(receiver); - if (stopped()) { - return true; - } // Heap pointers get a null-check from the interpreter, // as a courtesy. However, this is not guaranteed by Unsafe, // and it is not possible to fully distinguish unintended nulls @@ -2479,14 +2607,24 @@ if (!is_store) { Node* p = NULL; // Try to constant fold a load from a constant field - ciField* field = alias_type->field(); + if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) { // final or stable field p = make_constant_from_field(field, heap_base_oop); } if (p == NULL) { // Could not constant fold the load - p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators); + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr() && !mismatched) { + ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass(); + int offset = adr_type->is_instptr()->offset(); + p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators); + } else { + p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators); + } + } else { + p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators); + } // Normalize the value returned by getBoolean in the following cases if (type == T_BOOLEAN && (mismatched || @@ -2513,6 +2651,14 @@ p = gvn().transform(new CastP2XNode(NULL, p)); p = ConvX2UL(p); } + if 
(field != NULL && field->is_flattenable()&& !field->is_flattened()) { + // Load a non-flattened but flattenable value type from memory + if (value_type->value_klass()->is_scalarizable()) { + p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass()); + } else { + p = null2default(p, value_type->value_klass()); + } + } // The load node has the control of the preceding MemBarCPUOrder. All // following nodes will have the control of the MemBarCPUOrder inserted at // the end of this method. So, pushing the load onto the stack at a later @@ -2524,9 +2670,66 @@ val = ConvL2X(val); val = gvn().transform(new CastX2PNode(val)); } - access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators); + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr() && !mismatched) { + ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass(); + int offset = adr_type->is_instptr()->offset(); + val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators); + } else { + val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators); + } + } else { + access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators); + } + } + + if (argument(1)->is_ValueType() && is_store) { + Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass()); + value = value->as_ValueType()->make_larval(this, false); + replace_in_map(argument(1), value); + } + + return true; +} + +bool LibraryCallKit::inline_unsafe_make_private_buffer() { + Node* receiver = argument(0); + Node* value = argument(1); + + receiver = null_check(receiver); + if (stopped()) { + return true; + } + + if (!value->is_ValueType()) { + return false; + } + + set_result(value->as_ValueType()->make_larval(this, true)); + + return true; +} + +bool LibraryCallKit::inline_unsafe_finish_private_buffer() { + Node* receiver = argument(0); + Node* buffer = argument(1); + + receiver = null_check(receiver); + if (stopped()) { + 
return true; + } + + if (!buffer->is_ValueType()) { + return false; + } + + ValueTypeNode* vt = buffer->as_ValueType(); + if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) { + return false; } + set_result(vt->finish_larval(this)); + return true; } @@ -3062,15 +3265,6 @@ return true; } -//---------------------------load_mirror_from_klass---------------------------- -// Given a klass oop, load its java mirror (a java.lang.Class oop). -Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { - Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); - Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); - // mirror = ((OopHandle)mirror)->resolve(); - return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); -} - //-----------------------load_klass_from_mirror_common------------------------- // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop. // Test the klass oop for null (signifying a primitive Class like Integer.TYPE), @@ -3117,6 +3311,10 @@ return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); } +Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) { + return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region); +} + //-------------------------inline_native_Class_query------------------- bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { const Type* return_type = TypeInt::BOOL; @@ -3301,18 +3499,28 @@ if (obj == NULL || obj->is_top()) { return false; // dead path } - const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr(); + + ciKlass* obj_klass = NULL; + if (obj->is_ValueType()) { + const TypeValueType* tvt = _gvn.type(obj)->is_valuetype(); + obj_klass = tvt->value_klass(); + } else { + const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr(); + if (tp != NULL) { + obj_klass = tp->klass(); + } + } // First, see if Class.cast() can be folded statically. 
// java_mirror_type() returns non-null for compile-time Class constants. ciType* tm = mirror_con->java_mirror_type(); if (tm != NULL && tm->is_klass() && - tp != NULL && tp->klass() != NULL) { - if (!tp->klass()->is_loaded()) { + obj_klass != NULL) { + if (!obj_klass->is_loaded()) { // Don't use intrinsic when class is not loaded. return false; } else { - int static_res = C->static_subtype_check(tm->as_klass(), tp->klass()); + int static_res = C->static_subtype_check(tm->as_klass(), obj_klass); if (static_res == Compile::SSC_always_true) { // isInstance() is true - fold the code. set_result(obj); @@ -3480,30 +3688,28 @@ } //---------------------generate_array_guard_common------------------------ -Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, - bool obj_array, bool not_array) { +Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) { if (stopped()) { return NULL; } - // If obj_array/non_array==false/false: - // Branch around if the given klass is in fact an array (either obj or prim). - // If obj_array/non_array==false/true: - // Branch around if the given klass is not an array klass of any kind. - // If obj_array/non_array==true/true: - // Branch around if the kls is not an oop array (kls is int[], String, etc.) - // If obj_array/non_array==true/false: - // Branch around if the kls is an oop array (Object[] or subtype) - // // Like generate_guard, adds a new path onto the region. jint layout_con = 0; Node* layout_val = get_layout_helper(kls, layout_con); if (layout_val == NULL) { - bool query = (obj_array - ? 
Klass::layout_helper_is_objArray(layout_con) - : Klass::layout_helper_is_array(layout_con)); - if (query == not_array) { + bool query = 0; + switch(kind) { + case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break; + case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break; + case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break; + case ValueArray: query = Klass::layout_helper_is_valueArray(layout_con); break; + case AnyArray: query = Klass::layout_helper_is_array(layout_con); break; + case NonArray: query = !Klass::layout_helper_is_array(layout_con); break; + default: + ShouldNotReachHere(); + } + if (!query) { return NULL; // never a branch } else { // always a branch Node* always_branch = control(); @@ -3513,22 +3719,43 @@ return always_branch; } } + unsigned int value = 0; + BoolTest::mask btest = BoolTest::illegal; + switch(kind) { + case ObjectArray: + case NonObjectArray: { + value = Klass::_lh_array_tag_obj_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne; + break; + } + case TypeArray: { + value = Klass::_lh_array_tag_type_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = BoolTest::eq; + break; + } + case ValueArray: { + value = Klass::_lh_array_tag_vt_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = BoolTest::eq; + break; + } + case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break; + case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break; + default: + ShouldNotReachHere(); + } // Now test the correct condition. - jint nval = (obj_array - ? 
(jint)(Klass::_lh_array_tag_type_value - << Klass::_lh_array_tag_shift) - : Klass::_lh_neutral_value); + jint nval = (jint)value; Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval))); - BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array - // invert the test if we are looking for a non-array - if (not_array) btest = BoolTest(btest).negate(); Node* bol = _gvn.transform(new BoolNode(cmp, btest)); return generate_fair_guard(bol, region); } //-----------------------inline_native_newArray-------------------------- -// private static native Object java.lang.reflect.newArray(Class componentType, int length); +// private static native Object java.lang.reflect.Array.newArray(Class componentType, int length); // private native Object Unsafe.allocateUninitializedArray0(Class cls, int size); bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) { Node* mirror; @@ -3644,6 +3871,19 @@ Node* end = is_copyOfRange? argument(2): argument(1); Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); + const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr(); + const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr(); + if (EnableValhalla && ValueArrayFlatten && + (original_t == NULL || mirror_t == NULL || + (mirror_t->java_mirror_type() == NULL && + (original_t->elem()->isa_valuetype() || + (original_t->elem()->make_oopptr() != NULL && + original_t->elem()->make_oopptr()->can_be_value_type()))))) { + // We need to know statically if the copy is to a flattened array + // or not but can't tell. + return false; + } + Node* newcopy = NULL; // Set the original stack and the reexecute bit for the interpreter to reexecute @@ -3667,16 +3907,58 @@ // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. // Bail out if that is so. - Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); + // Value type array may have object field that would require a + // write barrier. 
Conservatively, go to slow path. + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ? + generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout); if (not_objArray != NULL) { // Improve the klass node's type from the new optimistic assumption: ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); - const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); + const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0)); Node* cast = new CastPPNode(klass_node, akls); cast->init_req(0, control()); klass_node = _gvn.transform(cast); } + Node* original_kls = load_object_klass(original); + // ArrayCopyNode:Ideal may transform the ArrayCopyNode to + // loads/stores but it is legal only if we're sure the + // Arrays.copyOf would succeed. So we need all input arguments + // to the copyOf to be validated, including that the copy to the + // new array won't trigger an ArrayStoreException. That subtype + // check can be optimized if we know something on the type of + // the input array from type speculation. 
+ if (_gvn.type(klass_node)->singleton() && !stopped()) { + ciKlass* subk = _gvn.type(original_kls)->is_klassptr()->klass(); + ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass(); + + int test = C->static_subtype_check(superk, subk); + if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) { + const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr(); + if (t_original->speculative_type() != NULL) { + original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true); + original_kls = load_object_klass(original); + } + } + } + + if (EnableValhalla) { + // Either both or neither new array klass and original array + // klass must be flattened + Node* flattened_klass = generate_valueArray_guard(klass_node, NULL); + generate_valueArray_guard(original_kls, bailout); + if (flattened_klass != NULL) { + RegionNode* r = new RegionNode(2); + record_for_igvn(r); + r->init_req(1, control()); + set_control(flattened_klass); + generate_valueArray_guard(original_kls, r); + bailout->add_req(control()); + set_control(_gvn.transform(r)); + } + } + // Bail out if either start or end is negative. generate_negative_guard(start, bailout, &start); generate_negative_guard(end, bailout, &end); @@ -3713,31 +3995,11 @@ // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). // This will fail a store-check if x contains any non-nulls. - // ArrayCopyNode:Ideal may transform the ArrayCopyNode to - // loads/stores but it is legal only if we're sure the - // Arrays.copyOf would succeed. So we need all input arguments - // to the copyOf to be validated, including that the copy to the - // new array won't trigger an ArrayStoreException. That subtype - // check can be optimized if we know something on the type of - // the input array from type speculation. 
- if (_gvn.type(klass_node)->singleton()) { - ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass(); - ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass(); - - int test = C->static_subtype_check(superk, subk); - if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) { - const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr(); - if (t_original->speculative_type() != NULL) { - original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true); - } - } - } - bool validated = false; // Reason_class_check rather than Reason_intrinsic because we // want to intrinsify even if this traps. if (!too_many_traps(Deoptimization::Reason_class_check)) { - Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original), + Node* not_subtype_ctrl = gen_subtype_check(original_kls, klass_node); if (not_subtype_ctrl != top()) { @@ -3754,7 +4016,7 @@ newcopy = new_array(klass_node, length, 0); // no arguments to push ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false, - load_object_klass(original), klass_node); + original_kls, klass_node); if (!is_copyOfRange) { ac->set_copyof(validated); } else { @@ -3878,7 +4140,12 @@ PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT); PhiNode* result_io = new PhiNode(result_reg, Type::ABIO); PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); - Node* obj = NULL; + Node* obj = argument(0); + + if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) { + return false; + } + if (!is_static) { // Check for hashing null object obj = null_check_receiver(); @@ -3888,7 +4155,6 @@ } else { // Do a null check, and return zero if null. 
// System.identityHashCode(null) == 0 - obj = argument(0); Node* null_ctl = top(); obj = null_check_oop(obj, &null_ctl); result_reg->init_req(_null_path, null_ctl); @@ -3908,6 +4174,13 @@ RegionNode* slow_region = new RegionNode(1); record_for_igvn(slow_region); + const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); + assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here"); + if (is_static && obj_type->can_be_value_type()) { + Node* obj_klass = load_object_klass(obj); + generate_value_guard(obj_klass, slow_region); + } + // If this is a virtual call, we generate a funny guard. We pull out // the vtable entry corresponding to hashCode() from the target object. // If the target method which we are calling happens to be the native @@ -3994,7 +4267,13 @@ // // Build special case code for calls to getClass on an object. bool LibraryCallKit::inline_native_getClass() { - Node* obj = null_check_receiver(); + Node* obj = argument(0); + if (obj->is_ValueType()) { + ciKlass* vk = _gvn.type(obj)->is_valuetype()->value_klass(); + set_result(makecon(TypeInstPtr::make(vk->java_mirror()))); + return true; + } + obj = null_check_receiver(); if (stopped()) return true; set_result(load_mirror_from_klass(load_object_klass(obj))); return true; @@ -4251,7 +4530,34 @@ // TODO: generate fields copies for small objects instead. Node* size = _gvn.transform(obj_size); - access_clone(obj, alloc_obj, size, is_array); + // Exclude the header but include array length to copy by 8 bytes words. + // Can't use base_offset_in_bytes(bt) since basic type is unknown. + int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() : + instanceOopDesc::base_offset_in_bytes(); + // base_off: + // 8 - 32-bit VM + // 12 - 64-bit VM, compressed klass + // 16 - 64-bit VM, normal klass + if (base_off % BytesPerLong != 0) { + assert(UseCompressedClassPointers, ""); + if (is_array) { + // Exclude length to copy by 8 bytes words. 
+ base_off += sizeof(int); + } else { + // Include klass to copy by 8 bytes words. + base_off = instanceOopDesc::klass_offset_in_bytes(); + } + assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment"); + } + Node* src_base = basic_plus_adr(obj, base_off); + Node* dst_base = basic_plus_adr(alloc_obj, base_off); + + // Compute the length also, if needed: + Node* countx = size; + countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off))); + countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong))); + + access_clone(src_base, dst_base, countx, is_array); // Do not let reads from the cloned object float above the arraycopy. if (alloc != NULL) { @@ -4294,7 +4600,12 @@ { PreserveReexecuteState preexecs(this); jvms()->set_should_reexecute(true); - Node* obj = null_check_receiver(); + Node* obj = argument(0); + if (obj->is_ValueType()) { + return false; + } + + obj = null_check_receiver(); if (stopped()) return true; const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); @@ -4304,7 +4615,8 @@ // loads/stores. Maybe a speculative type can help us. if (!obj_type->klass_is_exact() && obj_type->speculative_type() != NULL && - obj_type->speculative_type()->is_instance_klass()) { + obj_type->speculative_type()->is_instance_klass() && + !obj_type->speculative_type()->is_valuetype()) { ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass(); if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem && !spec_ik->has_injected_fields()) { @@ -4341,61 +4653,72 @@ PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); record_for_igvn(result_reg); + // We only go to the fast case code if we pass a number of guards. + // The paths which do not pass are accumulated in the slow_region. + RegionNode* slow_region = new RegionNode(1); + record_for_igvn(slow_region); + Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); if (array_ctl != NULL) { // It's an array. 
PreserveJVMState pjvms(this); set_control(array_ctl); - Node* obj_length = load_array_length(obj); - Node* obj_size = NULL; - Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) { - // If it is an oop array, it requires very special treatment, - // because gc barriers are required when accessing the array. - Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); - if (is_obja != NULL) { - PreserveJVMState pjvms2(this); - set_control(is_obja); - obj = access_resolve(obj, ACCESS_READ); - // Generate a direct call to the right arraycopy function(s). - Node* alloc = tightly_coupled_allocation(alloc_obj, NULL); - ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false); - ac->set_cloneoop(); - Node* n = _gvn.transform(ac); - assert(n == ac, "cannot disappear"); - ac->connect_outputs(this); - - result_reg->init_req(_objArray_path, control()); - result_val->init_req(_objArray_path, alloc_obj); - result_i_o ->set_req(_objArray_path, i_o()); - result_mem ->set_req(_objArray_path, reset_memory()); - } + // Value type array may have object field that would require a + // write barrier. Conservatively, go to slow path. + generate_valueArray_guard(obj_klass, slow_region); } - // Otherwise, there are no barriers to worry about. - // (We can dispense with card marks if we know the allocation - // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks - // causes the non-eden paths to take compensating steps to - // simulate a fresh allocation, so that no further - // card marks are required in compiled code to initialize - // the object.) 
if (!stopped()) { - copy_to_clone(obj, alloc_obj, obj_size, true); + Node* obj_length = load_array_length(obj); + Node* obj_size = NULL; + Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push + + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) { + // If it is an oop array, it requires very special treatment, + // because gc barriers are required when accessing the array. + Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); + if (is_obja != NULL) { + PreserveJVMState pjvms2(this); + set_control(is_obja); + // Generate a direct call to the right arraycopy function(s). + Node* alloc = tightly_coupled_allocation(alloc_obj, NULL); + ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false); + ac->set_cloneoop(); + Node* n = _gvn.transform(ac); + assert(n == ac, "cannot disappear"); + ac->connect_outputs(this); + + result_reg->init_req(_objArray_path, control()); + result_val->init_req(_objArray_path, alloc_obj); + result_i_o ->set_req(_objArray_path, i_o()); + result_mem ->set_req(_objArray_path, reset_memory()); + } + } - // Present the results of the copy. - result_reg->init_req(_array_path, control()); - result_val->init_req(_array_path, alloc_obj); - result_i_o ->set_req(_array_path, i_o()); - result_mem ->set_req(_array_path, reset_memory()); + // Otherwise, there are no barriers to worry about. + // (We can dispense with card marks if we know the allocation + // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks + // causes the non-eden paths to take compensating steps to + // simulate a fresh allocation, so that no further + // card marks are required in compiled code to initialize + // the object.) + + if (!stopped()) { + copy_to_clone(obj, alloc_obj, obj_size, true); + + // Present the results of the copy. 
+ result_reg->init_req(_array_path, control()); + result_val->init_req(_array_path, alloc_obj); + result_i_o ->set_req(_array_path, i_o()); + result_mem ->set_req(_array_path, reset_memory()); + } } } - // We only go to the instance fast case code if we pass a number of guards. - // The paths which do not pass are accumulated in the slow_region. - RegionNode* slow_region = new RegionNode(1); - record_for_igvn(slow_region); if (!stopped()) { // It's an instance (we did array above). Make the slow-path tests. // If this is a virtual call, we generate a funny guard. We grab @@ -4556,11 +4879,10 @@ _reexecute_sp = saved_reexecute_sp; // Remove the allocation from above the guards - CallProjections callprojs; - alloc->extract_projections(&callprojs, true); + CallProjections* callprojs = alloc->extract_projections(true); InitializeNode* init = alloc->initialization(); Node* alloc_mem = alloc->in(TypeFunc::Memory); - C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O)); + C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O)); C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem); C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0)); @@ -4572,7 +4894,7 @@ set_all_memory(mem); alloc->set_req(TypeFunc::Memory, mem); set_control(init->proj_out_or_null(TypeFunc::Control)); - set_i_o(callprojs.fallthrough_ioproj); + set_i_o(callprojs->fallthrough_ioproj); // Update memory as done in GraphKit::set_output_for_allocation() const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength)); @@ -4816,6 +5138,26 @@ Deoptimization::Action_make_not_entrant); assert(stopped(), "Should be stopped"); } + + const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr(); + const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass()); + src = _gvn.transform(new CheckCastPPNode(control(), src, toop)); + + src_type = _gvn.type(src); + top_src = src_type->isa_aryptr(); + + if (top_dest != NULL && + 
top_dest->elem()->make_oopptr() != NULL && + top_dest->elem()->make_oopptr()->can_be_value_type()) { + generate_valueArray_guard(load_object_klass(dest), slow_region); + } + + if (top_src != NULL && + top_src->elem()->make_oopptr() != NULL && + top_src->elem()->make_oopptr()->can_be_value_type()) { + generate_valueArray_guard(load_object_klass(src), slow_region); + } + { PreserveJVMState pjvms(this); set_control(_gvn.transform(slow_region)); @@ -4823,10 +5165,6 @@ Deoptimization::Action_make_not_entrant); assert(stopped(), "Should be stopped"); } - - const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr(); - const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass()); - src = _gvn.transform(new CheckCastPPNode(control(), src, toop)); } arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx); --- old/src/hotspot/share/opto/live.cpp 2019-03-11 14:26:45.474354612 +0100 +++ new/src/hotspot/share/opto/live.cpp 2019-03-11 14:26:45.266354614 +0100 @@ -337,7 +337,7 @@ // Derived is NULL+offset assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer"); } else { - assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer"); + assert(check->bottom_type()->is_ptr()->offset() == 0,"Bad base pointer"); // Base either ConP(NULL) or loadConP if (check->is_Mach()) { assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer"); @@ -346,7 +346,7 @@ check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer"); } } - } else if( check->bottom_type()->is_ptr()->_offset == 0 ) { + } else if (check->bottom_type()->is_ptr()->offset() == 0) { if(check->is_Proj() || (check->is_Mach() && (check->as_Mach()->ideal_Opcode() == Op_CreateEx || check->as_Mach()->ideal_Opcode() == Op_ThreadLocal || --- old/src/hotspot/share/opto/locknode.cpp 2019-03-11 14:26:45.902354606 +0100 +++ new/src/hotspot/share/opto/locknode.cpp 2019-03-11 14:26:45.690354609 +0100 @@ -182,8 
+182,16 @@ void Parse::do_monitor_enter() { kill_dead_locals(); + Node* obj = peek(); + + if (obj->is_ValueType()) { + uncommon_trap(Deoptimization::Reason_class_check, + Deoptimization::Action_none); + return; + } + // Null check; get casted pointer. - Node* obj = null_check(peek()); + obj = null_check(obj); // Check for locking null object if (stopped()) return; --- old/src/hotspot/share/opto/loopopts.cpp 2019-03-11 14:26:46.326354600 +0100 +++ new/src/hotspot/share/opto/loopopts.cpp 2019-03-11 14:26:46.118354603 +0100 @@ -40,6 +40,7 @@ #include "opto/opaquenode.hpp" #include "opto/rootnode.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "utilities/macros.hpp" #if INCLUDE_ZGC #include "gc/z/c2/zBarrierSetC2.hpp" @@ -63,6 +64,12 @@ return NULL; } + // Value types should not be split through Phis because they cannot be merged + // through Phi nodes but each value input needs to be merged individually. + if (n->is_ValueType()) { + return NULL; + } + int wins = 0; assert(!n->is_CFG(), ""); assert(region->is_Region(), ""); @@ -1449,6 +1456,12 @@ try_move_store_after_loop(n); + // Remove multiple allocations of the same value type + if (n->is_ValueType() && EliminateAllocations) { + n->as_ValueType()->remove_redundant_allocations(&_igvn, this); + return; // n is now dead + } + // Check for Opaque2's who's loop has disappeared - who's input is in the // same loop nest as their output. Remove 'em, they are no longer useful. 
if( n_op == Op_Opaque2 && --- old/src/hotspot/share/opto/machnode.cpp 2019-03-11 14:26:46.766354594 +0100 +++ new/src/hotspot/share/opto/machnode.cpp 2019-03-11 14:26:46.562354596 +0100 @@ -380,6 +380,22 @@ } assert(tp->base() != Type::AnyPtr, "not a bare pointer"); + if (tp->isa_aryptr()) { + // In the case of a flattened value type array, each field has its + // own slice so we need to extract the field being accessed from + // the address computation + if (offset == Type::OffsetBot) { + Node* base; + Node* index; + const MachOper* oper = memory_inputs(base, index); + if (oper != (MachOper*)-1) { + offset = oper->constant_disp(); + return tp->is_aryptr()->add_field_offset_and_offset(offset)->add_offset(Type::OffsetBot); + } + } + return tp->is_aryptr()->add_field_offset_and_offset(offset); + } + return tp->add_offset(offset); } @@ -653,8 +669,8 @@ uint MachCallNode::cmp( const Node &n ) const { return _tf == ((MachCallNode&)n)._tf; } -const Type *MachCallNode::bottom_type() const { return tf()->range(); } -const Type* MachCallNode::Value(PhaseGVN* phase) const { return tf()->range(); } +const Type *MachCallNode::bottom_type() const { return tf()->range_cc(); } +const Type* MachCallNode::Value(PhaseGVN* phase) const { return tf()->range_cc(); } #ifndef PRODUCT void MachCallNode::dump_spec(outputStream *st) const { @@ -666,11 +682,13 @@ #endif bool MachCallNode::return_value_is_used() const { - if (tf()->range()->cnt() == TypeFunc::Parms) { + if (tf()->range_sig()->cnt() == TypeFunc::Parms) { // void return return false; } + assert(tf()->returns_value_type_as_fields(), "multiple return values not supported"); + // find the projection corresponding to the return value for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node *use = fast_out(i); @@ -686,16 +704,25 @@ // Because this is used in deoptimization, we want the type info, not the data // flow info; the interpreter will "use" things that are dead to the optimizer. 
bool MachCallNode::returns_pointer() const { - const TypeTuple *r = tf()->range(); + const TypeTuple *r = tf()->range_sig(); return (r->cnt() > TypeFunc::Parms && r->field_at(TypeFunc::Parms)->isa_ptr()); } +bool MachCallNode::returns_vt() const { + return tf()->returns_value_type_as_fields(); +} + //------------------------------Registers-------------------------------------- const RegMask &MachCallNode::in_RegMask(uint idx) const { // Values in the domain use the users calling convention, embodied in the // _in_rms array of RegMasks. - if (idx < tf()->domain()->cnt()) { + if (entry_point() == NULL && idx == TypeFunc::Parms) { + // Null entry point is a special cast where the target of the call + // is in a register. + return MachNode::in_RegMask(idx); + } + if (idx < tf()->domain_sig()->cnt()) { return _in_rms[idx]; } if (idx == mach_constant_base_node_input()) { @@ -728,7 +755,7 @@ const RegMask &MachCallJavaNode::in_RegMask(uint idx) const { // Values in the domain use the users calling convention, embodied in the // _in_rms array of RegMasks. 
- if (idx < tf()->domain()->cnt()) { + if (idx < tf()->domain_cc()->cnt()) { return _in_rms[idx]; } if (idx == mach_constant_base_node_input()) { --- old/src/hotspot/share/opto/machnode.hpp 2019-03-11 14:26:47.198354588 +0100 +++ new/src/hotspot/share/opto/machnode.hpp 2019-03-11 14:26:46.990354591 +0100 @@ -463,6 +463,27 @@ int constant_offset_unchecked() const; }; +//------------------------------MachVEPNode----------------------------------- +// Machine Value Type Entry Point Node +class MachVEPNode : public MachIdealNode { +public: + MachVEPNode(Label* verified_entry, bool verified, bool receiver_only) : + _verified_entry(verified_entry), + _verified(verified), + _receiver_only(receiver_only) {} + virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const; + virtual uint size(PhaseRegAlloc* ra_) const; + +#ifndef PRODUCT + virtual const char* Name() const { return "ValueType Entry-Point"; } + virtual void format(PhaseRegAlloc*, outputStream* st) const; +#endif +private: + Label* _verified_entry; + bool _verified; + bool _receiver_only; +}; + //------------------------------MachUEPNode----------------------------------- // Machine Unvalidated Entry Point Node class MachUEPNode : public MachIdealNode { @@ -481,11 +502,14 @@ // Machine function Prolog Node class MachPrologNode : public MachIdealNode { public: - MachPrologNode( ) {} + MachPrologNode(Label* verified_entry) : _verified_entry(verified_entry) { + init_class_id(Class_MachProlog); + } virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; virtual uint size(PhaseRegAlloc *ra_) const; virtual int reloc() const; + Label* _verified_entry; #ifndef PRODUCT virtual const char *Name() const { return "Prolog"; } virtual void format( PhaseRegAlloc *, outputStream *st ) const; @@ -894,6 +918,7 @@ // Similar to cousin class CallNode::returns_pointer bool returns_pointer() const; + bool returns_vt() const; #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; --- 
old/src/hotspot/share/opto/macro.cpp 2019-03-11 14:26:47.630354582 +0100 +++ new/src/hotspot/share/opto/macro.cpp 2019-03-11 14:26:47.414354585 +0100 @@ -46,6 +46,7 @@ #include "opto/runtime.hpp" #include "opto/subnode.hpp" #include "opto/type.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/macros.hpp" #if INCLUDE_G1GC @@ -80,8 +81,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) { // Copy debug information and adjust JVMState information - uint old_dbg_start = oldcall->tf()->domain()->cnt(); - uint new_dbg_start = newcall->tf()->domain()->cnt(); + uint old_dbg_start = oldcall->tf()->domain_sig()->cnt(); + uint new_dbg_start = newcall->tf()->domain_sig()->cnt(); int jvms_adj = new_dbg_start - old_dbg_start; assert (new_dbg_start == newcall->req(), "argument count mismatch"); @@ -276,7 +277,7 @@ int adr_idx = phase->C->get_alias_index(atype); if (adr_idx == alias_idx) { assert(atype->isa_oopptr(), "address type must be oopptr"); - int adr_offset = atype->offset(); + int adr_offset = atype->flattened_offset(); uint adr_iid = atype->is_oopptr()->instance_id(); // Array elements references have the same alias_idx // but different offset and different instance_id. 
@@ -319,7 +320,7 @@ return NULL; } mem = mem->in(MemNode::Memory); - } else if (mem->Opcode() == Op_StrInflatedCopy) { + } else if (mem->Opcode() == Op_StrInflatedCopy) { Node* adr = mem->in(3); // Destination array const TypePtr* atype = adr->bottom_type()->is_ptr(); int adr_idx = phase->C->get_alias_index(atype); @@ -387,7 +388,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) { assert(mem->is_Phi(), "sanity"); int alias_idx = C->get_alias_index(adr_t); - int offset = adr_t->offset(); + int offset = adr_t->flattened_offset(); int instance_id = adr_t->instance_id(); // Check if an appropriate value phi already exists. @@ -489,14 +490,13 @@ assert((uint)instance_id == alloc->_idx, "wrong allocation"); int alias_idx = C->get_alias_index(adr_t); - int offset = adr_t->offset(); + int offset = adr_t->flattened_offset(); Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); Node *alloc_ctrl = alloc->in(TypeFunc::Control); Node *alloc_mem = alloc->in(TypeFunc::Memory); Arena *a = Thread::current()->resource_area(); VectorSet visited(a); - bool done = sfpt_mem == alloc_mem; Node *mem = sfpt_mem; while (!done) { @@ -509,7 +509,7 @@ } else if (mem->is_Initialize()) { mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); if (mem == NULL) { - done = true; // Something go wrong. + done = true; // Something went wrong. 
} else if (mem->is_Store()) { const TypePtr* atype = mem->as_Store()->adr_type(); assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice"); @@ -519,7 +519,7 @@ const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr(); assert(atype != NULL, "address type must be oopptr"); assert(C->get_alias_index(atype) == alias_idx && - atype->is_known_instance_field() && atype->offset() == offset && + atype->is_known_instance_field() && atype->flattened_offset() == offset && atype->instance_id() == instance_id, "store is correct memory slice"); done = true; } else if (mem->is_Phi()) { @@ -551,6 +551,11 @@ if (mem != NULL) { if (mem == start_mem || mem == alloc_mem) { // hit a sentinel, return appropriate 0 value + Node* default_value = alloc->in(AllocateNode::DefaultValue); + if (default_value != NULL) { + return default_value; + } + assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null"); return _igvn.zerocon(ft); } else if (mem->is_Store()) { Node* n = mem->in(MemNode::ValueIn); @@ -582,10 +587,47 @@ return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc); } } - // Something go wrong. + // Something went wrong. return NULL; } +// Search the last value stored into the value type's fields. 
+Node* PhaseMacroExpand::value_type_from_mem(Node* mem, Node* ctl, ciValueKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) { + // Subtract the offset of the first field to account for the missing oop header + offset -= vk->first_field_offset(); + // Create a new ValueTypeNode and retrieve the field values from memory + ValueTypeNode* vt = ValueTypeNode::make_uninitialized(_igvn, vk)->as_ValueType(); + for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) { + ciType* field_type = vt->field_type(i); + int field_offset = offset + vt->field_offset(i); + // Each value type field has its own memory slice + adr_type = adr_type->with_field_offset(field_offset); + Node* value = NULL; + if (vt->field_is_flattened(i)) { + value = value_type_from_mem(mem, ctl, field_type->as_value_klass(), adr_type, field_offset, alloc); + } else { + const Type* ft = Type::get_const_type(field_type); + BasicType bt = field_type->basic_type(); + if (UseCompressedOops && !is_java_primitive(bt)) { + ft = ft->make_narrowoop(); + bt = T_NARROWOOP; + } + value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc); + if (value != NULL && ft->isa_narrowoop()) { + assert(UseCompressedOops, "unexpected narrow oop"); + value = transform_later(new DecodeNNode(value, value->get_ptr_type())); + } + } + if (value != NULL) { + vt->set_field_value(i, value); + } else { + // We might have reached the TrackedInitializationLimit + return NULL; + } + } + return vt; +} + // Check the possibility of scalar replacement. 
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray & safepoints) { // Scan the uses of the allocation to check for anything that would @@ -641,7 +683,7 @@ if (n->is_Load() || n->is_LoadStore()) { NOT_PRODUCT(fail_eliminate = "Field load";) } else { - NOT_PRODUCT(fail_eliminate = "Not store field referrence";) + NOT_PRODUCT(fail_eliminate = "Not store field reference";) } can_eliminate = false; } @@ -668,6 +710,10 @@ } else { safepoints.append_if_missing(sfpt); } + } else if (use->is_ValueType() && use->isa_ValueType()->get_oop() == res) { + // ok to eliminate + } else if (use->is_Store()) { + // store to mark work } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark if (use->is_Phi()) { if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) { @@ -679,12 +725,15 @@ } else { if (use->Opcode() == Op_Return) { NOT_PRODUCT(fail_eliminate = "Object is return value";) - }else { + } else { NOT_PRODUCT(fail_eliminate = "Object is referenced by node";) } DEBUG_ONLY(disq_node = use;) } can_eliminate = false; + } else { + assert(use->Opcode() == Op_CastP2X, "should be"); + assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null"); } } } @@ -747,13 +796,25 @@ assert(klass->is_array_klass() && nfields >= 0, "must be an array klass."); elem_type = klass->as_array_klass()->element_type(); basic_elem_type = elem_type->basic_type(); + if (elem_type->is_valuetype()) { + ciValueKlass* vk = elem_type->as_value_klass(); + if (!vk->flatten_array()) { + assert(basic_elem_type == T_VALUETYPE, "unexpected element basic type"); + basic_elem_type = T_OBJECT; + } + } array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); element_size = type2aelembytes(basic_elem_type); + if (klass->is_value_array_klass()) { + // Flattened value type array + element_size = klass->as_value_array_klass()->element_byte_size(); + } } } // // Process the safepoint uses // + Unique_Node_List value_worklist; 
while (safepoints.length() > 0) { SafePointNode* sfpt = safepoints.pop(); Node* mem = sfpt->memory(); @@ -780,6 +841,7 @@ offset = field->offset(); elem_type = field->type(); basic_elem_type = field->layout_type(); + assert(!field->is_flattened(), "flattened value type fields should not have safepoint uses"); } else { offset = array_base + j * (intptr_t)element_size; } @@ -807,9 +869,15 @@ field_type = Type::get_const_basic_type(basic_elem_type); } - const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr(); - - Node *field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc); + Node* field_val = NULL; + const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr(); + if (klass->is_value_array_klass()) { + ciValueKlass* vk = elem_type->as_value_klass(); + assert(vk->flatten_array(), "must be flattened"); + field_val = value_type_from_mem(mem, ctl, vk, field_addr_type->isa_aryptr(), 0, alloc); + } else { + field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc); + } if (field_val == NULL) { // We weren't able to find a value for this field, // give up on eliminating this allocation. 
@@ -875,6 +943,9 @@ } else { field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type())); } + } else if (field_val->is_ValueType()) { + // Keep track of value types to scalarize them later + value_worklist.push(field_val); } sfpt->add_req(field_val); } @@ -888,6 +959,11 @@ _igvn._worklist.push(sfpt); safepoints_done.append_if_missing(sfpt); // keep it for rollback } + // Scalarize value types that were added to the safepoint + for (uint i = 0; i < value_worklist.size(); ++i) { + Node* vt = value_worklist.at(i); + vt->as_ValueType()->make_scalar_in_safepoints(&_igvn); + } return true; } @@ -952,12 +1028,11 @@ assert(ac->is_arraycopy_validated() || ac->is_copyof_validated() || ac->is_copyofrange_validated(), "unsupported"); - CallProjections callprojs; - ac->extract_projections(&callprojs, true); + CallProjections* callprojs = ac->extract_projections(true); - _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O)); - _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory)); - _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control)); + _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O)); + _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory)); + _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control)); // Set control to top. 
IGVN will remove the remaining projections ac->set_req(0, top()); @@ -974,6 +1049,12 @@ } _igvn._worklist.push(ac); + } else if (use->is_ValueType()) { + assert(use->isa_ValueType()->get_oop() == res, "unexpected value type use"); + _igvn.rehash_node_delayed(use); + use->isa_ValueType()->set_oop(_igvn.zerocon(T_VALUETYPE)); + } else if (use->is_Store()) { + _igvn.replace_node(use, use->in(MemNode::Memory)); } else { eliminate_gc_barrier(use); } @@ -1125,7 +1206,7 @@ extract_call_projections(boxing); - const TypeTuple* r = boxing->tf()->range(); + const TypeTuple* r = boxing->tf()->range_sig(); assert(r->cnt() > TypeFunc::Parms, "sanity"); const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr(); assert(t != NULL, "sanity"); @@ -1285,30 +1366,28 @@ initial_slow_test = NULL; } - - enum { too_big_or_final_path = 1, need_gc_path = 2 }; Node *slow_region = NULL; Node *toobig_false = ctrl; assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent"); // generate the initial test if necessary if (initial_slow_test != NULL ) { - slow_region = new RegionNode(3); - + if (slow_region == NULL) { + slow_region = new RegionNode(1); + } // Now make the initial failure test. Usually a too-big test but // might be a TRUE for finalizers or a fancy class check for // newInstance0. 
- IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN); + IfNode* toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN); transform_later(toobig_iff); // Plug the failing-too-big test into the slow-path region - Node *toobig_true = new IfTrueNode( toobig_iff ); + Node* toobig_true = new IfTrueNode(toobig_iff); transform_later(toobig_true); - slow_region ->init_req( too_big_or_final_path, toobig_true ); - toobig_false = new IfFalseNode( toobig_iff ); + slow_region ->add_req(toobig_true); + toobig_false = new IfFalseNode(toobig_iff); transform_later(toobig_false); } else { // No initial test, just fall into next case toobig_false = ctrl; - debug_only(slow_region = NodeSentinel); } Node *slow_mem = mem; // save the current memory state for slow path @@ -1341,11 +1420,11 @@ fast_oop_ctrl, fast_oop_rawmem, prefetch_lines); - if (initial_slow_test) { - slow_region->init_req(need_gc_path, needgc_ctrl); + if (slow_region != NULL) { + slow_region->add_req(needgc_ctrl); // This completes all paths into the slow merge point transform_later(slow_region); - } else { // No initial slow path needed! + } else { // Just fall from the need-GC path straight into the VM call. slow_region = needgc_ctrl; } @@ -1612,21 +1691,17 @@ // Helper for PhaseMacroExpand::expand_allocate_common. // Initializes the newly-allocated storage. 
-Node* -PhaseMacroExpand::initialize_object(AllocateNode* alloc, - Node* control, Node* rawmem, Node* object, - Node* klass_node, Node* length, - Node* size_in_bytes) { +Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc, + Node* control, Node* rawmem, Node* object, + Node* klass_node, Node* length, + Node* size_in_bytes) { InitializeNode* init = alloc->initialization(); // Store the klass & mark bits - Node* mark_node = NULL; - // For now only enable fast locking for non-array types - if (UseBiasedLocking && (length == NULL)) { - mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS); - } else { - mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); + Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem, klass_node); + if (!mark_node->is_Con()) { + transform_later(mark_node); } - rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); + rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type()); rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); int header_size = alloc->minimum_header_size(); // conservatively small @@ -1654,6 +1729,8 @@ // within an Allocate, and then (maybe or maybe not) clear some more later. if (!(UseTLAB && ZeroTLAB)) { rawmem = ClearArrayNode::clear_memory(control, rawmem, object, + alloc->in(AllocateNode::DefaultValue), + alloc->in(AllocateNode::RawDefaultValue), header_size, size_in_bytes, &_igvn); } @@ -2415,6 +2492,211 @@ _igvn.replace_node(_memproj_fallthrough, mem_phi); } +// A value type might be returned from the call but we don't know its +// type. 
Either we get a buffered value (and nothing needs to be done) +// or one of the values being returned is the klass of the value type +// and we need to allocate a value type instance of that type and +// initialize it with other values being returned. In that case, we +// first try a fast path allocation and initialize the value with the +// value klass's pack handler or we fall back to a runtime call. +void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) { + assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call"); + Node* ret = call->proj_out_or_null(TypeFunc::Parms); + if (ret == NULL) { + return; + } + const TypeFunc* tf = call->_tf; + const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc(); + const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain); + call->_tf = new_tf; + // Make sure the change of type is applied before projections are processed by igvn + _igvn.set_type(call, call->Value(&_igvn)); + _igvn.set_type(ret, ret->Value(&_igvn)); + + // Before any new projection is added: + CallProjections* projs = call->extract_projections(true, true); + + Node* ctl = new Node(1); + Node* mem = new Node(1); + Node* io = new Node(1); + Node* ex_ctl = new Node(1); + Node* ex_mem = new Node(1); + Node* ex_io = new Node(1); + Node* res = new Node(1); + + Node* cast = transform_later(new CastP2XNode(ctl, res)); + Node* mask = MakeConX(0x1); + Node* masked = transform_later(new AndXNode(cast, mask)); + Node* cmp = transform_later(new CmpXNode(masked, mask)); + Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq)); + IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN); + transform_later(allocation_iff); + Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff)); + Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff)); + + Node* no_allocation_res = transform_later(new 
CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM)); + + Node* mask2 = MakeConX(-2); + Node* masked2 = transform_later(new AndXNode(cast, mask2)); + Node* rawklassptr = transform_later(new CastX2PNode(masked2)); + Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeKlassPtr::OBJECT_OR_NULL)); + + Node* slowpath_bol = NULL; + Node* top_adr = NULL; + Node* old_top = NULL; + Node* new_top = NULL; + if (UseTLAB) { + Node* end_adr = NULL; + set_eden_pointers(top_adr, end_adr); + Node* end = make_load(ctl, mem, end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS); + old_top = new LoadPNode(ctl, mem, top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); + transform_later(old_top); + Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT); + Node* size_in_bytes = ConvI2X(layout_val); + new_top = new AddPNode(top(), old_top, size_in_bytes); + transform_later(new_top); + Node* slowpath_cmp = new CmpPNode(new_top, end); + transform_later(slowpath_cmp); + slowpath_bol = new BoolNode(slowpath_cmp, BoolTest::ge); + transform_later(slowpath_bol); + } else { + slowpath_bol = intcon(1); + top_adr = top(); + old_top = top(); + new_top = top(); + } + IfNode* slowpath_iff = new IfNode(allocation_ctl, slowpath_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN); + transform_later(slowpath_iff); + + Node* slowpath_true = new IfTrueNode(slowpath_iff); + transform_later(slowpath_true); + + CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_value_type_fields_Type(), + StubRoutines::store_value_type_fields_to_buf(), + "store_value_type_fields", + call->jvms()->bci(), + TypePtr::BOTTOM); + slow_call->init_req(TypeFunc::Control, slowpath_true); + slow_call->init_req(TypeFunc::Memory, mem); + slow_call->init_req(TypeFunc::I_O, io); + slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr)); + slow_call->init_req(TypeFunc::ReturnAdr, 
call->in(TypeFunc::ReturnAdr)); + slow_call->init_req(TypeFunc::Parms, res); + + Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control)); + Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory)); + Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O)); + Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms)); + Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2)); + Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)); + Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci)); + + Node* ex_r = new RegionNode(3); + Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM); + Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO); + ex_r->init_req(1, slow_excp); + ex_mem_phi->init_req(1, slow_mem); + ex_io_phi->init_req(1, slow_io); + ex_r->init_req(2, ex_ctl); + ex_mem_phi->init_req(2, ex_mem); + ex_io_phi->init_req(2, ex_io); + + transform_later(ex_r); + transform_later(ex_mem_phi); + transform_later(ex_io_phi); + + Node* slowpath_false = new IfFalseNode(slowpath_iff); + transform_later(slowpath_false); + Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered); + transform_later(rawmem); + Node* mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype())); + rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); + rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + if (UseCompressedClassPointers) { + rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT); + } + Node* fixed_block = make_load(slowpath_false, rawmem, klass_node, 
in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS); + Node* pack_handler = make_load(slowpath_false, rawmem, fixed_block, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS); + + CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(), + NULL, + "pack handler", + TypeRawPtr::BOTTOM); + handler_call->init_req(TypeFunc::Control, slowpath_false); + handler_call->init_req(TypeFunc::Memory, rawmem); + handler_call->init_req(TypeFunc::I_O, top()); + handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr)); + handler_call->init_req(TypeFunc::ReturnAdr, top()); + handler_call->init_req(TypeFunc::Parms, pack_handler); + handler_call->init_req(TypeFunc::Parms+1, old_top); + + // We don't know how many values are returned. This assumes the + // worst case, that all available registers are used. + for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) { + if (domain->field_at(i) == Type::HALF) { + slow_call->init_req(i, top()); + handler_call->init_req(i+1, top()); + continue; + } + Node* proj = transform_later(new ProjNode(call, i)); + slow_call->init_req(i, proj); + handler_call->init_req(i+1, proj); + } + + // We can safepoint at that new call + copy_call_debug_info(call, slow_call); + transform_later(slow_call); + transform_later(handler_call); + + Node* handler_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control)); + rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory)); + Node* slowpath_false_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms)); + + MergeMemNode* slowpath_false_mem = MergeMemNode::make(mem); + slowpath_false_mem->set_memory_at(Compile::AliasIdxRaw, rawmem); + transform_later(slowpath_false_mem); + + Node* r = new RegionNode(4); + Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM); + Node* io_phi = new PhiNode(r, Type::ABIO); + Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM); + + 
r->init_req(1, no_allocation_ctl); + mem_phi->init_req(1, mem); + io_phi->init_req(1, io); + res_phi->init_req(1, no_allocation_res); + r->init_req(2, slow_norm); + mem_phi->init_req(2, slow_mem); + io_phi->init_req(2, slow_io); + res_phi->init_req(2, slow_res); + r->init_req(3, handler_ctl); + mem_phi->init_req(3, slowpath_false_mem); + io_phi->init_req(3, io); + res_phi->init_req(3, slowpath_false_res); + + transform_later(r); + transform_later(mem_phi); + transform_later(io_phi); + transform_later(res_phi); + + assert(projs->nb_resproj == 1, "unexpected number of results"); + _igvn.replace_in_uses(projs->fallthrough_catchproj, r); + _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi); + _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi); + _igvn.replace_in_uses(projs->resproj[0], res_phi); + _igvn.replace_in_uses(projs->catchall_catchproj, ex_r); + _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi); + _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi); + + _igvn.replace_node(ctl, projs->fallthrough_catchproj); + _igvn.replace_node(mem, projs->fallthrough_memproj); + _igvn.replace_node(io, projs->fallthrough_ioproj); + _igvn.replace_node(res, projs->resproj[0]); + _igvn.replace_node(ex_ctl, projs->catchall_catchproj); + _igvn.replace_node(ex_mem, projs->catchall_memproj); + _igvn.replace_node(ex_io, projs->catchall_ioproj); + } + //---------------------------eliminate_macro_nodes---------------------- // Eliminate scalar replaced allocations and associated locks. 
void PhaseMacroExpand::eliminate_macro_nodes() { @@ -2459,9 +2741,13 @@ case Node::Class_AllocateArray: success = eliminate_allocate_node(n->as_Allocate()); break; - case Node::Class_CallStaticJava: - success = eliminate_boxing_node(n->as_CallStaticJava()); + case Node::Class_CallStaticJava: { + CallStaticJavaNode* call = n->as_CallStaticJava(); + if (!call->method()->is_method_handle_intrinsic()) { + success = eliminate_boxing_node(n->as_CallStaticJava()); + } break; + } case Node::Class_Lock: case Node::Class_Unlock: assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); @@ -2511,10 +2797,13 @@ _igvn._worklist.push(n); success = true; } else if (n->Opcode() == Op_CallStaticJava) { - // Remove it from macro list and put on IGVN worklist to optimize. - C->remove_macro_node(n); - _igvn._worklist.push(n); - success = true; + CallStaticJavaNode* call = n->as_CallStaticJava(); + if (!call->method()->is_method_handle_intrinsic()) { + // Remove it from macro list and put on IGVN worklist to optimize. 
+ C->remove_macro_node(n); + _igvn._worklist.push(n); + success = true; + } } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { _igvn.replace_node(n, n->in(1)); success = true; @@ -2595,6 +2884,10 @@ case Node::Class_Unlock: expand_unlock_node(n->as_Unlock()); break; + case Node::Class_CallStaticJava: + expand_mh_intrinsic_return(n->as_CallStaticJava()); + C->remove_macro_node(n); + break; default: assert(false, "unknown node type in macro list"); } --- old/src/hotspot/share/opto/macro.hpp 2019-03-11 14:26:48.074354576 +0100 +++ new/src/hotspot/share/opto/macro.hpp 2019-03-11 14:26:47.862354579 +0100 @@ -85,6 +85,7 @@ address slow_call_address); Node *value_from_mem(Node *mem, Node *ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc); Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level); + Node* value_type_from_mem(Node* mem, Node* ctl, ciValueKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc); bool eliminate_boxing_node(CallStaticJavaNode *boxing); bool eliminate_allocate_node(AllocateNode *alloc); @@ -98,6 +99,7 @@ bool eliminate_locking_node(AbstractLockNode *alock); void expand_lock_node(LockNode *lock); void expand_unlock_node(UnlockNode *unlock); + void expand_mh_intrinsic_return(CallStaticJavaNode* call); // More helper methods modeled after GraphKit for array copy void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = NULL); @@ -115,11 +117,16 @@ // helper methods modeled after LibraryCallKit for array copy Node* generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob); Node* generate_slow_guard(Node** ctrl, Node* test, RegionNode* region); + Node* generate_fair_guard(Node** ctrl, Node* test, RegionNode* region); void generate_negative_guard(Node** ctrl, Node* index, RegionNode* region); void generate_limit_guard(Node** ctrl, Node* offset, Node* 
subseq_length, Node* array_length, RegionNode* region); // More helper methods for array copy Node* generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative); + Node* generate_flattened_array_guard(Node** ctrl, Node* mem, Node* obj, RegionNode* region); + Node* generate_object_array_guard(Node** ctrl, Node* mem, Node* obj, RegionNode* region); + Node* generate_array_guard(Node** ctrl, Node* mem, Node* obj, RegionNode* region, jint lh_con); + void finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type); address basictype2arraycopy(BasicType t, Node* src_offset, @@ -135,12 +142,15 @@ Node* src, Node* src_offset, Node* dest, Node* dest_offset, Node* copy_length, + Node* dest_length, bool disjoint_bases = false, bool length_never_negative = false, RegionNode* slow_region = NULL); void generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, const TypePtr* adr_type, Node* dest, + Node* val, + Node* raw_val, BasicType basic_elem_type, Node* slice_idx, Node* slice_len, @@ -176,7 +186,9 @@ Node* src, Node* src_offset, Node* dest, Node* dest_offset, Node* copy_length, bool dest_uninitialized); - + const TypePtr* adjust_parameters_for_vt(const TypeAryPtr* top_dest, Node*& src_offset, + Node*& dest_offset, Node*& length, BasicType& dest_elem, + Node*& dest_length); void expand_arraycopy_node(ArrayCopyNode *ac); int replace_input(Node *use, Node *oldref, Node *newref); @@ -195,6 +207,8 @@ Node* make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc); + bool can_try_zeroing_elimination(AllocateArrayNode* alloc, Node* src, Node* dest) const; + public: PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) { _igvn.set_delay_transform(true); --- old/src/hotspot/share/opto/macroArrayCopy.cpp 2019-03-11 14:26:48.494354570 +0100 +++ new/src/hotspot/share/opto/macroArrayCopy.cpp 2019-03-11 14:26:48.290354573 +0100 @@ 
-139,6 +139,10 @@ return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3)); } +inline Node* PhaseMacroExpand::generate_fair_guard(Node** ctrl, Node* test, RegionNode* region) { + return generate_guard(ctrl, test, region, PROB_FAIR); +} + void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) { if ((*ctrl)->is_top()) return; // already stopped @@ -184,6 +188,34 @@ return is_notp; } +Node* PhaseMacroExpand::generate_flattened_array_guard(Node** ctrl, Node* mem, Node* obj_or_klass, RegionNode* region) { + return generate_array_guard(ctrl, mem, obj_or_klass, region, Klass::_lh_array_tag_vt_value); +} + +Node* PhaseMacroExpand::generate_object_array_guard(Node** ctrl, Node* mem, Node* obj_or_klass, RegionNode* region) { + return generate_array_guard(ctrl, mem, obj_or_klass, region, Klass::_lh_array_tag_obj_value); +} + +Node* PhaseMacroExpand::generate_array_guard(Node** ctrl, Node* mem, Node* obj_or_klass, RegionNode* region, jint lh_con) { + if ((*ctrl)->is_top()) return NULL; + + Node* kls = NULL; + if (_igvn.type(obj_or_klass)->isa_oopptr()) { + Node* k_adr = basic_plus_adr(obj_or_klass, oopDesc::klass_offset_in_bytes()); + kls = transform_later(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS)); + } else { + assert(_igvn.type(obj_or_klass)->isa_klassptr(), "what else?"); + kls = obj_or_klass; + } + Node* layout_val = make_load(NULL, mem, kls, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT); + + layout_val = transform_later(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + Node* cmp = transform_later(new CmpINode(layout_val, intcon(lh_con))); + Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq)); + + return generate_fair_guard(ctrl, bol, region); +} + void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) { transform_later(call); @@ -236,6 +268,25 @@ return 
StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); } +bool PhaseMacroExpand::can_try_zeroing_elimination(AllocateArrayNode* alloc, + Node* src, + Node* dest) const { + const TypeAryPtr* top_dest = _igvn.type(dest)->isa_aryptr(); + + if (top_dest != NULL) { + if (top_dest->klass() == NULL) { + return false; + } + } + + return ReduceBulkZeroing + && !(UseTLAB && ZeroTLAB) // pointless if already zeroed + && !src->eqv_uncast(dest) + && alloc != NULL + && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0 + && alloc->maybe_set_complete(&_igvn); +} + #define XTOP LP64_ONLY(COMMA top()) // Generate an optimized call to arraycopy. @@ -278,6 +329,7 @@ Node* src, Node* src_offset, Node* dest, Node* dest_offset, Node* copy_length, + Node* dest_length, bool disjoint_bases, bool length_never_negative, RegionNode* slow_region) { @@ -288,19 +340,16 @@ Node* original_dest = dest; bool dest_uninitialized = false; + Node* default_value = NULL; + Node* raw_default_value = NULL; // See if this is the initialization of a newly-allocated array. // If so, we will take responsibility here for initializing it to zero. // (Note: Because tightly_coupled_allocation performs checks on the // out-edges of the dest, we need to avoid making derived pointers // from it until we have checked its uses.) - if (ReduceBulkZeroing - && !(UseTLAB && ZeroTLAB) // pointless if already zeroed - && basic_elem_type != T_CONFLICT // avoid corner case - && !src->eqv_uncast(dest) - && alloc != NULL - && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0 - && alloc->maybe_set_complete(&_igvn)) { + if (can_try_zeroing_elimination(alloc, src, dest) && + basic_elem_type != T_CONFLICT /* avoid corner case */) { // "You break it, you buy it." 
InitializeNode* init = alloc->initialization(); assert(init->is_complete(), "we just did this"); @@ -313,6 +362,8 @@ // Also, if this flag is set we make sure that arraycopy interacts properly // with G1, eliding pre-barriers. See CR 6627983. dest_uninitialized = true; + default_value = alloc->in(AllocateNode::DefaultValue); + raw_default_value = alloc->in(AllocateNode::RawDefaultValue); } else { // No zeroing elimination here. alloc = NULL; @@ -378,14 +429,15 @@ // copy_length is 0. if (dest_uninitialized) { assert(!local_ctrl->is_top(), "no ctrl?"); - Node* dest_length = alloc->in(AllocateNode::ALength); if (copy_length->eqv_uncast(dest_length) || _igvn.find_int_con(dest_length, 1) <= 0) { // There is no zeroing to do. No need for a secondary raw memory barrier. } else { // Clear the whole thing since there are no source elements to copy. generate_clear_array(local_ctrl, local_mem, - adr_type, dest, basic_elem_type, + adr_type, dest, + default_value, raw_default_value, + basic_elem_type, intcon(0), NULL, alloc->in(AllocateNode::AllocSize)); // Use a secondary InitializeNode as raw memory barrier. @@ -416,13 +468,14 @@ // The copy destination is the slice dest[off..off+len]. The other slices // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. Node* dest_size = alloc->in(AllocateNode::AllocSize); - Node* dest_length = alloc->in(AllocateNode::ALength); Node* dest_tail = transform_later( new AddINode(dest_offset, copy_length)); // If there is a head section that needs zeroing, do it now. 
if (_igvn.find_int_con(dest_offset, -1) != 0) { generate_clear_array(*ctrl, mem, - adr_type, dest, basic_elem_type, + adr_type, dest, + default_value, raw_default_value, + basic_elem_type, intcon(0), dest_offset, NULL); } @@ -471,7 +524,9 @@ *ctrl = tail_ctl; if (notail_ctl == NULL) { generate_clear_array(*ctrl, mem, - adr_type, dest, basic_elem_type, + adr_type, dest, + default_value, raw_default_value, + basic_elem_type, dest_tail, NULL, dest_size); } else { @@ -481,7 +536,9 @@ done_ctl->init_req(1, notail_ctl); done_mem->init_req(1, mem->memory_at(alias_idx)); generate_clear_array(*ctrl, mem, - adr_type, dest, basic_elem_type, + adr_type, dest, + default_value, raw_default_value, + basic_elem_type, dest_tail, NULL, dest_size); done_ctl->init_req(2, *ctrl); @@ -659,7 +716,9 @@ if (dest_uninitialized) { generate_clear_array(local_ctrl, local_mem, - adr_type, dest, basic_elem_type, + adr_type, dest, + default_value, raw_default_value, + basic_elem_type, intcon(0), NULL, alloc->in(AllocateNode::AllocSize)); } @@ -713,6 +772,12 @@ insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore); } else if (InsertMemBarAfterArraycopy) { insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder); + } else if (adr_type == TypeRawPtr::BOTTOM) { + // Do not let reads from the destination float above the arraycopy. + // Since we cannot type the arrays, we don't know which slices + // might be affected. We could restrict this barrier only to those + // memory slices which pertain to array elements--but don't bother. 
+ insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder); } _igvn.replace_node(_memproj_fallthrough, out_mem); @@ -757,6 +822,8 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, const TypePtr* adr_type, Node* dest, + Node* val, + Node* raw_val, BasicType basic_elem_type, Node* slice_idx, Node* slice_len, @@ -772,6 +839,7 @@ Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on // scaling and rounding of indexes: + assert(basic_elem_type != T_VALUETYPE, "should have been converted to a basic type copy"); int scale = exact_log2(type2aelembytes(basic_elem_type)); int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type); int clear_low = (-1 << scale) & (BytesPerInt - 1); @@ -795,12 +863,12 @@ if (start_con >= 0 && end_con >= 0) { // Constant start and end. Simple. - mem = ClearArrayNode::clear_memory(ctrl, mem, dest, + mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val, start_con, end_con, &_igvn); } else if (start_con >= 0 && dest_size != top()) { // Constant start, pre-rounded end after the tail of the array. Node* end = dest_size; - mem = ClearArrayNode::clear_memory(ctrl, mem, dest, + mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val, start_con, end, &_igvn); } else if (start_con >= 0 && slice_len != top()) { // Constant start, non-constant end. End needs rounding up. @@ -813,7 +881,7 @@ end_base += end_round; end = transform_later(new AddXNode(end, MakeConX(end_base)) ); end = transform_later(new AndXNode(end, MakeConX(~end_round)) ); - mem = ClearArrayNode::clear_memory(ctrl, mem, dest, + mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val, start_con, end, &_igvn); } else if (start_con < 0 && dest_size != top()) { // Non-constant start, pre-rounded end after the tail of the array. 
@@ -842,12 +910,18 @@ // Store a zero to the immediately preceding jint: Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) ); Node* p1 = basic_plus_adr(dest, x1); - mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered); + if (val == NULL) { + assert(raw_val == NULL, "val may not be null"); + mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered); + } else { + assert(_igvn.type(val)->isa_narrowoop(), "should be narrow oop"); + mem = new StoreNNode(ctrl, mem, p1, adr_type, val, MemNode::unordered); + } mem = transform_later(mem); } } Node* end = dest_size; // pre-rounded - mem = ClearArrayNode::clear_memory(ctrl, mem, dest, + mem = ClearArrayNode::clear_memory(ctrl, mem, dest, raw_val, start, end, &_igvn); } else { // Non-constant start, unrounded non-constant end. @@ -1082,6 +1156,37 @@ finish_arraycopy_call(call, ctrl, mem, adr_type); } +const TypePtr* PhaseMacroExpand::adjust_parameters_for_vt(const TypeAryPtr* top_dest, Node*& src_offset, + Node*& dest_offset, Node*& length, BasicType& dest_elem, + Node*& dest_length) { + assert(top_dest->klass()->is_value_array_klass(), "inconsistent"); + int elem_size = ((ciValueArrayKlass*)top_dest->klass())->element_byte_size(); + if (elem_size >= 8) { + if (elem_size > 8) { + // treat as array of long but scale length, src offset and dest offset + assert((elem_size % 8) == 0, "not a power of 2?"); + int factor = elem_size / 8; + length = transform_later(new MulINode(length, intcon(factor))); + src_offset = transform_later(new MulINode(src_offset, intcon(factor))); + dest_offset = transform_later(new MulINode(dest_offset, intcon(factor))); + if (dest_length != NULL) { + dest_length = transform_later(new MulINode(dest_length, intcon(factor))); + } + elem_size = 8; + } + dest_elem = T_LONG; + } else if (elem_size == 4) { + dest_elem = T_INT; + } else if (elem_size == 2) { + dest_elem = T_CHAR; + } else if (elem_size == 1) { + dest_elem 
= T_BYTE; + } else { + ShouldNotReachHere(); + } + return TypeRawPtr::BOTTOM; +} + void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { Node* ctrl = ac->in(TypeFunc::Control); Node* io = ac->in(TypeFunc::I_O); @@ -1111,26 +1216,43 @@ return; } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_cloneoop()) { + const Type* dest_type = _igvn.type(dest); + const TypeAryPtr* top_dest = dest_type->isa_aryptr(); + + BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type(); + if (dest_elem == T_ARRAY || (dest_elem == T_VALUETYPE && top_dest->klass()->is_obj_array_klass())) { + dest_elem = T_OBJECT; + } + Node* mem = ac->in(TypeFunc::Memory); merge_mem = MergeMemNode::make(mem); transform_later(merge_mem); - RegionNode* slow_region = new RegionNode(1); - transform_later(slow_region); - AllocateArrayNode* alloc = NULL; if (ac->is_alloc_tightly_coupled()) { alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn); assert(alloc != NULL, "expect alloc"); } + assert(dest_elem != T_VALUETYPE || alloc != NULL, "unsupported"); + Node* dest_length = alloc != NULL ? 
alloc->in(AllocateNode::ALength) : NULL; - const TypePtr* adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot); - if (ac->_dest_type != TypeOopPtr::BOTTOM) { - adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr(); + const TypePtr* adr_type = NULL; + + if (dest_elem == T_VALUETYPE) { + adr_type = adjust_parameters_for_vt(top_dest, src_offset, dest_offset, length, dest_elem, dest_length); + } else { + adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot); + if (ac->_dest_type != TypeOopPtr::BOTTOM) { + adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr(); + } + if (ac->_src_type != ac->_dest_type) { + adr_type = TypeRawPtr::BOTTOM; + } } generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io, - adr_type, T_OBJECT, + adr_type, dest_elem, src, src_offset, dest, dest_offset, length, + dest_length, true, !ac->is_copyofrange()); return; @@ -1163,8 +1285,12 @@ if (top_src != NULL && top_src->klass() != NULL) { src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type(); } - if (src_elem == T_ARRAY) src_elem = T_OBJECT; - if (dest_elem == T_ARRAY) dest_elem = T_OBJECT; + if (src_elem == T_ARRAY || (src_elem == T_VALUETYPE && top_src->klass()->is_obj_array_klass())) { + src_elem = T_OBJECT; + } + if (dest_elem == T_ARRAY || (dest_elem == T_VALUETYPE && top_dest->klass()->is_obj_array_klass())) { + dest_elem = T_OBJECT; + } if (ac->is_arraycopy_validated() && dest_elem != T_CONFLICT && @@ -1183,22 +1309,21 @@ transform_later(merge_mem); } + RegionNode* slow_region = new RegionNode(1); + transform_later(slow_region); + + generate_flattened_array_guard(&ctrl, merge_mem, dest, slow_region); + // Call StubRoutines::generic_arraycopy stub. Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io, TypeRawPtr::BOTTOM, T_CONFLICT, src, src_offset, dest, dest_offset, length, + NULL, // If a negative length guard was generated for the ArrayCopyNode, // the length of the array can never be negative. 
- false, ac->has_negative_length_guard()); + false, ac->has_negative_length_guard(), + slow_region); - // Do not let reads from the destination float above the arraycopy. - // Since we cannot type the arrays, we don't know which slices - // might be affected. We could restrict this barrier only to those - // memory slices which pertain to array elements--but don't bother. - if (!InsertMemBarAfterArraycopy) { - // (If InsertMemBarAfterArraycopy, there is already one in place.) - insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder); - } return; } @@ -1206,6 +1331,11 @@ // (2) src and dest arrays must have elements of the same BasicType // Figure out the size and type of the elements we will be copying. + // + // We have no stub to copy flattened value type arrays with oop + // fields if we need to emit write barriers. + // + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (src_elem != dest_elem || dest_elem == T_VOID) { // The component types are not the same or are not recognized. Punt. // (But, avoid the native method wrapper to JVM_ArrayCopy.) @@ -1233,12 +1363,15 @@ // (8) dest_offset + length must not exceed length of dest. // (9) each element of an oop array must be assignable - { - Node* mem = ac->in(TypeFunc::Memory); - merge_mem = MergeMemNode::make(mem); - transform_later(merge_mem); + Node* mem = ac->in(TypeFunc::Memory); + if (dest_elem == T_VALUETYPE) { + // copy modifies more than 1 slice + insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder); } + merge_mem = MergeMemNode::make(mem); + transform_later(merge_mem); + RegionNode* slow_region = new RegionNode(1); transform_later(slow_region); @@ -1278,10 +1411,28 @@ // (9) each element of an oop array must be assignable // The generate_arraycopy subroutine checks this. 
+ + if (dest_elem == T_OBJECT && + ValueArrayFlatten && + top_dest->elem()->make_oopptr()->can_be_value_type()) { + generate_flattened_array_guard(&ctrl, merge_mem, dest, slow_region); + } + + if (src_elem == T_OBJECT && + ValueArrayFlatten && + top_src->elem()->make_oopptr()->can_be_value_type()) { + generate_flattened_array_guard(&ctrl, merge_mem, src, slow_region); + } } + // This is where the memory effects are placed: const TypePtr* adr_type = NULL; - if (ac->_dest_type != TypeOopPtr::BOTTOM) { + + Node* dest_length = alloc != NULL ? alloc->in(AllocateNode::ALength) : NULL; + + if (dest_elem == T_VALUETYPE) { + adr_type = adjust_parameters_for_vt(top_dest, src_offset, dest_offset, length, dest_elem, dest_length); + } else if (ac->_dest_type != TypeOopPtr::BOTTOM) { adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr(); } else { adr_type = TypeAryPtr::get_array_body_type(dest_elem); @@ -1290,7 +1441,9 @@ generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io, adr_type, dest_elem, src, src_offset, dest, dest_offset, length, + dest_length, // If a negative length guard was generated for the ArrayCopyNode, // the length of the array can never be negative. 
- false, ac->has_negative_length_guard(), slow_region); + false, ac->has_negative_length_guard(), + slow_region); } --- old/src/hotspot/share/opto/matcher.cpp 2019-03-11 14:26:48.930354564 +0100 +++ new/src/hotspot/share/opto/matcher.cpp 2019-03-11 14:26:48.722354567 +0100 @@ -171,6 +171,52 @@ } #endif +// Array of RegMask, one per returned values (value type instances can +// be returned as multiple return values, one per field) +RegMask* Matcher::return_values_mask(const TypeTuple *range) { + uint cnt = range->cnt() - TypeFunc::Parms; + if (cnt == 0) { + return NULL; + } + RegMask* mask = NEW_RESOURCE_ARRAY(RegMask, cnt); + + if (!ValueTypeReturnedAsFields) { + // Get ideal-register return type + uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg(); + // Get machine return register + OptoRegPair regs = return_value(ireg, false); + + // And mask for same + mask[0].Clear(); + mask[0].Insert(regs.first()); + if (OptoReg::is_valid(regs.second())) { + mask[0].Insert(regs.second()); + } + } else { + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt); + VMRegPair* vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt); + + for (uint i = 0; i < cnt; i++) { + sig_bt[i] = range->field_at(i+TypeFunc::Parms)->basic_type(); + } + + int regs = SharedRuntime::java_return_convention(sig_bt, vm_parm_regs, cnt); + assert(regs > 0, "should have been tested during graph construction"); + for (uint i = 0; i < cnt; i++) { + mask[i].Clear(); + + OptoReg::Name reg1 = OptoReg::as_OptoReg(vm_parm_regs[i].first()); + if (OptoReg::is_valid(reg1)) { + mask[i].Insert(reg1); + } + OptoReg::Name reg2 = OptoReg::as_OptoReg(vm_parm_regs[i].second()); + if (OptoReg::is_valid(reg2)) { + mask[i].Insert(reg2); + } + } + } + return mask; +} //---------------------------match--------------------------------------------- void Matcher::match( ) { @@ -186,21 +232,10 @@ _return_addr_mask.Insert(OptoReg::add(return_addr(),1)); #endif - // Map a Java-signature return type into return register-value - // 
machine registers for 0, 1 and 2 returned values. - const TypeTuple *range = C->tf()->range(); - if( range->cnt() > TypeFunc::Parms ) { // If not a void function - // Get ideal-register return type - uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg(); - // Get machine return register - uint sop = C->start()->Opcode(); - OptoRegPair regs = return_value(ireg, false); - - // And mask for same - _return_value_mask = RegMask(regs.first()); - if( OptoReg::is_valid(regs.second()) ) - _return_value_mask.Insert(regs.second()); - } + // Map Java-signature return types into return register-value + // machine registers. + const TypeTuple *range = C->tf()->range_cc(); + _return_values_mask = return_values_mask(range); // --------------- // Frame Layout @@ -208,7 +243,7 @@ // Need the method signature to determine the incoming argument types, // because the types determine which registers the incoming arguments are // in, and this affects the matched code. - const TypeTuple *domain = C->tf()->domain(); + const TypeTuple *domain = C->tf()->domain_cc(); uint argcnt = domain->cnt() - TypeFunc::Parms; BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt ); VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt ); @@ -462,6 +497,25 @@ for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) { C->FIRST_STACK_mask().Insert(i); } + + // Check if the method has a reserved entry in the argument stack area that + // should not be used for spilling because it may hold the return address. 
+ if (C->method() != NULL && C->method()->has_scalarized_args()) { + ExtendedSignature sig_cc = ExtendedSignature(C->method()->get_sig_cc(), SigEntryFilter()); + for (int off = 0; !sig_cc.at_end(); ) { + BasicType bt = (*sig_cc)._bt; + off += type2size[bt]; + while (SigEntry::next_is_reserved(sig_cc, bt)) { + // Remove reserved stack slot from mask to avoid spilling + OptoRegPair reg = _parm_regs[off]; + assert(OptoReg::is_valid(reg.first()), "invalid reserved register"); + C->FIRST_STACK_mask().Remove(reg.first()); + C->FIRST_STACK_mask().Remove(reg.first()+1); // Always occupies two stack slots + off += type2size[bt]; + } + } + } + // Add in all bits past the outgoing argument area guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)), "must be able to represent all call arguments in reg mask"); @@ -654,12 +708,11 @@ // Input RegMask array shared by all Returns. // The type for doubles and longs has a count of 2, but // there is only 1 returned value - uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1); + uint ret_edge_cnt = C->tf()->range_cc()->cnt(); RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask ); - // Returns have 0 or 1 returned values depending on call signature. - // Return register is specified by return_value in the AD file. - if (ret_edge_cnt > TypeFunc::Parms) - ret_rms[TypeFunc::Parms+0] = _return_value_mask; + for (i = TypeFunc::Parms; i < ret_edge_cnt; i++) { + ret_rms[i] = _return_values_mask[i-TypeFunc::Parms]; + } // Input RegMask array shared by all Rethrows. uint reth_edge_cnt = TypeFunc::Parms+1; @@ -726,7 +779,7 @@ } // Next unused projection number from Start. - int proj_cnt = C->tf()->domain()->cnt(); + int proj_cnt = C->tf()->domain_cc()->cnt(); // Do all the save-on-entry registers. Make projections from Start for // them, and give them a use at the exit points. 
To the allocator, they @@ -1007,7 +1060,11 @@ } else { // Nothing the matcher cares about if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) { // Projections? // Convert to machine-dependent projection - m = n->in(0)->as_Multi()->match( n->as_Proj(), this ); + RegMask* mask = NULL; + if (n->in(0)->is_Call()) { + mask = return_values_mask(n->in(0)->as_Call()->tf()->range_cc()); + } + m = n->in(0)->as_Multi()->match(n->as_Proj(), this, mask); #ifdef ASSERT _new2old_map.map(m->_idx, n); #endif @@ -1152,7 +1209,7 @@ bool is_method_handle_invoke = false; // for special kill effects if( sfpt->is_Call() ) { call = sfpt->as_Call(); - domain = call->tf()->domain(); + domain = call->tf()->domain_cc(); cnt = domain->cnt(); // Match just the call, nothing else @@ -1227,13 +1284,16 @@ // Do the normal argument list (parameters) register masks - int argcnt = cnt - TypeFunc::Parms; + // Null entry point is a special case where the target of the call + // is in a register. + int adj = (call != NULL && call->entry_point() == NULL) ? 1 : 0; + int argcnt = cnt - TypeFunc::Parms - adj; if( argcnt > 0 ) { // Skip it all if we have no args BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt ); VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt ); int i; for( i = 0; i < argcnt; i++ ) { - sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type(); + sig_bt[i] = domain->field_at(i+TypeFunc::Parms+adj)->basic_type(); } // V-call to pick proper calling convention call->calling_convention( sig_bt, parm_regs, argcnt ); @@ -1274,19 +1334,21 @@ // and over the entire method. for( i = 0; i < argcnt; i++ ) { // Address of incoming argument mask to fill in - RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms]; + RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms+adj]; if( !parm_regs[i].first()->is_valid() && !parm_regs[i].second()->is_valid() ) { continue; // Avoid Halves } // Grab first register, adjust stack slots and insert in mask. 
OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call ); - if (OptoReg::is_valid(reg1)) + if (OptoReg::is_valid(reg1)) { rm->Insert( reg1 ); + } // Grab second register (if any), adjust stack slots and insert in mask. OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call ); - if (OptoReg::is_valid(reg2)) + if (OptoReg::is_valid(reg2)) { rm->Insert( reg2 ); + } } // End of for all arguments // Compute number of stack slots needed to restore stack in case of @@ -1306,7 +1368,7 @@ // Since the max-per-method covers the max-per-call-site and debug info // is excluded on the max-per-method basis, debug info cannot land in // this killed area. - uint r_cnt = mcall->tf()->range()->cnt(); + uint r_cnt = mcall->tf()->range_sig()->cnt(); MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj ); if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) { C->record_method_not_compilable("unsupported outgoing calling sequence"); @@ -1327,7 +1389,7 @@ // Debug inputs begin just after the last incoming parameter assert((mcall == NULL) || (mcall->jvms() == NULL) || - (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), ""); + (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain_cc()->cnt()), ""); // Move the OopMap msfpt->_oop_map = sfpt->_oop_map; @@ -2362,6 +2424,13 @@ n->del_req(3); break; } + case Op_ClearArray: { + Node* pair = new BinaryNode(n->in(2), n->in(3)); + n->set_req(2, pair); + n->set_req(3, n->in(4)); + n->del_req(4); + break; + } default: break; } --- old/src/hotspot/share/opto/matcher.hpp 2019-03-11 14:26:49.370354558 +0100 +++ new/src/hotspot/share/opto/matcher.hpp 2019-03-11 14:26:49.162354561 +0100 @@ -257,6 +257,8 @@ // Helper for match OptoReg::Name warp_incoming_stk_arg( VMReg reg ); + RegMask* return_values_mask(const TypeTuple *range); + // 
Transform, then walk. Does implicit DCE while walking. // Name changed from "transform" to avoid it being virtual. Node *xform( Node *old_space_node, int Nodes ); @@ -382,7 +384,7 @@ // Return value register. On Intel it is EAX. On Sparc i0/o0. static OptoRegPair return_value(uint ideal_reg, bool is_outgoing); static OptoRegPair c_return_value(uint ideal_reg, bool is_outgoing); - RegMask _return_value_mask; + RegMask* _return_values_mask; // Inline Cache Register static OptoReg::Name inline_cache_reg(); static int inline_cache_reg_encode(); --- old/src/hotspot/share/opto/memnode.cpp 2019-03-11 14:26:49.798354552 +0100 +++ new/src/hotspot/share/opto/memnode.cpp 2019-03-11 14:26:49.586354555 +0100 @@ -44,6 +44,7 @@ #include "opto/narrowptrnode.hpp" #include "opto/phaseX.hpp" #include "opto/regmask.hpp" +#include "opto/valuetypenode.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" #include "utilities/macros.hpp" @@ -238,7 +239,7 @@ phase->C->must_alias(adr_check, alias_idx ); // Sometimes dead array references collapse to a[-1], a[-2], or a[-3] if( !consistent && adr_check != NULL && !adr_check->empty() && - tp->isa_aryptr() && tp->offset() == Type::OffsetBot && + tp->isa_aryptr() && tp->offset() == Type::OffsetBot && adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot && ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() || adr_check->offset() == oopDesc::klass_offset_in_bytes() || @@ -818,6 +819,7 @@ case T_FLOAT: load = new LoadFNode (ctl, mem, adr, adr_type, rt, mo, control_dependency); break; case T_DOUBLE: load = new LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency); break; case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency); break; + case T_VALUETYPE: case T_OBJECT: #ifdef _LP64 if (adr->bottom_type()->is_ptr_to_narrowoop()) { @@ -1080,6 +1082,12 @@ // (This is one of the few places where a generic PhaseTransform // can create new nodes. 
Think of it as lazily manifesting // virtually pre-existing constants.) + assert(memory_type() != T_VALUETYPE, "should not be used for value types"); + Node* default_value = ld_alloc->in(AllocateNode::DefaultValue); + if (default_value != NULL) { + return default_value; + } + assert(ld_alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null"); return phase->zerocon(memory_type()); } @@ -1137,6 +1145,33 @@ //------------------------------Identity--------------------------------------- // Loads are identity if previous store is to same address Node* LoadNode::Identity(PhaseGVN* phase) { + // Loading from a ValueTypePtr? The ValueTypePtr has the values of + // all fields as input. Look for the field with matching offset. + Node* addr = in(Address); + intptr_t offset; + Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset); + if (base != NULL && base->is_ValueTypePtr() && offset > oopDesc::klass_offset_in_bytes()) { + Node* value = base->as_ValueTypePtr()->field_value_by_offset((int)offset, true); + if (value->is_ValueType()) { + // Non-flattened value type field + ValueTypeNode* vt = value->as_ValueType(); + if (vt->is_allocated(phase)) { + value = vt->get_oop(); + } else { + // Not yet allocated, bail out + value = NULL; + } + } + if (value != NULL) { + if (Opcode() == Op_LoadN) { + // Encode oop value if we are loading a narrow oop + assert(!phase->type(value)->isa_narrowoop(), "should already be decoded"); + value = phase->transform(new EncodePNode(value, bottom_type())); + } + return value; + } + } + // If the previous store-maker is the right kind of Store, and the store is // to the same address, then we are equal to the value stored. 
Node* mem = in(Memory); @@ -1664,6 +1699,17 @@ } } + AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase); + if (alloc != NULL && mem->is_Proj() && + mem->in(0) != NULL && + mem->in(0) == alloc->initialization() && + Opcode() == Op_LoadX && + alloc->initialization()->proj_out_or_null(0) != NULL) { + InitializeNode* init = alloc->initialization(); + Node* control = init->proj_out(0); + return alloc->make_ideal_mark(phase, address, control, mem, NULL); + } + return progress ? this : NULL; } @@ -1753,6 +1799,7 @@ // expression (LShiftL quux 3) independently optimized to the constant 8. if ((t->isa_int() == NULL) && (t->isa_long() == NULL) && (_type->isa_vect() == NULL) + && t->isa_valuetype() == NULL && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { // t might actually be lower than _type, if _type is a unique // concrete subclass of abstract class t. @@ -1787,6 +1834,7 @@ assert( off != Type::OffsetBot || // arrays can be cast to Objects tp->is_oopptr()->klass()->is_java_lang_Object() || + tp->is_oopptr()->klass() == ciEnv::current()->Class_klass() || // unsafe field access may not have a constant offset C->has_unsafe_access(), "Field accesses must be precise" ); @@ -1796,7 +1844,17 @@ const TypeInstPtr* tinst = tp->is_instptr(); ciObject* const_oop = tinst->const_oop(); if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) { - const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type()); + BasicType bt = memory_type(); + ciType* mirror_type = const_oop->as_instance()->java_mirror_type(); + if (mirror_type != NULL && mirror_type->is_valuetype()) { + ciValueKlass* vk = mirror_type->as_value_klass(); + if (off == vk->default_value_offset()) { + // Loading a special hidden field that contains the oop of the default value type + const Type* const_oop = TypeInstPtr::make(vk->default_value_instance()); + return (bt == T_NARROWOOP) ? 
const_oop->make_narrowoop() : const_oop; + } + } + const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt); if (con_type != NULL) { return con_type; } @@ -1804,27 +1862,44 @@ } else if (tp->base() == Type::KlassPtr) { assert( off != Type::OffsetBot || // arrays can be cast to Objects + tp->is_klassptr()->klass() == NULL || tp->is_klassptr()->klass()->is_java_lang_Object() || // also allow array-loading from the primary supertype // array during subtype checks Opcode() == Op_LoadKlass, "Field accesses must be precise" ); // For klass/static loads, we expect the _type to be precise - } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) { - /* With mirrors being an indirect in the Klass* - * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)) - * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass). - * - * So check the type and klass of the node before the LoadP. - */ - Node* adr2 = adr->in(MemNode::Address); - const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr(); - if (tkls != NULL && !StressReflectiveCode) { - ciKlass* klass = tkls->klass(); - if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) { - assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror"); - assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror"); - return TypeInstPtr::make(klass->java_mirror()); + } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) { + if (adr->is_Load() && off == 0) { + /* With mirrors being an indirect in the Klass* + * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)) + * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass). + * + * So check the type and klass of the node before the LoadP. 
+ */ + Node* adr2 = adr->in(MemNode::Address); + const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr(); + if (tkls != NULL) { + ciKlass* klass = tkls->klass(); + if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) { + assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror"); + assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror"); + return TypeInstPtr::make(klass->java_mirror()); + } + } + } else { + // Check for a load of the default value offset from the ValueKlassFixedBlock: + // LoadI(LoadP(value_klass, adr_valueklass_fixed_block_offset), default_value_offset_offset) + intptr_t offset = 0; + Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); + if (base != NULL && base->is_Load() && offset == in_bytes(ValueKlass::default_value_offset_offset())) { + const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr(); + if (tkls != NULL && tkls->is_loaded() && tkls->klass_is_exact() && tkls->isa_valuetype() && + tkls->offset() == in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset())) { + assert(base->Opcode() == Op_LoadP, "must load an oop from klass"); + assert(Opcode() == Op_LoadI, "must load an int from fixed block"); + return TypeInt::make(tkls->klass()->as_value_klass()->default_value_offset()); + } } } } @@ -1832,7 +1907,7 @@ const TypeKlassPtr *tkls = tp->isa_klassptr(); if (tkls != NULL && !StressReflectiveCode) { ciKlass* klass = tkls->klass(); - if (klass->is_loaded() && tkls->klass_is_exact()) { + if (tkls->is_loaded() && tkls->klass_is_exact()) { // We are loading a field from a Klass metaobject whose identity // is known at compile time (the type is "exact" or "precise"). // Check for fields we know are maintained as constants by the VM. @@ -1859,7 +1934,7 @@ // We can still check if we are loading from the primary_supers array at a // shallow enough depth. 
Even though the klass is not exact, entries less // than or equal to its super depth are correct. - if (klass->is_loaded() ) { + if (tkls->is_loaded()) { ciType *inner = klass; while( inner->is_obj_array_klass() ) inner = inner->as_obj_array_klass()->base_element_type(); @@ -2146,7 +2221,7 @@ } // Return root of possible klass - return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/); + return TypeKlassPtr::make(TypePtr::NotNull, ik, Type::Offset(0)); } } @@ -2178,7 +2253,7 @@ return TypeKlassPtr::make(ak); } } - return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); + return TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0)); } else { // Found a type-array? //assert(!UseExactTypes, "this code should be useless with exact types"); assert( ak->is_type_array_klass(), "" ); @@ -2190,9 +2265,10 @@ // Check for loading klass from an array klass const TypeKlassPtr *tkls = tp->isa_klassptr(); if (tkls != NULL && !StressReflectiveCode) { - ciKlass* klass = tkls->klass(); - if( !klass->is_loaded() ) + if (!tkls->is_loaded()) { return _type; // Bail out if not loaded + } + ciKlass* klass = tkls->klass(); if( klass->is_obj_array_klass() && tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) { ciKlass* elem = klass->as_obj_array_klass()->element_klass(); @@ -2202,7 +2278,7 @@ // The array's TypeKlassPtr was declared 'precise' or 'not precise' // according to the element type's subclassing. 
- return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/); + return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0)); } if( klass->is_instance_klass() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::super_offset())) { @@ -2410,6 +2486,7 @@ case T_DOUBLE: return new StoreDNode(ctl, mem, adr, adr_type, val, mo); case T_METADATA: case T_ADDRESS: + case T_VALUETYPE: case T_OBJECT: #ifdef _LP64 if (adr->bottom_type()->is_ptr_to_narrowoop()) { @@ -2488,6 +2565,7 @@ phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw || (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy + (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) || (is_mismatched_access() || st->as_Store()->is_mismatched_access()), "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]); @@ -2573,10 +2651,11 @@ // Store of zero anywhere into a freshly-allocated object? // Then the store is useless. // (It must already have been captured by the InitializeNode.) - if (result == this && - ReduceFieldZeroing && phase->type(val)->is_zero_type()) { + if (result == this && ReduceFieldZeroing) { // a newly allocated object is already all-zeroes everywhere - if (mem->is_Proj() && mem->in(0)->is_Allocate()) { + if (mem->is_Proj() && mem->in(0)->is_Allocate() && + (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) { + assert(!phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == NULL, "storing null to value array is forbidden"); result = mem; } @@ -2589,7 +2668,15 @@ if (prev_val != NULL && phase->eqv(prev_val, val)) { // prev_val and val might differ by a cast; it would be good // to keep the more informative of the two. 
- result = mem; + if (phase->type(val)->is_zero_type()) { + result = mem; + } else if (prev_mem->is_Proj() && prev_mem->in(0)->is_Initialize()) { + InitializeNode* init = prev_mem->in(0)->as_Initialize(); + AllocateNode* alloc = init->allocation(); + if (alloc != NULL && alloc->in(AllocateNode::DefaultValue) == val) { + result = mem; + } + } } } } @@ -2897,7 +2984,7 @@ // Length too long; communicate this to matchers and assemblers. // Assemblers are responsible to produce fast hardware clears for it. if (size > InitArrayShortSize) { - return new ClearArrayNode(in(0), in(1), in(2), in(3), true); + return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true); } Node *mem = in(1); if( phase->type(mem)==Type::TOP ) return NULL; @@ -2912,14 +2999,14 @@ if( adr->Opcode() != Op_AddP ) Unimplemented(); Node *base = adr->in(1); - Node *zero = phase->makecon(TypeLong::ZERO); + Node *val = in(4); Node *off = phase->MakeConX(BytesPerLong); - mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); + mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false); count--; while( count-- ) { mem = phase->transform(mem); adr = phase->transform(new AddPNode(base,adr,off)); - mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); + mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false); } return mem; } @@ -2953,6 +3040,8 @@ //----------------------------clear_memory------------------------------------- // Generate code to initialize object storage to zero. 
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, + Node* val, + Node* raw_val, intptr_t start_offset, Node* end_offset, PhaseGVN* phase) { @@ -2963,17 +3052,24 @@ Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset)); adr = phase->transform(adr); const TypePtr* atp = TypeRawPtr::BOTTOM; - mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); + if (val != NULL) { + assert(phase->type(val)->isa_narrowoop(), "should be narrow oop"); + mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered); + } else { + assert(raw_val == NULL, "val may not be null"); + mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); + } mem = phase->transform(mem); offset += BytesPerInt; } assert((offset % unit) == 0, ""); // Initialize the remaining stuff, if any, with a ClearArray. - return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase); + return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase); } Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, + Node* raw_val, Node* start_offset, Node* end_offset, PhaseGVN* phase) { @@ -2996,11 +3092,16 @@ // Bulk clear double-words Node* zsize = phase->transform(new SubXNode(zend, zbase) ); Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) ); - mem = new ClearArrayNode(ctl, mem, zsize, adr, false); + if (raw_val == NULL) { + raw_val = phase->MakeConX(0); + } + mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false); return phase->transform(mem); } Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, + Node* val, + Node* raw_val, intptr_t start_offset, intptr_t end_offset, PhaseGVN* phase) { @@ -3015,14 +3116,20 @@ done_offset -= BytesPerInt; } if (done_offset > start_offset) { - mem = clear_memory(ctl, mem, dest, + mem = clear_memory(ctl, mem, dest, val, raw_val, start_offset, phase->MakeConX(done_offset), 
phase); } if (done_offset < end_offset) { // emit the final 32-bit store Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset)); adr = phase->transform(adr); const TypePtr* atp = TypeRawPtr::BOTTOM; - mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); + if (val != NULL) { + assert(phase->type(val)->isa_narrowoop(), "should be narrow oop"); + mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered); + } else { + assert(raw_val == NULL, "val may not be null"); + mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); + } mem = phase->transform(mem); done_offset += BytesPerInt; } @@ -3171,7 +3278,7 @@ //------------------------------match------------------------------------------ // Construct projections for memory. -Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) { +Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) { switch (proj->_con) { case TypeFunc::Control: case TypeFunc::Memory: @@ -3457,7 +3564,9 @@ // return false if the init contains any stores already bool AllocateNode::maybe_set_complete(PhaseGVN* phase) { InitializeNode* init = initialization(); - if (init == NULL || init->is_complete()) return false; + if (init == NULL || init->is_complete()) { + return false; + } init->remove_extra_zeroes(); // for now, if this allocation has already collected any inits, bail: if (init->is_non_zero()) return false; @@ -4200,6 +4309,8 @@ // Do some incremental zeroing on rawmem, in parallel with inits. 
zeroes_done = align_down(zeroes_done, BytesPerInt); rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr, + allocation()->in(AllocateNode::DefaultValue), + allocation()->in(AllocateNode::RawDefaultValue), zeroes_done, zeroes_needed, phase); zeroes_done = zeroes_needed; @@ -4259,6 +4370,8 @@ } if (zeroes_done < size_limit) { rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr, + allocation()->in(AllocateNode::DefaultValue), + allocation()->in(AllocateNode::RawDefaultValue), zeroes_done, size_in_bytes, phase); } } --- old/src/hotspot/share/opto/memnode.hpp 2019-03-11 14:26:50.262354545 +0100 +++ new/src/hotspot/share/opto/memnode.hpp 2019-03-11 14:26:50.050354548 +0100 @@ -1095,9 +1095,11 @@ class ClearArrayNode: public Node { private: bool _is_large; + bool _word_copy_only; public: - ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large) - : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) { + ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large) + : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large), + _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) { init_class_id(Class_ClearArray); } virtual int Opcode() const; @@ -1109,20 +1111,26 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual uint match_edge(uint idx) const; bool is_large() const { return _is_large; } + bool word_copy_only() const { return _word_copy_only; } // Clear the given area of an object or array. // The start offset must always be aligned mod BytesPerInt. // The end offset must always be aligned mod BytesPerLong. // Return the new memory. 
static Node* clear_memory(Node* control, Node* mem, Node* dest, + Node* val, + Node* raw_val, intptr_t start_offset, intptr_t end_offset, PhaseGVN* phase); static Node* clear_memory(Node* control, Node* mem, Node* dest, + Node* val, + Node* raw_val, intptr_t start_offset, Node* end_offset, PhaseGVN* phase); static Node* clear_memory(Node* control, Node* mem, Node* dest, + Node* raw_val, Node* start_offset, Node* end_offset, PhaseGVN* phase); @@ -1173,7 +1181,7 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual uint match_edge(uint idx) const { return 0; } virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); // Factory method. Builds a wide or narrow membar. // Optional 'precedent' becomes an extra edge if not null. static MemBarNode* make(Compile* C, int opcode, @@ -1305,7 +1313,7 @@ enum { Incomplete = 0, Complete = 1, - WithArraycopy = 2 + WithArraycopy = 2, }; int _is_complete; --- old/src/hotspot/share/opto/mulnode.cpp 2019-03-11 14:26:50.698354539 +0100 +++ new/src/hotspot/share/opto/mulnode.cpp 2019-03-11 14:26:50.486354542 +0100 @@ -155,6 +155,18 @@ if( t2->higher_equal( zero ) ) return zero; } + // Code pattern on return from a call that returns an __Value. Can + // be optimized away if the return value turns out to be an oop. 
+ if (op == Op_AndX && + in(1) != NULL && + in(1)->Opcode() == Op_CastP2X && + in(1)->in(1) != NULL && + phase->type(in(1)->in(1))->isa_oopptr() && + t2->isa_intptr_t()->_lo >= 0 && + t2->isa_intptr_t()->_hi <= MinObjAlignmentInBytesMask) { + return add_id(); + } + // Either input is BOTTOM ==> the result is the local BOTTOM if( t1 == Type::BOTTOM || t2 == Type::BOTTOM ) return bottom_type(); @@ -584,6 +596,13 @@ return usr; } } + + if (con == markOopDesc::always_locked_pattern) { + assert(EnableValhalla || ACmpOnValues == 3, "should only be used for value types"); + if (in(1)->is_Load() && phase->type(in(1)->in(MemNode::Address))->is_valuetypeptr()) { + return in(2); // Obj is known to be a value type + } + } } return MulNode::Identity(phase); } --- old/src/hotspot/share/opto/multnode.cpp 2019-03-11 14:26:51.130354533 +0100 +++ new/src/hotspot/share/opto/multnode.cpp 2019-03-11 14:26:50.922354536 +0100 @@ -40,7 +40,7 @@ return RegMask::Empty; } -Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); } +Node *MultiNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) { return proj->clone(); } //------------------------------proj_out--------------------------------------- // Get a named projection or null if not found --- old/src/hotspot/share/opto/multnode.hpp 2019-03-11 14:26:51.550354528 +0100 +++ new/src/hotspot/share/opto/multnode.hpp 2019-03-11 14:26:51.342354530 +0100 @@ -44,7 +44,7 @@ virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash virtual bool depends_only_on_test() const { return false; } virtual const RegMask &out_RegMask() const; - virtual Node *match( const ProjNode *proj, const Matcher *m ); + virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask); virtual uint ideal_reg() const { return NotAMachineReg; } ProjNode* proj_out(uint which_proj) const; // Get a named projection ProjNode* proj_out_or_null(uint which_proj) const; --- 
old/src/hotspot/share/opto/narrowptrnode.cpp 2019-03-11 14:26:51.978354522 +0100 +++ new/src/hotspot/share/opto/narrowptrnode.cpp 2019-03-11 14:26:51.766354525 +0100 @@ -42,7 +42,7 @@ if (t == Type::TOP) return Type::TOP; if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR; - assert(t->isa_narrowoop(), "only narrowoop here"); + assert(t->isa_narrowoop(), "only narrowoop here"); return t->make_ptr(); } --- old/src/hotspot/share/opto/node.cpp 2019-03-11 14:26:52.402354516 +0100 +++ new/src/hotspot/share/opto/node.cpp 2019-03-11 14:26:52.190354519 +0100 @@ -546,6 +546,9 @@ if (n->is_SafePoint()) { n->as_SafePoint()->clone_replaced_nodes(); } + if (n->is_ValueTypeBase()) { + C->add_value_type(n); + } return n; // Return the clone } @@ -624,6 +627,9 @@ if (Opcode() == Op_Opaque4) { compile->remove_opaque4_node(this); } + if (is_ValueTypeBase()) { + compile->remove_value_type(this); + } if (is_SafePoint()) { as_SafePoint()->delete_replaced_nodes(); @@ -1373,6 +1379,9 @@ if (dead->Opcode() == Op_Opaque4) { igvn->C->remove_opaque4_node(dead); } + if (dead->is_ValueTypeBase()) { + igvn->C->remove_value_type(dead); + } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); bs->unregister_potential_barrier_node(dead); igvn->C->record_dead_node(dead->_idx); --- old/src/hotspot/share/opto/node.hpp 2019-03-11 14:26:52.842354510 +0100 +++ new/src/hotspot/share/opto/node.hpp 2019-03-11 14:26:52.630354513 +0100 @@ -102,6 +102,7 @@ class MachNode; class MachNullCheckNode; class MachProjNode; +class MachPrologNode; class MachReturnNode; class MachSafePointNode; class MachSpillCopyNode; @@ -150,6 +151,9 @@ class Type; class TypeNode; class UnlockNode; +class ValueTypeBaseNode; +class ValueTypeNode; +class ValueTypePtrNode; class VectorNode; class LoadVectorNode; class StoreVectorNode; @@ -662,6 +666,7 @@ DEFINE_CLASS_ID(MachJump, MachConstant, 0) DEFINE_CLASS_ID(MachMerge, Mach, 6) DEFINE_CLASS_ID(MachMemBar, Mach, 7) + DEFINE_CLASS_ID(MachProlog, Mach, 8) 
DEFINE_CLASS_ID(Type, Node, 2) DEFINE_CLASS_ID(Phi, Type, 0) @@ -677,6 +682,9 @@ DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7) + DEFINE_CLASS_ID(ValueTypeBase, Type, 8) + DEFINE_CLASS_ID(ValueType, ValueTypeBase, 0) + DEFINE_CLASS_ID(ValueTypePtr, ValueTypeBase, 1) DEFINE_CLASS_ID(Proj, Node, 3) DEFINE_CLASS_ID(CatchProj, Proj, 0) @@ -852,6 +860,7 @@ DEFINE_CLASS_QUERY(MachJump) DEFINE_CLASS_QUERY(MachNullCheck) DEFINE_CLASS_QUERY(MachProj) + DEFINE_CLASS_QUERY(MachProlog) DEFINE_CLASS_QUERY(MachReturn) DEFINE_CLASS_QUERY(MachSafePoint) DEFINE_CLASS_QUERY(MachSpillCopy) @@ -880,6 +889,9 @@ DEFINE_CLASS_QUERY(Store) DEFINE_CLASS_QUERY(Sub) DEFINE_CLASS_QUERY(Type) + DEFINE_CLASS_QUERY(ValueType) + DEFINE_CLASS_QUERY(ValueTypeBase) + DEFINE_CLASS_QUERY(ValueTypePtr) DEFINE_CLASS_QUERY(Vector) DEFINE_CLASS_QUERY(LoadVector) DEFINE_CLASS_QUERY(StoreVector) --- old/src/hotspot/share/opto/output.cpp 2019-03-11 14:26:53.278354504 +0100 +++ new/src/hotspot/share/opto/output.cpp 2019-03-11 14:26:53.066354507 +0100 @@ -71,24 +71,32 @@ const StartNode *start = entry->head()->as_Start(); // Replace StartNode with prolog - MachPrologNode *prolog = new MachPrologNode(); + Label verified_entry; + MachPrologNode* prolog = new MachPrologNode(&verified_entry); entry->map_node(prolog, 0); _cfg->map_node_to_block(prolog, entry); _cfg->unmap_node_from_block(start); // start is no longer in any block // Virtual methods need an unverified entry point - - if( is_osr_compilation() ) { - if( PoisonOSREntry ) { + if (is_osr_compilation()) { + if (PoisonOSREntry) { // TODO: Should use a ShouldNotReachHereNode... 
_cfg->insert( broot, 0, new MachBreakpointNode() ); } } else { - if( _method && !_method->flags().is_static() ) { + if (_method && !_method->is_static()) { // Insert unvalidated entry point - _cfg->insert( broot, 0, new MachUEPNode() ); + _cfg->insert(broot, 0, new MachUEPNode()); + } + if (_method && _method->has_scalarized_args()) { + // Add entry point to unpack all value type arguments + _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false)); + if (!_method->is_static()) { + // Add verified/unverified entry points to only unpack value type receiver at interface calls + _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true)); + _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true)); + } } - } // Break before main entry point @@ -124,6 +132,19 @@ return; } + if (_method && _method->has_scalarized_args()) { + // Compute the offsets of the entry points required by the value type calling convention + if (!_method->is_static()) { + uint vep_ro_size = ((MachVEPNode*)broot->get_node(0))->size(_regalloc); + uint vvep_ro_size = ((MachVEPNode*)broot->get_node(1))->size(_regalloc); + _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, vep_ro_size); + _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, vep_ro_size + vvep_ro_size); + } else { + _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later + _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, 0); + } + } + ScheduleAndBundle(); #ifndef PRODUCT @@ -288,7 +309,9 @@ MachCallNode *mcall = mach->as_MachCall(); // This destination address is NOT PC-relative - mcall->method_set((intptr_t)mcall->entry_point()); + if (mcall->entry_point() != NULL) { + mcall->method_set((intptr_t)mcall->entry_point()); + } if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) { stub_size += CompiledStaticCall::to_interp_stub_size(); @@ -726,6 
+749,7 @@ int safepoint_pc_offset = current_offset; bool is_method_handle_invoke = false; bool return_oop = false; + bool return_vt = false; // Add the safepoint in the DebugInfoRecorder if( !mach->is_MachCall() ) { @@ -743,9 +767,12 @@ } // Check if a call returns an object. - if (mcall->returns_pointer()) { + if (mcall->returns_pointer() || mcall->returns_vt()) { return_oop = true; } + if (mcall->returns_vt()) { + return_vt = true; + } safepoint_pc_offset += mcall->ret_addr_offset(); debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map); } @@ -860,7 +887,7 @@ // Now we can describe the scope. methodHandle null_mh; bool rethrow_exception = false; - debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, locvals, expvals, monvals); + debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, return_vt, locvals, expvals, monvals); } // End jvms loop // Mark the end of the scope set. 
@@ -969,6 +996,10 @@ if (fixed_slots() != 0) { _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot)); } + if (C->needs_stack_repair()) { + // Compute the byte offset of the stack increment value + _sp_inc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_sp_inc_slot)); + } // Compute prolog code size _method_size = 0; @@ -1233,8 +1264,10 @@ if (is_mcall) { MachCallNode *mcall = mach->as_MachCall(); - // This destination address is NOT PC-relative - mcall->method_set((intptr_t)mcall->entry_point()); + if (mcall->entry_point() != NULL) { + // This destination address is NOT PC-relative + mcall->method_set((intptr_t)mcall->entry_point()); + } // Save the return address call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset(); --- old/src/hotspot/share/opto/parse.hpp 2019-03-11 14:26:53.718354498 +0100 +++ new/src/hotspot/share/opto/parse.hpp 2019-03-11 14:26:53.510354500 +0100 @@ -424,7 +424,7 @@ SafePointNode* create_entry_map(); // OSR helpers - Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base); + Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base); Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit); void load_interpreter_state(Node* osr_buf); @@ -470,7 +470,7 @@ void do_one_bytecode(); // helper function to generate array store check - void array_store_check(); + Node* array_store_check(); // Helper function to generate array load void array_load(BasicType etype); // Helper function to generate array store @@ -535,8 +535,10 @@ // implementation of object creation bytecodes void emit_guard_for_new(ciInstanceKlass* klass); void do_new(); + void do_defaultvalue(); + void do_withfield(); void do_newarray(BasicType elemtype); - void do_anewarray(); + void do_newarray(); void do_multianewarray(); Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int 
ndimensions, int nargs); @@ -551,10 +553,10 @@ bool seems_stable_comparison() const; void do_ifnull(BoolTest::mask btest, Node* c); - void do_if(BoolTest::mask btest, Node* c); + void do_if(BoolTest::mask btest, Node* c, bool new_path = false, Node** ctrl_taken = NULL); + void do_acmp(BoolTest::mask btest, Node* a, Node* b); int repush_if_args(); - void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, - Block* path, Block* other_path); + void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path); void sharpen_type_after_if(BoolTest::mask btest, Node* con, const Type* tcon, Node* val, const Type* tval); --- old/src/hotspot/share/opto/parse1.cpp 2019-03-11 14:26:54.150354492 +0100 +++ new/src/hotspot/share/opto/parse1.cpp 2019-03-11 14:26:53.938354494 +0100 @@ -37,6 +37,7 @@ #include "opto/parse.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/arguments.hpp" #include "runtime/handles.inline.hpp" #include "runtime/safepointMechanism.hpp" @@ -101,10 +102,16 @@ // Construct a node which can be used to get incoming state for // on stack replacement. -Node *Parse::fetch_interpreter_state(int index, - BasicType bt, - Node *local_addrs, - Node *local_addrs_base) { +Node* Parse::fetch_interpreter_state(int index, + const Type* type, + Node* local_addrs, + Node* local_addrs_base) { + BasicType bt = type->basic_type(); + if (type == TypePtr::NULL_PTR) { + // Ptr types are mixed together with T_ADDRESS but NULL is + // really for T_OBJECT types so correct it. 
+ bt = T_OBJECT; + } Node *mem = memory(Compile::AliasIdxRaw); Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize ); Node *ctl = control(); @@ -116,6 +123,7 @@ case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break; case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break; case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break; + case T_VALUETYPE: case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break; case T_LONG: case T_DOUBLE: { @@ -146,8 +154,11 @@ // The safepoint is a map which will feed an uncommon trap. Node* Parse::check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit) { - const TypeOopPtr* tp = type->isa_oopptr(); + if (type->isa_valuetype() != NULL) { + // The interpreter passes value types as oops + tp = TypeOopPtr::make_from_klass(type->isa_valuetype()->value_klass()); + } // TypeFlow may assert null-ness if a type appears unloaded. if (type == TypePtr::NULL_PTR || @@ -170,6 +181,12 @@ if (tp != NULL && tp->klass() != C->env()->Object_klass()) { // TypeFlow asserted a specific object type. Value must have that type. Node* bad_type_ctrl = NULL; + if (tp->is_valuetypeptr()) { + // Check value types for null here to prevent checkcast from adding an + // exception state before the bytecode entry (use 'bad_type_ctrl' instead). + l = null_check_oop(l, &bad_type_ctrl); + bad_type_exit->control()->add_req(bad_type_ctrl); + } l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl); bad_type_exit->control()->add_req(bad_type_ctrl); } @@ -188,7 +205,6 @@ int max_locals = jvms()->loc_size(); int max_stack = jvms()->stk_size(); - // Mismatch between method and jvms can occur since map briefly held // an OSR entry state (which takes up one RawPtr word). 
assert(max_locals == method()->max_locals(), "sanity"); @@ -226,14 +242,12 @@ // Make a BoxLockNode for the monitor. Node *box = _gvn.transform(new BoxLockNode(next_monitor())); - // Displaced headers and locked objects are interleaved in the // temp OSR buffer. We only copy the locked objects out here. // Fetch the locked object from the OSR temp buffer and copy to our fastlock node. - Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf); + Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf); // Try and copy the displaced header to the BoxNode - Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf); - + Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf); store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); @@ -300,13 +314,7 @@ continue; } // Construct code to access the appropriate local. - BasicType bt = type->basic_type(); - if (type == TypePtr::NULL_PTR) { - // Ptr types are mixed together with T_ADDRESS but NULL is - // really for T_OBJECT types so correct it. 
- bt = T_OBJECT; - } - Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf); + Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf); set_local(index, value); } @@ -595,6 +603,18 @@ return; } + // Handle value type arguments + int arg_size_sig = tf()->domain_sig()->cnt(); + for (uint i = 0; i < (uint)arg_size_sig; i++) { + Node* parm = map()->in(i); + const Type* t = _gvn.type(parm); + if (t->is_valuetypeptr() && t->value_klass()->is_scalarizable() && !t->maybe_null()) { + // Create ValueTypeNode from the oop and replace the parameter + Node* vt = ValueTypeNode::make_from_oop(this, parm, t->value_klass()); + map()->replace_edge(parm, vt); + } + } + entry_map = map(); // capture any changes performed by method setup code assert(jvms()->endoff() == map()->req(), "map matches JVMS layout"); @@ -777,8 +797,8 @@ _exits.set_all_memory(memphi); // Add a return value to the exit state. (Do not push it yet.) - if (tf()->range()->cnt() > TypeFunc::Parms) { - const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms); + if (tf()->range_sig()->cnt() > TypeFunc::Parms) { + const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms); if (ret_type->isa_int()) { BasicType ret_bt = method()->return_type()->basic_type(); if (ret_bt == T_BOOLEAN || @@ -796,26 +816,32 @@ if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) { ret_type = TypeOopPtr::BOTTOM; } + if ((_caller->has_method() || tf()->returns_value_type_as_fields()) && + ret_type->is_valuetypeptr() && ret_type->value_klass()->is_scalarizable() && !ret_type->maybe_null()) { + // Scalarize value type return when inlining or with multiple return values + ret_type = TypeValueType::make(ret_type->value_klass()); + } int ret_size = type2size[ret_type->basic_type()]; Node* ret_phi = new PhiNode(region, ret_type); gvn().set_type_bottom(ret_phi); _exits.ensure_stack(ret_size); - assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range"); + 
assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range"); assert(method()->return_type()->size() == ret_size, "tf agrees w/ method"); _exits.set_argument(0, ret_phi); // here is where the parser finds it // Note: ret_phi is not yet pushed, until do_exits. } } - //----------------------------build_start_state------------------------------- // Construct a state which contains only the incoming arguments from an // unknown caller. The method & bci will be NULL & InvocationEntryBci. JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) { - int arg_size = tf->domain()->cnt(); - int max_size = MAX2(arg_size, (int)tf->range()->cnt()); + int arg_size = tf->domain_sig()->cnt(); + int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt()); JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms); SafePointNode* map = new SafePointNode(max_size, NULL); + map->set_jvms(jvms); + jvms->set_map(map); record_for_igvn(map); assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size"); Node_Notes* old_nn = default_node_notes(); @@ -827,20 +853,40 @@ entry_nn->set_jvms(entry_jvms); set_default_node_notes(entry_nn); } - uint i; - for (i = 0; i < (uint)arg_size; i++) { - Node* parm = initial_gvn()->transform(new ParmNode(start, i)); + PhaseGVN& gvn = *initial_gvn(); + uint j = 0; + ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter()); + for (uint i = 0; i < (uint)arg_size; i++) { + const Type* t = tf->domain_sig()->field_at(i); + Node* parm = NULL; + if (has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) { + // Value type arguments are not passed by reference: we get an argument per + // field of the value type. Build ValueTypeNodes from the value type arguments. 
+ GraphKit kit(jvms, &gvn); + kit.set_control(map->control()); + Node* old_mem = map->memory(); + // Use immutable memory for value type loads and restore it below + // TODO make sure value types are always loaded from immutable memory + kit.set_all_memory(C->immutable_memory()); + parm = ValueTypeNode::make_from_multi(&kit, start, sig_cc, t->value_klass(), j, true); + map->set_control(kit.control()); + map->set_memory(old_mem); + } else { + parm = gvn.transform(new ParmNode(start, j++)); + BasicType bt = t->basic_type(); + while (i >= TypeFunc::Parms && SigEntry::next_is_reserved(sig_cc, bt, true)) { + j += type2size[bt]; // Skip reserved arguments + } + } map->init_req(i, parm); // Record all these guys for later GVN. record_for_igvn(parm); } - for (; i < map->req(); i++) { - map->init_req(i, top()); + for (; j < map->req(); j++) { + map->init_req(j, top()); } assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here"); set_default_node_notes(old_nn); - map->set_jvms(jvms); - jvms->set_map(map); return jvms; } @@ -867,12 +913,32 @@ kit.frameptr(), kit.returnadr()); // Add zero or 1 return values - int ret_size = tf()->range()->cnt() - TypeFunc::Parms; + int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms; if (ret_size > 0) { kit.inc_sp(-ret_size); // pop the return value(s) kit.sync_jvms(); - ret->add_req(kit.argument(0)); - // Note: The second dummy edge is not needed by a ReturnNode. + Node* res = kit.argument(0); + if (tf()->returns_value_type_as_fields()) { + // Multiple return values (value type fields): add as many edges + // to the Return node as returned values. 
+ assert(res->is_ValueType(), "what else supports multi value return?"); + ValueTypeNode* vt = res->as_ValueType(); + ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms); + if (vt->is_allocated(&kit.gvn()) && !StressValueTypeReturnedAsFields) { + ret->init_req(TypeFunc::Parms, vt->get_oop()); + } else { + ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn())); + } + const Array* sig_array = vt->type()->is_valuetype()->value_klass()->extended_sig(); + GrowableArray sig = GrowableArray(sig_array->length()); + sig.appendAll(sig_array); + ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter()); + uint idx = TypeFunc::Parms+1; + vt->pass_fields(&kit, ret, sig_cc, idx); + } else { + ret->add_req(res); + // Note: The second dummy edge is not needed by a ReturnNode. + } } // bind it to root root()->add_req(ret); @@ -1029,8 +1095,8 @@ mms.set_memory(_gvn.transform(mms.memory())); } - if (tf()->range()->cnt() > TypeFunc::Parms) { - const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms); + if (tf()->range_sig()->cnt() > TypeFunc::Parms) { + const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms); Node* ret_phi = _gvn.transform( _exits.argument(0) ); if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) { // In case of concurrent class loading, the type we set for the @@ -1170,7 +1236,7 @@ assert(merged_memory(), ""); // Now add the locals which are initially bound to arguments: - uint arg_size = tf()->domain()->cnt(); + uint arg_size = tf()->domain_sig()->cnt(); ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args for (i = TypeFunc::Parms; i < arg_size; i++) { map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms)); @@ -1629,6 +1695,39 @@ assert(sp() == target->start_sp(), ""); clean_stack(sp()); + // Check for merge conflicts involving value types + JVMState* old_jvms = map()->jvms(); + int old_bci = bci(); + JVMState* tmp_jvms = old_jvms->clone_shallow(C); + 
tmp_jvms->set_should_reexecute(true); + map()->set_jvms(tmp_jvms); + // Execution needs to restart a the next bytecode (entry of next + // block) + if (target->is_merged() || + pnum > PhiNode::Input || + target->is_handler() || + target->is_loop_head()) { + set_parse_bci(target->start()); + for (uint j = TypeFunc::Parms; j < map()->req(); j++) { + Node* n = map()->in(j); // Incoming change to target state. + const Type* t = NULL; + if (tmp_jvms->is_loc(j)) { + t = target->local_type_at(j - tmp_jvms->locoff()); + } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) { + t = target->stack_type_at(j - tmp_jvms->stkoff()); + } + if (t != NULL && t != Type::BOTTOM) { + if (n->is_ValueType() && !t->isa_valuetype()) { + // Allocate value type in src block to be able to merge it with oop in target block + map()->set_req(j, ValueTypePtrNode::make_from_value_type(this, n->as_ValueType(), true)); + } + assert(!t->isa_valuetype() || n->is_ValueType(), "inconsistent typeflow info"); + } + } + } + map()->set_jvms(old_jvms); + set_parse_bci(old_bci); + if (!target->is_merged()) { // No prior mapping at this bci if (TraceOptoParse) { tty->print(" with empty state"); } @@ -1682,6 +1781,7 @@ target->mark_merged_backedge(block()); } #endif + // We must not manufacture more phis if the target is already parsed. bool nophi = target->is_parsed(); @@ -1717,14 +1817,18 @@ // Update all the non-control inputs to map: assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms"); bool check_elide_phi = target->is_SEL_backedge(save_block); + bool last_merge = (pnum == PhiNode::Input); for (uint j = 1; j < newin->req(); j++) { Node* m = map()->in(j); // Current state of target. Node* n = newin->in(j); // Incoming change to target state. 
PhiNode* phi; - if (m->is_Phi() && m->as_Phi()->region() == r) + if (m->is_Phi() && m->as_Phi()->region() == r) { phi = m->as_Phi(); - else + } else if (m->is_ValueType() && m->as_ValueType()->has_phi_inputs(r)){ + phi = m->as_ValueType()->get_oop()->as_Phi(); + } else { phi = NULL; + } if (m != n) { // Different; must merge switch (j) { // Frame pointer and Return Address never changes @@ -1758,11 +1862,34 @@ // - the corresponding control edges is top (a dead incoming path) // It is a bug if we create a phi which sees a garbage value on a live path. - if (phi != NULL) { + // Merging two value types? + if (phi != NULL && n->is_ValueType()) { + // Reload current state because it may have been updated by ensure_phi + m = map()->in(j); + ValueTypeNode* vtm = m->as_ValueType(); // Current value type + ValueTypeNode* vtn = n->as_ValueType(); // Incoming value type + assert(vtm->get_oop() == phi, "Value type should have Phi input"); + if (TraceOptoParse) { +#ifdef ASSERT + tty->print_cr("\nMerging value types"); + tty->print_cr("Current:"); + vtm->dump(2); + tty->print_cr("Incoming:"); + vtn->dump(2); + tty->cr(); +#endif + } + // Do the merge + vtm->merge_with(&_gvn, vtn, pnum, last_merge); + if (last_merge) { + map()->set_req(j, _gvn.transform_no_reclaim(vtm)); + record_for_igvn(vtm); + } + } else if (phi != NULL) { assert(n != top() || r->in(pnum) == top(), "live value must not be garbage"); assert(phi->region() == r, ""); phi->set_req(pnum, n); // Then add 'n' to the merge - if (pnum == PhiNode::Input) { + if (last_merge) { // Last merge for this Phi. // So far, Phis have had a reasonable type from ciTypeFlow. // Now _gvn will join that with the meet of current inputs. 
@@ -1778,8 +1905,7 @@ } } // End of for all values to be merged - if (pnum == PhiNode::Input && - !r->in(0)) { // The occasional useless Region + if (last_merge && !r->in(0)) { // The occasional useless Region assert(control() == r, ""); set_control(r->nonnull_req()); } @@ -1931,6 +2057,8 @@ if (n->is_Phi() && n->as_Phi()->region() == r) { assert(n->req() == pnum, "must be same size as region"); n->add_req(NULL); + } else if (n->is_ValueType() && n->as_ValueType()->has_phi_inputs(r)) { + n->as_ValueType()->add_new_path(r); } } } @@ -1953,6 +2081,10 @@ if (o->is_Phi() && o->as_Phi()->region() == region) { return o->as_Phi(); } + ValueTypeBaseNode* vt = o->isa_ValueType(); + if (vt != NULL && vt->has_phi_inputs(region)) { + return vt->get_oop()->as_Phi(); + } // Now use a Phi here for merging assert(!nocreate, "Cannot build a phi for a block already parsed."); @@ -1972,8 +2104,8 @@ } // If the type falls to bottom, then this must be a local that - // is mixing ints and oops or some such. Forcing it to top - // makes it go dead. + // is already dead or is mixing ints and oops or some such. + // Forcing it to top makes it go dead. if (t == Type::BOTTOM) { map->set_req(idx, top()); return NULL; @@ -1986,11 +2118,20 @@ return NULL; } - PhiNode* phi = PhiNode::make(region, o, t); - gvn().set_type(phi, t); - if (C->do_escape_analysis()) record_for_igvn(phi); - map->set_req(idx, phi); - return phi; + if (vt != NULL) { + // Value types are merged by merging their field values. + // Create a cloned ValueTypeNode with phi inputs that + // represents the merged value type and update the map. 
+ vt = vt->clone_with_phis(&_gvn, region); + map->set_req(idx, vt); + return vt->get_oop()->as_Phi(); + } else { + PhiNode* phi = PhiNode::make(region, o, t); + gvn().set_type(phi, t); + if (C->do_escape_analysis()) record_for_igvn(phi); + map->set_req(idx, phi); + return phi; + } } //--------------------------ensure_memory_phi---------------------------------- @@ -2170,60 +2311,77 @@ } // Do not set_parse_bci, so that return goo is credited to the return insn. - set_bci(InvocationEntryBci); + // vreturn can trigger an allocation so vreturn can throw. Setting + // the bci here breaks exception handling. Commenting this out + // doesn't seem to break anything. + // set_bci(InvocationEntryBci); if (method()->is_synchronized() && GenerateSynchronizationCode) { shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node()); } if (C->env()->dtrace_method_probes()) { make_dtrace_method_exit(method()); } - SafePointNode* exit_return = _exits.map(); - exit_return->in( TypeFunc::Control )->add_req( control() ); - exit_return->in( TypeFunc::I_O )->add_req( i_o () ); - Node *mem = exit_return->in( TypeFunc::Memory ); - for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) { - if (mms.is_empty()) { - // get a copy of the base memory, and patch just this one input - const TypePtr* adr_type = mms.adr_type(C); - Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type); - assert(phi->as_Phi()->region() == mms.base_memory()->in(0), ""); - gvn().set_type_bottom(phi); - phi->del_req(phi->req()-1); // prepare to re-patch - mms.set_memory(phi); - } - mms.memory()->add_req(mms.memory2()); - } - // frame pointer is always same, already captured if (value != NULL) { - // If returning oops to an interface-return, there is a silent free - // cast from oop to interface allowed by the Verifier. Make it explicit - // here. 
Node* phi = _exits.argument(0); - const TypeInstPtr *tr = phi->bottom_type()->isa_instptr(); - if (tr && tr->klass()->is_loaded() && - tr->klass()->is_interface()) { - const TypeInstPtr *tp = value->bottom_type()->isa_instptr(); - if (tp && tp->klass()->is_loaded() && - !tp->klass()->is_interface()) { + const Type* return_type = phi->bottom_type(); + const TypeOopPtr* tr = return_type->isa_oopptr(); + if (return_type->isa_valuetype()) { + // Value type is returned as fields, make sure it is scalarized + if (!value->is_ValueType()) { + value = ValueTypeNode::make_from_oop(this, value, return_type->is_valuetype()->value_klass()); + } + if (!_caller->has_method()) { + // Value type is returned as fields from root method, make + // sure all non-flattened value type fields are allocated. + assert(tf()->returns_value_type_as_fields(), "must be returned as fields"); + value = value->as_ValueType()->allocate_fields(this); + } + } else if (value->is_ValueType()) { + // Value type is returned as oop, make sure it is allocated + assert(tr && tr->can_be_value_type(), "must return a value type pointer"); + value = ValueTypePtrNode::make_from_value_type(this, value->as_ValueType()); + } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) { + // If returning oops to an interface-return, there is a silent free + // cast from oop to interface allowed by the Verifier. Make it explicit here. 
+ const TypeInstPtr* tp = value->bottom_type()->isa_instptr(); + if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) { // sharpen the type eagerly; this eases certain assert checking - if (tp->higher_equal(TypeInstPtr::NOTNULL)) + if (tp->higher_equal(TypeInstPtr::NOTNULL)) { tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr(); + } value = _gvn.transform(new CheckCastPPNode(0, value, tr)); } } else { - // Also handle returns of oop-arrays to an arrays-of-interface return + // Handle returns of oop-arrays to an arrays-of-interface return const TypeInstPtr* phi_tip; const TypeInstPtr* val_tip; - Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip); + Type::get_arrays_base_elements(return_type, value->bottom_type(), &phi_tip, &val_tip); if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() && val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) { - value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type())); + value = _gvn.transform(new CheckCastPPNode(0, value, return_type)); } } phi->add_req(value); } + SafePointNode* exit_return = _exits.map(); + exit_return->in( TypeFunc::Control )->add_req( control() ); + exit_return->in( TypeFunc::I_O )->add_req( i_o () ); + Node *mem = exit_return->in( TypeFunc::Memory ); + for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) { + if (mms.is_empty()) { + // get a copy of the base memory, and patch just this one input + const TypePtr* adr_type = mms.adr_type(C); + Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type); + assert(phi->as_Phi()->region() == mms.base_memory()->in(0), ""); + gvn().set_type_bottom(phi); + phi->del_req(phi->req()-1); // prepare to re-patch + mms.set_memory(phi); + } + mms.memory()->add_req(mms.memory2()); + } + if (_first_return) { _exits.map()->transfer_replaced_nodes_from(map(), _new_idx); _first_return = false; --- 
old/src/hotspot/share/opto/parse2.cpp 2019-03-11 14:26:54.598354485 +0100 +++ new/src/hotspot/share/opto/parse2.cpp 2019-03-11 14:26:54.386354488 +0100 @@ -36,12 +36,14 @@ #include "opto/convertnode.hpp" #include "opto/divnode.hpp" #include "opto/idealGraphPrinter.hpp" +#include "opto/idealKit.hpp" #include "opto/matcher.hpp" #include "opto/memnode.hpp" #include "opto/mulnode.hpp" #include "opto/opaquenode.hpp" #include "opto/parse.hpp" #include "opto/runtime.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/deoptimization.hpp" #include "runtime/sharedRuntime.hpp" @@ -53,58 +55,224 @@ //---------------------------------array_load---------------------------------- void Parse::array_load(BasicType bt) { const Type* elemtype = Type::TOP; - bool big_val = bt == T_DOUBLE || bt == T_LONG; Node* adr = array_addressing(bt, 0, &elemtype); if (stopped()) return; // guaranteed null or range check - pop(); // index (already used) - Node* array = pop(); // the array itself + Node* idx = pop(); + Node* ary = pop(); + + // Handle value type arrays + const TypeOopPtr* elemptr = elemtype->make_oopptr(); + const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); + if (elemtype->isa_valuetype() != NULL) { + // Load from flattened value type array + ciValueKlass* vk = elemtype->is_valuetype()->value_klass(); + Node* vt = ValueTypeNode::make_from_flattened(this, vk, ary, adr); + push(vt); + return; + } else if (elemptr != NULL && elemptr->is_valuetypeptr()) { + // Load from non-flattened value type array (elements can never be null) + bt = T_VALUETYPE; + assert(elemptr->meet(TypePtr::NULL_PTR) != elemptr, "value type array elements should never be null"); + } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() && + !ary_t->klass_is_exact()) { + // Cannot statically determine if array is flattened, emit runtime check + IdealKit ideal(this); + IdealVariable res(ideal); + ideal.declarations_done(); + Node* kls = load_object_klass(ary); + Node* tag = 
load_lh_array_tag(kls); + ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); { + // non flattened + sync_kit(ideal); + const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); + elemtype = ary_t->elem()->make_oopptr(); + Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, + IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); + ideal.sync_kit(this); + ideal.set(res, ld); + } ideal.else_(); { + // flattened + sync_kit(ideal); + Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset())); + Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); + Node* obj_size = NULL; + kill_dead_locals(); + inc_sp(2); + Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true); + dec_sp(2); + + AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); + assert(alloc->maybe_set_complete(&_gvn), ""); + alloc->initialization()->set_complete_with_arraycopy(); + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + // Unknown value type so might have reference fields + if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) { + int base_off = sizeof(instanceOopDesc); + Node* dst_base = basic_plus_adr(alloc_obj, base_off); + Node* countx = obj_size; + countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off))); + countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong))); + + assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); + Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); + Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); + uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE); + Node* base = basic_plus_adr(ary, header); + idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control()); + Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift)); + Node* adr = 
basic_plus_adr(ary, base, scale); + + access_clone(adr, dst_base, countx, false); + } else { + ideal.sync_kit(this); + ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(), + CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value), + "load_unknown_value", + ary, idx, alloc_obj); + sync_kit(ideal); + } + + insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); + + ideal.sync_kit(this); + ideal.set(res, alloc_obj); + } ideal.end_if(); + sync_kit(ideal); + push_node(bt, ideal.value(res)); + return; + } if (elemtype == TypeInt::BOOL) { bt = T_BOOLEAN; } else if (bt == T_OBJECT) { - elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr(); + elemtype = ary_t->elem()->make_oopptr(); } const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); - - Node* ld = access_load_at(array, adr, adr_type, elemtype, bt, + Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); - if (big_val) { - push_pair(ld); - } else { - push(ld); + if (bt == T_VALUETYPE) { + // Loading a non-flattened (but flattenable) value type from an array + assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null"); + if (elemptr->value_klass()->is_scalarizable()) { + ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass()); + } } + + push_node(bt, ld); } //--------------------------------array_store---------------------------------- void Parse::array_store(BasicType bt) { const Type* elemtype = Type::TOP; - bool big_val = bt == T_DOUBLE || bt == T_LONG; - Node* adr = array_addressing(bt, big_val ? 
2 : 1, &elemtype); + Node* adr = array_addressing(bt, type2size[bt], &elemtype); if (stopped()) return; // guaranteed null or range check + Node* cast_val = NULL; if (bt == T_OBJECT) { - array_store_check(); + cast_val = array_store_check(); + if (stopped()) return; } - Node* val; // Oop to store - if (big_val) { - val = pop_pair(); - } else { - val = pop(); + Node* val = pop_node(bt); // Value to store + Node* idx = pop(); // Index in the array + Node* ary = pop(); // The array itself + + const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); + if (bt == T_OBJECT) { + const TypeOopPtr* elemptr = elemtype->make_oopptr(); + const Type* val_t = _gvn.type(val); + if (elemtype->isa_valuetype() != NULL) { + // Store to flattened value type array + if (!cast_val->is_ValueType()) { + inc_sp(3); + cast_val = null_check(cast_val); + if (stopped()) return; + dec_sp(3); + cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->is_valuetype()->value_klass()); + } + cast_val->as_ValueType()->store_flattened(this, ary, adr); + return; + } else if (elemptr->is_valuetypeptr()) { + // Store to non-flattened value type array + if (!cast_val->is_ValueType()) { + // Can not store null into a value type array + inc_sp(3); + cast_val = null_check(cast_val); + if (stopped()) return; + dec_sp(3); + } + } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() && + (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) { + if (ValueArrayFlatten) { + IdealKit ideal(this); + Node* kls = load_object_klass(ary); + Node* layout_val = load_lh_array_tag(kls); + ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); { + // non flattened + sync_kit(ideal); + + if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) { + gen_value_type_array_guard(ary, val, 3); + } + + const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); + elemtype = ary_t->elem()->make_oopptr(); + access_store_at(ary, adr, 
adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); + ideal.sync_kit(this); + } ideal.else_(); { + // flattened + // Object/interface array must be flattened, cast it + if (val->is_ValueType()) { + sync_kit(ideal); + const TypeValueType* vt = _gvn.type(val)->is_valuetype(); + ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass()); + const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr(); + ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype)); + adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control()); + val->as_ValueType()->store_flattened(this, ary, adr); + ideal.sync_kit(this); + } else { + if (TypePtr::NULL_PTR->higher_equal(val_t)) { + sync_kit(ideal); + Node* null_ctl = top(); + val = null_check_oop(val, &null_ctl); + if (null_ctl != top()) { + PreserveJVMState pjvms(this); + inc_sp(3); + set_control(null_ctl); + uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none); + dec_sp(3); + } + ideal.sync_kit(this); + } + if (!ideal.ctrl()->is_top()) { + ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(), + CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value), + "store_unknown_value", + val, ary, idx); + } + } + } ideal.end_if(); + sync_kit(ideal); + return; + } else { + if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) { + gen_value_type_array_guard(ary, val, 3); + } + } + } } - pop(); // index (already used) - Node* array = pop(); // the array itself if (elemtype == TypeInt::BOOL) { bt = T_BOOLEAN; } else if (bt == T_OBJECT) { - elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr(); + elemtype = ary_t->elem()->make_oopptr(); } const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); - access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); + access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); } @@ -1495,7 +1663,7 @@ } else { // Path 
is live. // Update method data profile_taken_branch(target_bci); - adjust_map_after_if(btest, c, prob, branch_block, next_block); + adjust_map_after_if(btest, c, prob, branch_block); if (!stopped()) { merge(target_bci); } @@ -1515,13 +1683,12 @@ } else { // Path is live. // Update method data profile_not_taken_branch(); - adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, - next_block, branch_block); + adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); } } //------------------------------------do_if------------------------------------ -void Parse::do_if(BoolTest::mask btest, Node* c) { +void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { int target_bci = iter().get_dest(); Block* branch_block = successor_for_bci(target_bci); @@ -1610,16 +1777,24 @@ set_control(taken_branch); if (stopped()) { - if (C->eliminate_boxing()) { - // Mark the successor block as parsed + if (C->eliminate_boxing() && !new_path) { + // Mark the successor block as parsed (if we haven't created a new path) branch_block->next_path_num(); } } else { // Update method data profile_taken_branch(target_bci); - adjust_map_after_if(taken_btest, c, prob, branch_block, next_block); + adjust_map_after_if(taken_btest, c, prob, branch_block); if (!stopped()) { - merge(target_bci); + if (new_path) { + // Merge by using a new path + merge_new_path(target_bci); + } else if (ctrl_taken != NULL) { + // Don't merge but save taken branch to be wired by caller + *ctrl_taken = control(); + } else { + merge(target_bci); + } } } } @@ -1628,16 +1803,327 @@ set_control(untaken_branch); // Branch not taken. 
- if (stopped()) { + if (stopped() && ctrl_taken == NULL) { if (C->eliminate_boxing()) { - // Mark the successor block as parsed + // Mark the successor block as parsed (if caller does not re-wire control flow) next_block->next_path_num(); } } else { // Update method data profile_not_taken_branch(); - adjust_map_after_if(untaken_btest, c, untaken_prob, - next_block, branch_block); + adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); + } +} + +void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) { + ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature()); + // If current method is ValueBootstrapMethods::isSubstitutable(), + // compile the acmp as a regular pointer comparison otherwise we + // could call ValueBootstrapMethods::isSubstitutable() back + if (ACmpOnValues == 0 || method() == subst_method) { + Node* cmp = CmpP(a, b); + cmp = optimize_cmp_with_klass(cmp); + do_if(btest, cmp); + return; + } + + if (ACmpOnValues == 3) { + // Substituability test + if (a->is_ValueType()) { + inc_sp(2); + a = a->as_ValueType()->allocate(this, true)->get_oop(); + dec_sp(2); + } + if (b->is_ValueType()) { + inc_sp(2); + b = b->as_ValueType()->allocate(this, true)->get_oop(); + dec_sp(2); + } + + const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr(); + const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr(); + + if (ta == NULL || !ta->can_be_value_type_raw() || + tb == NULL || !tb->can_be_value_type_raw()) { + Node* cmp = CmpP(a, b); + cmp = optimize_cmp_with_klass(cmp); + do_if(btest, cmp); + return; + } + + Node* cmp = CmpP(a, b); + cmp = optimize_cmp_with_klass(cmp); + Node* eq_region = NULL; + if (btest == BoolTest::eq) { + do_if(btest, cmp, true); + if (stopped()) { + return; + } + } else { + assert(btest == BoolTest::ne, "only eq or ne"); + Node* is_not_equal = NULL; + eq_region = new RegionNode(3); + { + PreserveJVMState pjvms(this); + do_if(btest, cmp, 
false, &is_not_equal); + if (!stopped()) { + eq_region->init_req(1, control()); + } + } + if (is_not_equal == NULL || is_not_equal->is_top()) { + record_for_igvn(eq_region); + set_control(_gvn.transform(eq_region)); + return; + } + set_control(is_not_equal); + } + // Pointers not equal, check for values + Node* ne_region = new RegionNode(6); + inc_sp(2); + Node* null_ctl = top(); + Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); + dec_sp(2); + ne_region->init_req(1, null_ctl); + if (stopped()) { + record_for_igvn(ne_region); + set_control(_gvn.transform(ne_region)); + if (btest == BoolTest::ne) { + { + PreserveJVMState pjvms(this); + int target_bci = iter().get_dest(); + merge(target_bci); + } + record_for_igvn(eq_region); + set_control(_gvn.transform(eq_region)); + } + return; + } + + Node* is_value = is_always_locked(not_null_a); + Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); + Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); + Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne)); + IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN); + Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff)); + set_control(_gvn.transform(new IfFalseNode(is_value_iff))); + ne_region->init_req(2, not_value); + + // One of the 2 pointers refers to a value, check if both are of + // the same class + inc_sp(2); + null_ctl = top(); + Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); + dec_sp(2); + ne_region->init_req(3, null_ctl); + if (stopped()) { + record_for_igvn(ne_region); + set_control(_gvn.transform(ne_region)); + if (btest == BoolTest::ne) { + { + PreserveJVMState pjvms(this); + int target_bci = iter().get_dest(); + merge(target_bci); + } + record_for_igvn(eq_region); + set_control(_gvn.transform(eq_region)); + } + return; + } + 
Node* kls_a = load_object_klass(not_null_a); + Node* kls_b = load_object_klass(not_null_b); + Node* kls_cmp = CmpP(kls_a, kls_b); + Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne)); + IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN); + Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff)); + set_control(_gvn.transform(new IfFalseNode(kls_iff))); + ne_region->init_req(4, kls_ne); + + if (stopped()) { + record_for_igvn(ne_region); + set_control(_gvn.transform(ne_region)); + if (btest == BoolTest::ne) { + { + PreserveJVMState pjvms(this); + int target_bci = iter().get_dest(); + merge(target_bci); + } + record_for_igvn(eq_region); + set_control(_gvn.transform(eq_region)); + } + return; + } + // Both are values of the same class, we need to perform a + // substitutability test. Delegate to + // ValueBootstrapMethods::isSubstitutable(). + + Node* ne_io_phi = PhiNode::make(ne_region, i_o()); + Node* mem = reset_memory(); + Node* ne_mem_phi = PhiNode::make(ne_region, mem); + + Node* eq_io_phi = NULL; + Node* eq_mem_phi = NULL; + if (eq_region != NULL) { + eq_io_phi = PhiNode::make(eq_region, i_o()); + eq_mem_phi = PhiNode::make(eq_region, mem); + } + + set_all_memory(mem); + + kill_dead_locals(); + CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci()); + call->set_override_symbolic_info(true); + call->init_req(TypeFunc::Parms, not_null_a); + call->init_req(TypeFunc::Parms+1, not_null_b); + inc_sp(2); + set_edges_for_java_call(call, false, false); + Node* ret = set_results_for_java_call(call, false, true); + dec_sp(2); + + // Test the return value of ValueBootstrapMethods::isSubstitutable() + Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1))); + if (btest == BoolTest::eq) { + do_if(btest, subst_cmp); + } else { + assert(btest == BoolTest::ne, "only eq or ne"); + Node* is_not_equal = NULL; + { + PreserveJVMState 
pjvms(this); + do_if(btest, subst_cmp, false, &is_not_equal); + if (!stopped()) { + eq_region->init_req(2, control()); + eq_io_phi->init_req(2, i_o()); + eq_mem_phi->init_req(2, reset_memory()); + } + } + set_control(is_not_equal); + } + ne_region->init_req(5, control()); + ne_io_phi->init_req(5, i_o()); + ne_mem_phi->init_req(5, reset_memory()); + + record_for_igvn(ne_region); + set_control(_gvn.transform(ne_region)); + set_i_o(_gvn.transform(ne_io_phi)); + set_all_memory(_gvn.transform(ne_mem_phi)); + + if (btest == BoolTest::ne) { + { + PreserveJVMState pjvms(this); + int target_bci = iter().get_dest(); + merge(target_bci); + } + + record_for_igvn(eq_region); + set_control(_gvn.transform(eq_region)); + set_i_o(_gvn.transform(eq_io_phi)); + set_all_memory(_gvn.transform(eq_mem_phi)); + } + + return; + } + // In the case were both operands might be value types, we need to + // use the new acmp implementation. Otherwise, i.e. if one operand + // is not a value type, we can use the old acmp implementation. 
+ Node* cmp = C->optimize_acmp(&_gvn, a, b); + if (cmp != NULL) { + // Use optimized/old acmp + cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); + do_if(btest, cmp); + return; + } + + Node* ctrl = NULL; + bool safe_for_replace = true; + if (ACmpOnValues != 1) { + // Emit old acmp before new acmp for quick a != b check + cmp = CmpP(a, b); + cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); + if (btest == BoolTest::ne) { + do_if(btest, cmp, true); + if (stopped()) { + return; // Never equal + } + } else if (btest == BoolTest::eq) { + Node* is_equal = NULL; + { + PreserveJVMState pjvms(this); + do_if(btest, cmp, false, &is_equal); + if (!stopped()) { + // Not equal, skip valuetype check + ctrl = new RegionNode(3); + ctrl->init_req(1, control()); + _gvn.set_type(ctrl, Type::CONTROL); + record_for_igvn(ctrl); + safe_for_replace = false; + } + } + if (is_equal == NULL) { + assert(ctrl != NULL, "no control left"); + set_control(_gvn.transform(ctrl)); + return; // Never equal + } + set_control(is_equal); + } + } + + // Null check operand before loading the is_value bit + bool speculate = false; + if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) { + // Operand 'b' is never null, swap operands to avoid null check + swap(a, b); + } else if (!too_many_traps(Deoptimization::Reason_speculate_null_check)) { + // Speculate on non-nullness of one operand + if (!_gvn.type(a)->speculative_maybe_null()) { + speculate = true; + } else if (!_gvn.type(b)->speculative_maybe_null()) { + speculate = true; + swap(a, b); + } + } + inc_sp(2); + Node* null_ctl = top(); + Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate); + assert(!stopped(), "operand is always null"); + dec_sp(2); + Node* region = new RegionNode(2); + Node* is_value = new PhiNode(region, TypeX_X); + if (null_ctl != top()) { + assert(!speculate, "should never be null"); + region->add_req(null_ctl); + is_value->add_req(_gvn.MakeConX(0)); + } + + Node* value_mask = 
_gvn.MakeConX(markOopDesc::always_locked_pattern); + if (ACmpOnValues == 1) { + Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes()); + Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1))); + Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask)); + Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1))); + is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63)))); + } else { + is_value->init_req(1, is_always_locked(not_null_a)); + } + region->init_req(1, control()); + + set_control(_gvn.transform(region)); + is_value = _gvn.transform(is_value); + + if (ACmpOnValues == 1) { + // Perturbe oop if operand is a value type to make comparison fail + Node* pert = _gvn.transform(new AddPNode(a, a, is_value)); + cmp = _gvn.transform(new CmpPNode(pert, b)); + } else { + // Check for a value type because we already know that operands are equal + cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); + btest = (btest == BoolTest::eq) ? BoolTest::ne : BoolTest::eq; + } + cmp = optimize_cmp_with_klass(cmp); + do_if(btest, cmp); + + if (ctrl != NULL) { + ctrl->init_req(2, control()); + set_control(_gvn.transform(ctrl)); } } @@ -1667,8 +2153,7 @@ // branch, seeing how it constrains a tested value, and then // deciding if it's worth our while to encode this constraint // as graph nodes in the current abstract interpretation map. 
-void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, - Block* path, Block* other_path) { +void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { if (!c->is_Cmp()) { maybe_add_predicate_after_if(path); return; @@ -1878,6 +2363,10 @@ inc_sp(2); obj = maybe_cast_profiled_obj(obj, k); dec_sp(2); + if (obj->is_ValueType()) { + assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated"); + obj = obj->as_ValueType()->get_oop(); + } // Make the CmpP use the casted obj addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); load_klass = load_klass->clone(); @@ -2725,20 +3214,25 @@ maybe_add_safepoint(iter().get_dest()); a = null(); b = pop(); - if (!_gvn.type(b)->speculative_maybe_null() && - !too_many_traps(Deoptimization::Reason_speculate_null_check)) { - inc_sp(1); - Node* null_ctl = top(); - b = null_check_oop(b, &null_ctl, true, true, true); - assert(null_ctl->is_top(), "no null control here"); - dec_sp(1); - } else if (_gvn.type(b)->speculative_always_null() && - !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { - inc_sp(1); - b = null_assert(b); - dec_sp(1); + if (b->is_ValueType()) { + // Return constant false because 'b' is always non-null + c = _gvn.makecon(TypeInt::CC_GT); + } else { + if (!_gvn.type(b)->speculative_maybe_null() && + !too_many_traps(Deoptimization::Reason_speculate_null_check)) { + inc_sp(1); + Node* null_ctl = top(); + b = null_check_oop(b, &null_ctl, true, true, true); + assert(null_ctl->is_top(), "no null control here"); + dec_sp(1); + } else if (_gvn.type(b)->speculative_always_null() && + !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { + inc_sp(1); + b = null_assert(b); + dec_sp(1); + } + c = _gvn.transform( new CmpPNode(b, a) ); } - c = _gvn.transform( new CmpPNode(b, a) ); do_ifnull(btest, c); break; @@ -2749,9 +3243,7 @@ maybe_add_safepoint(iter().get_dest()); a = access_resolve(pop(), 0); b = access_resolve(pop(), 0); - c = _gvn.transform( 
new CmpPNode(b, a) ); - c = optimize_cmp_with_klass(c); - do_if(btest, c); + do_acmp(btest, a, b); break; case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; @@ -2806,7 +3298,7 @@ do_instanceof(); break; case Bytecodes::_anewarray: - do_anewarray(); + do_newarray(); break; case Bytecodes::_newarray: do_newarray((BasicType)iter().get_index()); @@ -2817,6 +3309,12 @@ case Bytecodes::_new: do_new(); break; + case Bytecodes::_defaultvalue: + do_defaultvalue(); + break; + case Bytecodes::_withfield: + do_withfield(); + break; case Bytecodes::_jsr: case Bytecodes::_jsr_w: --- old/src/hotspot/share/opto/parse3.cpp 2019-03-11 14:26:55.042354479 +0100 +++ new/src/hotspot/share/opto/parse3.cpp 2019-03-11 14:26:54.830354482 +0100 @@ -27,6 +27,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/valueArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/castnode.hpp" #include "opto/memnode.hpp" @@ -34,6 +35,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" @@ -80,6 +82,14 @@ ciInstanceKlass* field_holder = field->holder(); + if (is_field && field_holder->is_valuetype() && peek()->is_ValueType()) { + assert(is_get, "value type field store not supported"); + ValueTypeNode* vt = pop()->as_ValueType(); + Node* value = vt->field_value_by_offset(field->offset()); + push_node(field->layout_type(), value); + return; + } + if (is_field == field->is_static()) { // Interpreter will throw java_lang_IncompatibleClassChangeError // Check this before allowing methods to access static fields @@ -127,6 +137,9 @@ do_get_xxx(obj, field, is_field); } else { do_put_xxx(obj, field, is_field); + if (stopped()) { + return; + } (void) pop(); // pop receiver after putting } } else { @@ -140,7 +153,6 @@ } } - void Parse::do_get_xxx(Node* obj, ciField* field, bool 
is_field) { BasicType bt = field->layout_type(); @@ -162,6 +174,8 @@ ciType* field_klass = field->type(); bool is_vol = field->is_volatile(); + bool flattened = field->is_flattened(); + bool flattenable = field->is_flattenable(); // Compute address and memory type. int offset = field->offset_in_bytes(); @@ -173,12 +187,7 @@ bool must_assert_null = false; - DecoratorSet decorators = IN_HEAP; - decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED; - - bool is_obj = bt == T_OBJECT || bt == T_ARRAY; - - if (is_obj) { + if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) { if (!field->type()->is_loaded()) { type = TypeInstPtr::BOTTOM; must_assert_null = true; @@ -195,12 +204,37 @@ assert(type != NULL, "field singleton type must be consistent"); } else { type = TypeOopPtr::make_from_klass(field_klass->as_klass()); + if (bt == T_VALUETYPE && field->is_static()) { + // Check if static value type field is already initialized + assert(!flattened, "static fields should not be flattened"); + ciInstance* mirror = field->holder()->java_mirror(); + ciObject* val = mirror->field_value(field).as_object(); + if (!val->is_null_object()) { + type = type->join_speculative(TypePtr::NOTNULL); + } + } } } else { type = Type::get_const_basic_type(bt); } - Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators); + Node* ld = NULL; + if (flattened) { + // Load flattened value type + ld = ValueTypeNode::make_from_flattened(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset); + } else { + DecoratorSet decorators = IN_HEAP; + decorators |= is_vol ? 
MO_SEQ_CST : MO_UNORDERED; + ld = access_load_at(obj, adr, adr_type, type, bt, decorators); + if (flattenable) { + // Load a non-flattened but flattenable value type from memory + if (field_klass->as_value_klass()->is_scalarizable()) { + ld = ValueTypeNode::make_from_oop(this, ld, field_klass->as_value_klass()); + } else { + ld = null2default(ld, field_klass->as_value_klass()); + } + } + } // Adjust Java stack if (type2size[bt] == 1) @@ -247,20 +281,35 @@ DecoratorSet decorators = IN_HEAP; decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED; - bool is_obj = bt == T_OBJECT || bt == T_ARRAY; - // Store the value. const Type* field_type; if (!field->type()->is_loaded()) { field_type = TypeInstPtr::BOTTOM; } else { - if (is_obj) { + if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) { field_type = TypeOopPtr::make_from_klass(field->type()->as_klass()); } else { field_type = Type::BOTTOM; } } - access_store_at(obj, adr, adr_type, val, field_type, bt, decorators); + + if (field->is_flattenable() && !val->is_ValueType()) { + inc_sp(1); + val = null_check(val); + dec_sp(1); + if (stopped()) return; + } + + if (field->is_flattened()) { + // Store flattened value type to a non-static field + if (!val->is_ValueType()) { + assert(!gvn().type(val)->maybe_null(), "should never be null"); + val = ValueTypeNode::make_from_oop(this, val, field->type()->as_value_klass()); + } + val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset); + } else { + access_store_at(obj, adr, adr_type, val, field_type, bt, decorators); + } if (is_field) { // Remember we wrote a volatile field. 
@@ -292,16 +341,17 @@ } //============================================================================= -void Parse::do_anewarray() { + +void Parse::do_newarray() { bool will_link; ciKlass* klass = iter().get_klass(will_link); // Uncommon Trap when class that array contains is not loaded // we need the loaded class for the rest of graph; do not // initialize the container class (see Java spec)!!! - assert(will_link, "anewarray: typeflow responsibility"); + assert(will_link, "newarray: typeflow responsibility"); - ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass); + ciArrayKlass* array_klass = ciArrayKlass::make(klass); // Check that array_klass object is loaded if (!array_klass->is_loaded()) { // Generate uncommon_trap for unloaded array_class @@ -309,6 +359,13 @@ Deoptimization::Action_reinterpret, array_klass); return; + } else if (array_klass->element_klass() != NULL && + array_klass->element_klass()->is_valuetype() && + !array_klass->element_klass()->as_value_klass()->is_initialized()) { + uncommon_trap(Deoptimization::Reason_uninitialized, + Deoptimization::Action_reinterpret, + NULL); + return; } kill_dead_locals(); --- old/src/hotspot/share/opto/parseHelper.cpp 2019-03-11 14:26:55.470354473 +0100 +++ new/src/hotspot/share/opto/parseHelper.cpp 2019-03-11 14:26:55.258354476 +0100 @@ -23,15 +23,18 @@ */ #include "precompiled.hpp" +#include "ci/ciValueKlass.hpp" #include "classfile/systemDictionary.hpp" #include "compiler/compileLog.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/valueArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/memnode.hpp" #include "opto/mulnode.hpp" #include "opto/parse.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" +#include "opto/valuetypenode.hpp" #include "runtime/sharedRuntime.hpp" //------------------------------make_dtrace_method_entry_exit ---------------- @@ -65,6 +68,7 @@ void Parse::do_checkcast() { bool will_link; ciKlass* klass = iter().get_klass(will_link); + bool never_null = 
iter().is_klass_never_null(); Node *obj = peek(); @@ -92,7 +96,10 @@ return; } - Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) ); + Node* res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)), NULL, never_null); + if (stopped()) { + return; + } // Pop from stack AFTER gen_checkcast because it can uncommon trap and // the debug info has to be correct. @@ -137,26 +144,27 @@ //------------------------------array_store_check------------------------------ // pull array from stack and check that the store is valid -void Parse::array_store_check() { - +Node* Parse::array_store_check() { // Shorthand access to array store elements without popping them. Node *obj = peek(0); Node *idx = peek(1); Node *ary = peek(2); + const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); + const Type* elemtype = ary_t->elem(); + const TypeOopPtr* elemptr = elemtype->make_oopptr(); + bool is_value_array = elemtype->isa_valuetype() != NULL || (elemptr != NULL && elemptr->is_valuetypeptr()); + if (_gvn.type(obj) == TypePtr::NULL_PTR) { // There's never a type check on null values. // This cutout lets us avoid the uncommon_trap(Reason_array_check) // below, which turns into a performance liability if the // gen_checkcast folds up completely. 
- return; + return obj; } // Extract the array klass type - int klass_offset = oopDesc::klass_offset_in_bytes(); - Node* p = basic_plus_adr( ary, ary, klass_offset ); - // p's type is array-of-OOPS plus klass_offset - Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS)); + Node* array_klass = load_object_klass(ary); // Get the array klass const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr(); @@ -221,7 +229,8 @@ // Come here for polymorphic array klasses // Extract the array element class - int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset()); + int element_klass_offset = in_bytes(ArrayKlass::element_klass_offset()); + Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset); // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true, // we must set a control edge from the IfTrue node created by the uncommon_trap above to the @@ -229,48 +238,64 @@ Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL, immutable_memory(), p2, tak)); + // Handle value type arrays + if (is_value_array) { + // We statically know that this is a value type array, use precise klass ptr + ciValueKlass* vk = elemtype->isa_valuetype() ? elemtype->is_valuetype()->value_klass() : + elemptr->value_klass(); + a_e_klass = makecon(TypeKlassPtr::make(vk)); + } + // Check (the hard way) and throw if not a subklass. - // Result is ignored, we just need the CFG effects. 
- gen_checkcast(obj, a_e_klass); + return gen_checkcast(obj, a_e_klass); } void Parse::emit_guard_for_new(ciInstanceKlass* klass) { - // Emit guarded new - // if (klass->_init_thread != current_thread || - // klass->_init_state != being_initialized) - // uncommon_trap - Node* cur_thread = _gvn.transform( new ThreadLocalNode() ); - Node* merge = new RegionNode(3); - _gvn.set_type(merge, Type::CONTROL); - Node* kls = makecon(TypeKlassPtr::make(klass)); + if ((!klass->is_initialized() && !klass->is_being_initialized()) || + klass->is_abstract() || klass->is_interface() || + klass->name() == ciSymbol::java_lang_Class() || + iter().is_unresolved_klass()) { + uncommon_trap(Deoptimization::Reason_uninitialized, + Deoptimization::Action_reinterpret, + klass); + } if (klass->is_being_initialized()) { + // Emit guarded new + // if (klass->_init_thread != current_thread || + // klass->_init_state != being_initialized) + // uncommon_trap + Node* cur_thread = _gvn.transform( new ThreadLocalNode() ); + Node* merge = new RegionNode(3); + _gvn.set_type(merge, Type::CONTROL); + Node* kls = makecon(TypeKlassPtr::make(klass)); + + Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset())); + Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset); + Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered); + Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq); + IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN); + set_control(IfTrue(iff)); + merge->set_req(1, IfFalse(iff)); + + Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset())); + adr_node = basic_plus_adr(kls, kls, init_state_offset); + // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler + // can generate code to load it as unsigned byte. 
+ Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered); + Node* being_init = _gvn.intcon(InstanceKlass::being_initialized); + tst = Bool( CmpI( init_state, being_init), BoolTest::eq); + iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN); + set_control(IfTrue(iff)); + merge->set_req(2, IfFalse(iff)); + + PreserveJVMState pjvms(this); + record_for_igvn(merge); + set_control(merge); - Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset())); - Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset); - Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered); - Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq); - IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN); - set_control(IfTrue(iff)); - merge->set_req(1, IfFalse(iff)); - - Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset())); - adr_node = basic_plus_adr(kls, kls, init_state_offset); - // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler - // can generate code to load it as unsigned byte. - Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered); - Node* being_init = _gvn.intcon(InstanceKlass::being_initialized); - tst = Bool( CmpI( init_state, being_init), BoolTest::eq); - iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN); - set_control(IfTrue(iff)); - merge->set_req(2, IfFalse(iff)); - - PreserveJVMState pjvms(this); - record_for_igvn(merge); - set_control(merge); - - uncommon_trap(Deoptimization::Reason_uninitialized, - Deoptimization::Action_reinterpret, - klass); + uncommon_trap(Deoptimization::Reason_uninitialized, + Deoptimization::Action_reinterpret, + klass); + } } @@ -283,18 +308,8 @@ assert(will_link, "_new: typeflow responsibility"); // Should initialize, or throw an InstantiationError? 
- if ((!klass->is_initialized() && !klass->is_being_initialized()) || - klass->is_abstract() || klass->is_interface() || - klass->name() == ciSymbol::java_lang_Class() || - iter().is_unresolved_klass()) { - uncommon_trap(Deoptimization::Reason_uninitialized, - Deoptimization::Action_reinterpret, - klass); - return; - } - if (klass->is_being_initialized()) { - emit_guard_for_new(klass); - } + emit_guard_for_new(klass); + if (stopped()) return; Node* kls = makecon(TypeKlassPtr::make(klass)); Node* obj = new_instance(kls); @@ -316,6 +331,63 @@ } } +//------------------------------do_defaultvalue--------------------------------- +void Parse::do_defaultvalue() { + bool will_link; + ciValueKlass* vk = iter().get_klass(will_link)->as_value_klass(); + assert(will_link, "defaultvalue: typeflow responsibility"); + + // Should initialize, or throw an InstantiationError? + emit_guard_for_new(vk); + if (stopped()) return; + + // Always scalarize default value because it's not NULL by definition + push(ValueTypeNode::make_default(_gvn, vk)); +} + +//------------------------------do_withfield------------------------------------ +void Parse::do_withfield() { + bool will_link; + ciField* field = iter().get_field(will_link); + assert(will_link, "withfield: typeflow responsibility"); + BasicType bt = field->layout_type(); + Node* val = type2size[bt] == 1 ? 
pop() : pop_pair(); + ciValueKlass* holder_klass = field->holder()->as_value_klass(); + Node* holder = pop(); + + if (!holder->is_ValueType()) { + // Null check and scalarize value type holder + inc_sp(2); + holder = null_check(holder); + dec_sp(2); + if (stopped()) return; + holder = ValueTypeNode::make_from_oop(this, holder, holder_klass); + } + if (!val->is_ValueType() && field->is_flattenable()) { + // Null check and scalarize value type field value + inc_sp(2); + val = null_check(val); + dec_sp(2); + if (stopped()) return; + val = ValueTypeNode::make_from_oop(this, val, gvn().type(val)->value_klass()); + } else if (val->is_ValueType() && !field->is_flattenable()) { + // Non-flattenable field should not be scalarized + val = ValueTypePtrNode::make_from_value_type(this, val->as_ValueType()); + } + + // Clone the value type node and set the new field value + ValueTypeNode* new_vt = holder->clone()->as_ValueType(); + new_vt->set_oop(_gvn.zerocon(T_VALUETYPE)); + gvn().set_type(new_vt, new_vt->bottom_type()); + new_vt->set_field_value_by_offset(field->offset(), val); + + if (holder_klass->is_scalarizable()) { + push(_gvn.transform(new_vt)); + } else { + push(new_vt->allocate(this)->get_oop()); + } +} + #ifndef PRODUCT //------------------------------dump_map_adr_mem------------------------------- // Debug dump of the mapping from address types to MergeMemNode indices. --- old/src/hotspot/share/opto/phaseX.cpp 2019-03-11 14:26:55.898354467 +0100 +++ new/src/hotspot/share/opto/phaseX.cpp 2019-03-11 14:26:55.686354470 +0100 @@ -1245,18 +1245,18 @@ //------------------------------transform-------------------------------------- // Non-recursive: idealize Node 'n' with respect to its inputs and its value Node *PhaseIterGVN::transform( Node *n ) { - if (_delay_transform) { - // Register the node but don't optimize for now - register_new_node_with_optimizer(n); - return n; - } - // If brand new node, make space in type array, and give it a type. 
ensure_type_or_null(n); if (type_or_null(n) == NULL) { set_type_bottom(n); } + if (_delay_transform) { + // Add the node to the worklist but don't optimize for now + _worklist.push(n); + return n; + } + return transform_old(n); } @@ -1496,6 +1496,9 @@ if (dead->Opcode() == Op_Opaque4) { C->remove_opaque4_node(dead); } + if (dead->is_ValueTypeBase()) { + C->remove_value_type(dead); + } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); bs->unregister_potential_barrier_node(dead); } @@ -1560,6 +1563,17 @@ temp->destruct(); // reuse the _idx of this little guy } +void PhaseIterGVN::replace_in_uses(Node* n, Node* m) { + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* u = n->fast_out(i); + if (u != n) { + rehash_node_delayed(u); + int nb = u->replace_edge(n, m); + --i, imax -= nb; + } + } +} + //------------------------------add_users_to_worklist-------------------------- void PhaseIterGVN::add_users_to_worklist0( Node *n ) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { @@ -1705,6 +1719,14 @@ Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory); if (imem != NULL) add_users_to_worklist0(imem); } + if (use_op == Op_CastP2X) { + for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { + Node* u = use->fast_out(i2); + if (u->Opcode() == Op_AndX) { + _worklist.push(u); + } + } + } // Loading the java mirror from a Klass requires two loads and the type // of the mirror load depends on the type of 'n'. See LoadNode::Value(). // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror)))) @@ -1866,6 +1888,14 @@ worklist.push(phi); } } + if (m_op == Op_CastP2X) { + for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) { + Node* u = m->fast_out(i2); + if (u->Opcode() == Op_AndX) { + worklist.push(u); + } + } + } // Loading the java mirror from a Klass requires two loads and the type // of the mirror load depends on the type of 'n'. See LoadNode::Value(). 
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); --- old/src/hotspot/share/opto/phaseX.hpp 2019-03-11 14:26:56.334354461 +0100 +++ new/src/hotspot/share/opto/phaseX.hpp 2019-03-11 14:26:56.126354464 +0100 @@ -478,7 +478,7 @@ // Idealize new Node 'n' with respect to its inputs and its value virtual Node *transform( Node *a_node ); - virtual void record_for_igvn(Node *n) { } + virtual void record_for_igvn(Node *n) { _worklist.push(n); } virtual PhaseIterGVN *is_IterGVN() { return this; } @@ -530,6 +530,8 @@ subsume_node(old, nn); } + void replace_in_uses(Node* n, Node* m); + // Delayed node rehash: remove a node from the hash table and rehash it during // next optimizing pass void rehash_node_delayed(Node* n) { --- old/src/hotspot/share/opto/runtime.cpp 2019-03-11 14:26:56.762354455 +0100 +++ new/src/hotspot/share/opto/runtime.cpp 2019-03-11 14:26:56.550354458 +0100 @@ -49,6 +49,8 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.inline.hpp" #include "opto/ad.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" @@ -240,7 +242,12 @@ // Scavenge and allocate an instance. oop result; - if (array_type->is_typeArray_klass()) { + if (array_type->is_valueArray_klass()) { + // TODO refactor all these checks, is_typeArray_klass should not be true for a value type array + // TODO use oopFactory::new_array + Klass* elem_type = ValueArrayKlass::cast(array_type)->element_klass(); + result = oopFactory::new_valueArray(elem_type, len, THREAD); + } else if (array_type->is_typeArray_klass()) { // The oopFactory likes to work with the element type. // (We could bypass the oopFactory, since it doesn't add much value.) BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); @@ -251,7 +258,7 @@ // that latter value in hand for the fast path. 
Handle holder(THREAD, array_type->klass_holder()); // keep the array klass alive Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass(); - result = oopFactory::new_objArray(elem_type, len, THREAD); + result = oopFactory::new_array(elem_type, len, THREAD); } // Pass oops back through thread local storage. Our apparent type to Java @@ -565,7 +572,7 @@ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } @@ -1173,7 +1180,7 @@ fields = TypeTuple::fields(1); fields[TypeFunc::Parms+0] = NULL; // void const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* receiver)) @@ -1492,7 +1499,7 @@ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } @@ -1510,7 +1517,7 @@ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() { @@ -1526,7 +1533,7 @@ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); - return TypeFunc::make(domain,range); + return TypeFunc::make(domain, range); } @@ -1658,3 +1665,124 @@ st->print_raw_cr(tempst.as_string()); } + +const TypeFunc *OptoRuntime::store_value_type_fields_Type() { + // create input type (domain) + uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2; + const Type **fields = TypeTuple::fields(total); + // We don't know the number of returned values and their + // types. Assume all registers available to the return convention + // are used. 
+ fields[TypeFunc::Parms] = TypePtr::BOTTOM; + uint i = 1; + for (; i < SharedRuntime::java_return_convention_max_int; i++) { + fields[TypeFunc::Parms+i] = TypeInt::INT; + } + for (; i < total; i+=2) { + fields[TypeFunc::Parms+i] = Type::DOUBLE; + fields[TypeFunc::Parms+i+1] = Type::HALF; + } + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields); + + // create result type (range) + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; + + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields); + + return TypeFunc::make(domain, range); +} + +const TypeFunc *OptoRuntime::pack_value_type_Type() { + // create input type (domain) + uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2; + const Type **fields = TypeTuple::fields(total); + // We don't know the number of returned values and their + // types. Assume all registers available to the return convention + // are used. 
+ fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM; + fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; + uint i = 2; + for (; i < SharedRuntime::java_return_convention_max_int+1; i++) { + fields[TypeFunc::Parms+i] = TypeInt::INT; + } + for (; i < total; i+=2) { + fields[TypeFunc::Parms+i] = Type::DOUBLE; + fields[TypeFunc::Parms+i+1] = Type::HALF; + } + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields); + + // create result type (range) + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; + + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields); + + return TypeFunc::make(domain, range); +} + +JRT_LEAF(void, OptoRuntime::load_unknown_value(valueArrayOopDesc* array, int index, instanceOopDesc* buffer)) +{ + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array oop"); + + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + void* src = array->value_at_addr(index, vaklass->layout_helper()); + vklass->value_store(src, vklass->data_for_oop(buffer), + vaklass->element_byte_size(), true, false); +} +JRT_END + +const TypeFunc *OptoRuntime::load_unknown_value_Type() { + // create input type (domain) + const Type **fields = TypeTuple::fields(3); + // We don't know the number of returned values and their + // types. Assume all registers available to the return convention + // are used. 
+ fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL; + fields[TypeFunc::Parms+1] = TypeInt::POS; + fields[TypeFunc::Parms+2] = TypeInstPtr::NOTNULL; + + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + + return TypeFunc::make(domain, range); +} + +JRT_LEAF(void, OptoRuntime::store_unknown_value(instanceOopDesc* buffer, valueArrayOopDesc* array, int index)) +{ + assert(buffer != NULL, "can't store null into flat array"); + Klass* klass = array->klass(); + assert(klass->is_valueArray_klass(), "expected value array"); + assert(ArrayKlass::cast(klass)->element_klass() == buffer->klass(), "Store type incorrect"); + + ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); + ValueKlass* vklass = vaklass->element_klass(); + const int lh = vaklass->layout_helper(); + vklass->value_store(vklass->data_for_oop(buffer), array->value_at_addr(index, lh), + vaklass->element_byte_size(), true, false); +} +JRT_END + +const TypeFunc *OptoRuntime::store_unknown_value_Type() { + // create input type (domain) + const Type **fields = TypeTuple::fields(3); + // We don't know the number of returned values and their + // types. Assume all registers available to the return convention + // are used. 
+ fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL; + fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL; + fields[TypeFunc::Parms+2] = TypeInt::POS; + + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + + return TypeFunc::make(domain, range); +} --- old/src/hotspot/share/opto/runtime.hpp 2019-03-11 14:26:57.294354448 +0100 +++ new/src/hotspot/share/opto/runtime.hpp 2019-03-11 14:26:57.062354451 +0100 @@ -318,6 +318,14 @@ static const TypeFunc* dtrace_method_entry_exit_Type(); static const TypeFunc* dtrace_object_alloc_Type(); + static const TypeFunc* store_value_type_fields_Type(); + static const TypeFunc* pack_value_type_Type(); + + static void load_unknown_value(valueArrayOopDesc* array, int index, instanceOopDesc* buffer); + static const TypeFunc *load_unknown_value_Type(); + static void store_unknown_value(instanceOopDesc* buffer, valueArrayOopDesc* array, int index); + static const TypeFunc *store_unknown_value_Type(); + private: static NamedCounter * volatile _named_counters; --- old/src/hotspot/share/opto/split_if.cpp 2019-03-11 14:26:57.778354441 +0100 +++ new/src/hotspot/share/opto/split_if.cpp 2019-03-11 14:26:57.530354445 +0100 @@ -27,6 +27,7 @@ #include "opto/callnode.hpp" #include "opto/loopnode.hpp" #include "opto/movenode.hpp" +#include "opto/valuetypenode.hpp" //------------------------------split_thru_region------------------------------ @@ -231,6 +232,15 @@ rtype = TypeLong::INT; } + // Value types should not be split through Phis but each value input + // needs to be merged individually. At this point, value types should + // only be used by AllocateNodes. Try to remove redundant allocations + // and unlink the now dead value type node. 
+ if (n->is_ValueType()) { + n->as_ValueType()->remove_redundant_allocations(&_igvn, this); + return true; // n is now dead + } + // Now actually split-up this guy. One copy per control path merging. Node *phi = PhiNode::make_blank(blk1, n); for( uint j = 1; j < blk1->req(); j++ ) { --- old/src/hotspot/share/opto/stringopts.cpp 2019-03-11 14:26:58.302354434 +0100 +++ new/src/hotspot/share/opto/stringopts.cpp 2019-03-11 14:26:58.030354438 +0100 @@ -321,37 +321,37 @@ void StringConcat::eliminate_call(CallNode* call) { Compile* C = _stringopts->C; - CallProjections projs; - call->extract_projections(&projs, false); - if (projs.fallthrough_catchproj != NULL) { - C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control)); + CallProjections* projs = call->extract_projections(false); + if (projs->fallthrough_catchproj != NULL) { + C->gvn_replace_by(projs->fallthrough_catchproj, call->in(TypeFunc::Control)); } - if (projs.fallthrough_memproj != NULL) { - C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory)); + if (projs->fallthrough_memproj != NULL) { + C->gvn_replace_by(projs->fallthrough_memproj, call->in(TypeFunc::Memory)); } - if (projs.catchall_memproj != NULL) { - C->gvn_replace_by(projs.catchall_memproj, C->top()); + if (projs->catchall_memproj != NULL) { + C->gvn_replace_by(projs->catchall_memproj, C->top()); } - if (projs.fallthrough_ioproj != NULL) { - C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O)); + if (projs->fallthrough_ioproj != NULL) { + C->gvn_replace_by(projs->fallthrough_ioproj, call->in(TypeFunc::I_O)); } - if (projs.catchall_ioproj != NULL) { - C->gvn_replace_by(projs.catchall_ioproj, C->top()); + if (projs->catchall_ioproj != NULL) { + C->gvn_replace_by(projs->catchall_ioproj, C->top()); } - if (projs.catchall_catchproj != NULL) { + if (projs->catchall_catchproj != NULL) { // EA can't cope with the partially collapsed graph this // creates so put it on the worklist to be collapsed later. 
- for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) { + for (SimpleDUIterator i(projs->catchall_catchproj); i.has_next(); i.next()) { Node *use = i.get(); int opc = use->Opcode(); if (opc == Op_CreateEx || opc == Op_Region) { _stringopts->record_dead_node(use); } } - C->gvn_replace_by(projs.catchall_catchproj, C->top()); + C->gvn_replace_by(projs->catchall_catchproj, C->top()); } - if (projs.resproj != NULL) { - C->gvn_replace_by(projs.resproj, C->top()); + if (projs->resproj[0] != NULL) { + assert(projs->nb_resproj == 1, "unexpected number of results"); + C->gvn_replace_by(projs->resproj[0], C->top()); } C->gvn_replace_by(call, C->top()); } --- old/src/hotspot/share/opto/subnode.cpp 2019-03-11 14:26:58.798354427 +0100 +++ new/src/hotspot/share/opto/subnode.cpp 2019-03-11 14:26:58.562354431 +0100 @@ -717,6 +717,41 @@ return NULL; // No change } +//------------------------------Ideal------------------------------------------ +Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* a = NULL; + Node* b = NULL; + if (is_double_null_check(phase, a, b) && (phase->type(a)->is_zero_type() || phase->type(b)->is_zero_type())) { + // Degraded to a simple null check, use old acmp + return new CmpPNode(a, b); + } + return NULL; +} + +// Match double null check emitted by Compile::optimize_acmp() +bool CmpLNode::is_double_null_check(PhaseGVN* phase, Node*& a, Node*& b) const { + if (in(1)->Opcode() == Op_OrL && + in(1)->in(1)->Opcode() == Op_CastP2X && + in(1)->in(2)->Opcode() == Op_CastP2X && + in(2)->bottom_type()->is_zero_type()) { + assert(EnableValhalla, "unexpected double null check"); + a = in(1)->in(1)->in(1); + b = in(1)->in(2)->in(1); + return true; + } + return false; +} + +//------------------------------Value------------------------------------------ +const Type* CmpLNode::Value(PhaseGVN* phase) const { + Node* a = NULL; + Node* b = NULL; + if (is_double_null_check(phase, a, b) && (!phase->type(a)->maybe_null() || 
!phase->type(b)->maybe_null())) { + // One operand is never NULL, emit constant false + return TypeInt::CC_GT; + } + return SubNode::Value(phase); +} //============================================================================= // Simplify a CmpL (compare 2 longs ) node, based on local information. @@ -799,6 +834,14 @@ // Simplify an CmpP (compare 2 pointers) node, based on local information. // If both inputs are constants, compare them. const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const { + if (ACmpOnValues != 3 && + (t1->isa_valuetype() || t2->isa_valuetype() || + ((t1->is_valuetypeptr() || t2->is_valuetypeptr()) && + (!t1->maybe_null() || !t2->maybe_null())))) { + // One operand is a value type and one operand is never null, fold to constant false + return TypeInt::CC_GT; + } + const TypePtr *r0 = t1->is_ptr(); // Handy access const TypePtr *r1 = t2->is_ptr(); @@ -936,7 +979,18 @@ // super-type array vs a known klass with no subtypes. This amounts to // checking to see an unknown klass subtypes a known klass with no subtypes; // this only happens on an exact match. We can shorten this test by 1 load. -Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { +Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) { + Node* pert = has_perturbed_operand(); + if (pert != NULL) { + // Optimize new acmp + Node* a = pert->in(AddPNode::Base); // unperturbed a + Node* b = in(2); + Node* cmp = phase->C->optimize_acmp(phase, a, b); + if (cmp != NULL) { + return cmp; + } + } + // Normalize comparisons between Java mirrors into comparisons of the low- // level klass, where a dependent load could be shortened. 
// @@ -1043,6 +1097,22 @@ return this; } +// Checks if one operand is perturbed and returns it +Node* CmpPNode::has_perturbed_operand() const { + // We always perturbe the first operand + AddPNode* addP = in(1)->isa_AddP(); + if (addP != NULL) { + Node* base = addP->in(AddPNode::Base); + if (base->is_top()) { + // RawPtr comparison + return NULL; + } + assert(EnableValhalla && ACmpOnValues == 1, "unexpected perturbed oop"); + return in(1); + } + return NULL; +} + //============================================================================= //------------------------------sub-------------------------------------------- // Simplify an CmpN (compare 2 pointers) node, based on local information. --- old/src/hotspot/share/opto/subnode.hpp 2019-03-11 14:26:59.282354421 +0100 +++ new/src/hotspot/share/opto/subnode.hpp 2019-03-11 14:26:59.042354424 +0100 @@ -177,6 +177,7 @@ virtual int Opcode() const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual const Type *sub( const Type *, const Type * ) const; + Node* has_perturbed_operand() const; }; //------------------------------CmpNNode-------------------------------------- @@ -195,7 +196,10 @@ public: CmpLNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {} virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); + virtual const Type* Value(PhaseGVN* phase) const; virtual const Type *sub( const Type *, const Type * ) const; + bool is_double_null_check(PhaseGVN* phase, Node*& a, Node*& b) const; }; //------------------------------CmpULNode--------------------------------------- --- old/src/hotspot/share/opto/type.cpp 2019-03-11 14:26:59.934354412 +0100 +++ new/src/hotspot/share/opto/type.cpp 2019-03-11 14:26:59.630354416 +0100 @@ -23,8 +23,10 @@ */ #include "precompiled.hpp" +#include "ci/ciField.hpp" #include "ci/ciMethodData.hpp" #include "ci/ciTypeFlow.hpp" +#include "ci/ciValueKlass.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include 
"compiler/compileLog.hpp" @@ -46,6 +48,52 @@ // Dictionary of types shared among compilations. Dict* Type::_shared_type_dict = NULL; +const Type::Offset Type::Offset::top(Type::OffsetTop); +const Type::Offset Type::Offset::bottom(Type::OffsetBot); + +const Type::Offset Type::Offset::meet(const Type::Offset other) const { + // Either is 'TOP' offset? Return the other offset! + int offset = other._offset; + if (_offset == OffsetTop) return Offset(offset); + if (offset == OffsetTop) return Offset(_offset); + // If either is different, return 'BOTTOM' offset + if (_offset != offset) return bottom; + return Offset(_offset); +} + +const Type::Offset Type::Offset::dual() const { + if (_offset == OffsetTop) return bottom;// Map 'TOP' into 'BOTTOM' + if (_offset == OffsetBot) return top;// Map 'BOTTOM' into 'TOP' + return Offset(_offset); // Map everything else into self +} + +const Type::Offset Type::Offset::add(intptr_t offset) const { + // Adding to 'TOP' offset? Return 'TOP'! + if (_offset == OffsetTop || offset == OffsetTop) return top; + // Adding to 'BOTTOM' offset? Return 'BOTTOM'! + if (_offset == OffsetBot || offset == OffsetBot) return bottom; + // Addition overflows or "accidentally" equals to OffsetTop? Return 'BOTTOM'! 
+ offset += (intptr_t)_offset; + if (offset != (int)offset || offset == OffsetTop) return bottom; + + // assert( _offset >= 0 && _offset+offset >= 0, "" ); + // It is possible to construct a negative offset during PhaseCCP + + return Offset((int)offset); // Sum valid offsets +} + +void Type::Offset::dump2(outputStream *st) const { + if (_offset == 0) { + return; + } else if (_offset == OffsetTop) { + st->print("+top"); + } + else if (_offset == OffsetBot) { + st->print("+bot"); + } else if (_offset) { + st->print("+%d", _offset); + } +} // Array which maps compiler types to Basic Types const Type::TypeInfo Type::_type_info[Type::lastype] = { @@ -85,6 +133,7 @@ { Bad, T_ILLEGAL, "vectory:", false, Op_VecY, relocInfo::none }, // VectorY { Bad, T_ILLEGAL, "vectorz:", false, Op_VecZ, relocInfo::none }, // VectorZ #endif + { Bad, T_VALUETYPE, "value:", false, Node::NotAMachineReg, relocInfo::none }, // ValueType { Bad, T_ADDRESS, "anyptr:", false, Op_RegP, relocInfo::none }, // AnyPtr { Bad, T_ADDRESS, "rawptr:", false, Op_RegP, relocInfo::none }, // RawPtr { Bad, T_OBJECT, "oop:", true, Op_RegP, relocInfo::oop_type }, // OopPtr @@ -215,6 +264,16 @@ assert(type->is_return_address(), ""); return TypeRawPtr::make((address)(intptr_t)type->as_return_address()->bci()); + case T_VALUETYPE: { + bool is_never_null = type->is_never_null(); + ciValueKlass* vk = type->unwrap()->as_value_klass(); + if (vk->is_scalarizable() && is_never_null) { + return TypeValueType::make(vk); + } else { + return TypeOopPtr::make_from_klass(vk)->join_speculative(is_never_null ? 
TypePtr::NOTNULL : TypePtr::BOTTOM); + } + } + default: // make sure we did not mix up the cases: assert(type != ciTypeFlow::StateVector::bottom_type(), ""); @@ -243,6 +302,7 @@ case T_FLOAT: return TypeF::make(constant.as_float()); case T_DOUBLE: return TypeD::make(constant.as_double()); case T_ARRAY: + case T_VALUETYPE: case T_OBJECT: { // cases: // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0) @@ -284,12 +344,14 @@ switch (conbt) { case T_BOOLEAN: conbt = T_BYTE; break; case T_ARRAY: conbt = T_OBJECT; break; + case T_VALUETYPE: conbt = T_OBJECT; break; default: break; } switch (loadbt) { case T_BOOLEAN: loadbt = T_BYTE; break; case T_NARROWOOP: loadbt = T_OBJECT; break; case T_ARRAY: loadbt = T_OBJECT; break; + case T_VALUETYPE: loadbt = T_OBJECT; break; case T_ADDRESS: loadbt = T_OBJECT; break; default: break; } @@ -527,9 +589,9 @@ floop[1] = TypeInt::INT; TypeTuple::LOOPBODY = TypeTuple::make( 2, floop ); - TypePtr::NULL_PTR= TypePtr::make(AnyPtr, TypePtr::Null, 0); - TypePtr::NOTNULL = TypePtr::make(AnyPtr, TypePtr::NotNull, OffsetBot); - TypePtr::BOTTOM = TypePtr::make(AnyPtr, TypePtr::BotPTR, OffsetBot); + TypePtr::NULL_PTR= TypePtr::make(AnyPtr, TypePtr::Null, Offset(0)); + TypePtr::NOTNULL = TypePtr::make(AnyPtr, TypePtr::NotNull, Offset::bottom); + TypePtr::BOTTOM = TypePtr::make(AnyPtr, TypePtr::BotPTR, Offset::bottom); TypeRawPtr::BOTTOM = TypeRawPtr::make( TypePtr::BotPTR ); TypeRawPtr::NOTNULL= TypeRawPtr::make( TypePtr::NotNull ); @@ -546,12 +608,12 @@ TypeInstPtr::BOTTOM = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass()); TypeInstPtr::MIRROR = TypeInstPtr::make(TypePtr::NotNull, current->env()->Class_klass()); TypeInstPtr::MARK = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), - false, 0, oopDesc::mark_offset_in_bytes()); + false, 0, Offset(oopDesc::mark_offset_in_bytes())); TypeInstPtr::KLASS = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), - false, 0, 
oopDesc::klass_offset_in_bytes()); - TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot); + false, 0, Offset(oopDesc::klass_offset_in_bytes())); + TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, Offset::bottom, TypeOopPtr::InstanceBot); - TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, OffsetBot); + TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, Offset::bottom); TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR ); TypeNarrowOop::BOTTOM = TypeNarrowOop::make( TypeInstPtr::BOTTOM ); @@ -568,9 +630,9 @@ mreg2type[Op_RegL] = TypeLong::LONG; mreg2type[Op_RegFlags] = TypeInt::CC; - TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), NULL /* current->env()->Object_klass() */, false, arrayOopDesc::length_offset_in_bytes()); + TypeAryPtr::RANGE = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), NULL /* current->env()->Object_klass() */, false, Offset(arrayOopDesc::length_offset_in_bytes())); - TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Offset::bottom); #ifdef _LP64 if (UseCompressedOops) { @@ -580,19 +642,20 @@ #endif { // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). 
- TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Offset::bottom); } - TypeAryPtr::BYTES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE), true, Type::OffsetBot); - TypeAryPtr::SHORTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT), true, Type::OffsetBot); - TypeAryPtr::CHARS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::CHAR ,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, Type::OffsetBot); - TypeAryPtr::INTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::INT ,TypeInt::POS), ciTypeArrayKlass::make(T_INT), true, Type::OffsetBot); - TypeAryPtr::LONGS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeLong::LONG ,TypeInt::POS), ciTypeArrayKlass::make(T_LONG), true, Type::OffsetBot); - TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Type::OffsetBot); - TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Type::OffsetBot); + TypeAryPtr::BYTES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE), true, Offset::bottom); + TypeAryPtr::SHORTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT), true, Offset::bottom); + TypeAryPtr::CHARS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::CHAR ,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, Offset::bottom); + TypeAryPtr::INTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::INT ,TypeInt::POS), 
ciTypeArrayKlass::make(T_INT), true, Offset::bottom); + TypeAryPtr::LONGS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeLong::LONG ,TypeInt::POS), ciTypeArrayKlass::make(T_LONG), true, Offset::bottom); + TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Offset::bottom); + TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Offset::bottom); // Nobody should ask _array_body_type[T_NARROWOOP]. Use NULL as assert. TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; + TypeAryPtr::_array_body_type[T_VALUETYPE] = TypeAryPtr::OOPS; TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES; // boolean[] is a byte array @@ -603,8 +666,8 @@ TypeAryPtr::_array_body_type[T_FLOAT] = TypeAryPtr::FLOATS; TypeAryPtr::_array_body_type[T_DOUBLE] = TypeAryPtr::DOUBLES; - TypeKlassPtr::OBJECT = TypeKlassPtr::make( TypePtr::NotNull, current->env()->Object_klass(), 0 ); - TypeKlassPtr::OBJECT_OR_NULL = TypeKlassPtr::make( TypePtr::BotPTR, current->env()->Object_klass(), 0 ); + TypeKlassPtr::OBJECT = TypeKlassPtr::make(TypePtr::NotNull, current->env()->Object_klass(), Offset(0) ); + TypeKlassPtr::OBJECT_OR_NULL = TypeKlassPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), Offset(0) ); const Type **fi2c = TypeTuple::fields(2); fi2c[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM; // Method* @@ -643,6 +706,7 @@ _const_basic_type[T_DOUBLE] = Type::DOUBLE; _const_basic_type[T_OBJECT] = TypeInstPtr::BOTTOM; _const_basic_type[T_ARRAY] = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays + _const_basic_type[T_VALUETYPE] = TypeInstPtr::BOTTOM; _const_basic_type[T_VOID] = TypePtr::NULL_PTR; // 
reflection represents void this way _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs _const_basic_type[T_CONFLICT] = Type::BOTTOM; // why not? @@ -659,6 +723,7 @@ _zero_type[T_DOUBLE] = TypeD::ZERO; _zero_type[T_OBJECT] = TypePtr::NULL_PTR; _zero_type[T_ARRAY] = TypePtr::NULL_PTR; // null array is null oop + _zero_type[T_VALUETYPE] = TypePtr::NULL_PTR; _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null _zero_type[T_VOID] = Type::TOP; // the only void value is no value at all @@ -911,6 +976,9 @@ case NarrowKlass: return t->xmeet(this); + case ValueType: + return t->xmeet(this); + case Bad: // Type check default: // Bogus type not in lattice typerr(t); @@ -978,6 +1046,7 @@ Bad, // VectorX - handled in v-call Bad, // VectorY - handled in v-call Bad, // VectorZ - handled in v-call + Bad, // ValueType - handled in v-call Bad, // AnyPtr - handled in v-call Bad, // RawPtr - handled in v-call @@ -1873,12 +1942,37 @@ const TypeTuple *TypeTuple::INT_CC_PAIR; const TypeTuple *TypeTuple::LONG_CC_PAIR; +static void collect_value_fields(ciValueKlass* vk, const Type** field_array, uint& pos, ExtendedSignature& sig_cc) { + for (int j = 0; j < vk->nof_nonstatic_fields(); j++) { + ciField* field = vk->nonstatic_field_at(j); + BasicType bt = field->type()->basic_type(); + const Type* ft = Type::get_const_type(field->type()); + field_array[pos++] = ft; + if (type2size[bt] == 2) { + field_array[pos++] = Type::HALF; + } + // Skip reserved arguments + while (SigEntry::next_is_reserved(sig_cc, bt)) { + field_array[pos++] = Type::get_const_basic_type(bt); + if (type2size[bt] == 2) { + field_array[pos++] = Type::HALF; + } + } + } +} //------------------------------make------------------------------------------- // Make a TypeTuple from the range of a method signature -const TypeTuple *TypeTuple::make_range(ciSignature* sig) { +const TypeTuple *TypeTuple::make_range(ciSignature* sig, bool ret_vt_fields) { 
ciType* return_type = sig->return_type(); + bool never_null = sig->returns_never_null(); + uint arg_cnt = return_type->size(); + ret_vt_fields = ret_vt_fields && never_null && return_type->as_value_klass()->can_be_returned_as_fields(); + if (ret_vt_fields) { + arg_cnt = return_type->as_value_klass()->value_arg_slots() + 1; + } + const Type **field_array = fields(arg_cnt); switch (return_type->basic_type()) { case T_LONG: @@ -1899,6 +1993,17 @@ case T_INT: field_array[TypeFunc::Parms] = get_const_type(return_type); break; + case T_VALUETYPE: + if (ret_vt_fields) { + uint pos = TypeFunc::Parms; + field_array[pos] = TypePtr::BOTTOM; + pos++; + ExtendedSignature sig = ExtendedSignature(NULL, SigEntryFilter()); + collect_value_fields(return_type->as_value_klass(), field_array, pos, sig); + } else { + field_array[TypeFunc::Parms] = get_const_type(return_type)->join_speculative(never_null ? TypePtr::NOTNULL : TypePtr::BOTTOM); + } + break; case T_VOID: break; default: @@ -1908,25 +2013,39 @@ } // Make a TypeTuple from the domain of a method signature -const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig) { - uint arg_cnt = sig->size(); +const TypeTuple *TypeTuple::make_domain(ciMethod* method, bool vt_fields_as_args) { + ciSignature* sig = method->signature(); + ExtendedSignature sig_cc = ExtendedSignature(vt_fields_as_args ? method->get_sig_cc() : NULL, SigEntryFilter()); + + uint arg_cnt = sig->size() + (method->is_static() ? 
0 : 1); + if (vt_fields_as_args) { + for (arg_cnt = 0; !sig_cc.at_end(); ++sig_cc) { + arg_cnt += type2size[(*sig_cc)._bt]; + } + sig_cc = ExtendedSignature(method->get_sig_cc(), SigEntryFilter()); + } uint pos = TypeFunc::Parms; - const Type **field_array; - if (recv != NULL) { - arg_cnt++; - field_array = fields(arg_cnt); - // Use get_const_type here because it respects UseUniqueSubclasses: - field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL); - } else { - field_array = fields(arg_cnt); + const Type** field_array = fields(arg_cnt); + if (!method->is_static()) { + ciInstanceKlass* recv = method->holder(); + if (vt_fields_as_args && recv->is_valuetype()) { + collect_value_fields(recv->as_value_klass(), field_array, pos, sig_cc); + } else { + field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL); + if (vt_fields_as_args) { + ++sig_cc; + } + } } int i = 0; while (pos < TypeFunc::Parms + arg_cnt) { ciType* type = sig->type_at(i); + BasicType bt = type->basic_type(); + bool is_flattened = false; - switch (type->basic_type()) { + switch (bt) { case T_LONG: field_array[pos++] = TypeLong::LONG; field_array[pos++] = Type::HALF; @@ -1947,11 +2066,29 @@ case T_SHORT: field_array[pos++] = TypeInt::INT; break; + case T_VALUETYPE: { + bool never_null = sig->is_never_null_at(i); + if (vt_fields_as_args && never_null) { + is_flattened = true; + collect_value_fields(type->as_value_klass(), field_array, pos, sig_cc); + } else { + field_array[pos++] = get_const_type(type)->join_speculative(never_null ? 
TypePtr::NOTNULL : TypePtr::BOTTOM); + } + break; + } default: ShouldNotReachHere(); } + // Skip reserved arguments + while (!is_flattened && SigEntry::next_is_reserved(sig_cc, bt)) { + field_array[pos++] = Type::get_const_basic_type(bt); + if (type2size[bt] == 2) { + field_array[pos++] = Type::HALF; + } + } i++; } + assert(pos == TypeFunc::Parms + arg_cnt, "wrong number of arguments"); return (TypeTuple*)(new TypeTuple(TypeFunc::Parms + arg_cnt, field_array))->hashcons(); } @@ -2087,6 +2224,10 @@ //------------------------------make------------------------------------------- const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) { + if (elem->is_valuetypeptr()) { + // Value type array elements cannot be NULL + elem = elem->join_speculative(TypePtr::NOTNULL)->is_oopptr(); + } if (UseCompressedOops && elem->isa_oopptr()) { elem = elem->make_narrowoop(); } @@ -2243,6 +2384,120 @@ return false; } +//==============================TypeValueType======================================= + +//------------------------------make------------------------------------------- +const TypeValueType* TypeValueType::make(ciValueKlass* vk, bool larval) { + return (TypeValueType*)(new TypeValueType(vk, larval))->hashcons(); +} + +//------------------------------meet------------------------------------------- +// Compute the MEET of two types. It returns a new Type object. +const Type* TypeValueType::xmeet(const Type* t) const { + // Perform a fast test for common case; meeting the same types together. + if(this == t) return this; // Meeting same type-rep? 
+ + // Current "this->_base" is ValueType + switch (t->base()) { // switch on original type + + case Int: + case Long: + case FloatTop: + case FloatCon: + case FloatBot: + case DoubleTop: + case DoubleCon: + case DoubleBot: + case NarrowKlass: + case Bottom: + return Type::BOTTOM; + + case OopPtr: + case MetadataPtr: + case KlassPtr: + case RawPtr: + return TypePtr::BOTTOM; + + case Top: + return this; + + case NarrowOop: { + const Type* res = t->make_ptr()->xmeet(this); + if (res->isa_ptr()) { + return res->make_narrowoop(); + } + return res; + } + + case AryPtr: + case InstPtr: { + return t->xmeet(this); + } + + case ValueType: { + // All value types inherit from Object + const TypeValueType* other = t->is_valuetype(); + if (_vk == other->_vk) { + if (_larval == other->_larval || + !_larval) { + return this; + } else { + return t; + } + } + return TypeInstPtr::NOTNULL; + } + + default: // All else is a mistake + typerr(t); + + } + return this; +} + +//------------------------------xdual------------------------------------------ +const Type* TypeValueType::xdual() const { + return this; +} + +//------------------------------eq--------------------------------------------- +// Structural equality check for Type representations +bool TypeValueType::eq(const Type* t) const { + const TypeValueType* vt = t->is_valuetype(); + return (_vk == vt->value_klass() && _larval == vt->larval()); +} + +//------------------------------hash------------------------------------------- +// Type-specific hashing function. +int TypeValueType::hash(void) const { + return (intptr_t)_vk; +} + +//------------------------------singleton-------------------------------------- +// TRUE if Type is a singleton type, FALSE otherwise. Singletons are simple constants. +bool TypeValueType::singleton(void) const { + return false; +} + +//------------------------------empty------------------------------------------ +// TRUE if Type is a type with no values, FALSE otherwise. 
+bool TypeValueType::empty(void) const { + return false; +} + +//------------------------------dump2------------------------------------------ +#ifndef PRODUCT +void TypeValueType::dump2(Dict &d, uint depth, outputStream* st) const { + int count = _vk->nof_declared_nonstatic_fields(); + st->print("valuetype[%d]:{", count); + st->print("%s", count != 0 ? _vk->declared_nonstatic_field_at(0)->type()->name() : "empty"); + for (int i = 1; i < count; ++i) { + st->print(", %s", _vk->declared_nonstatic_field_at(i)->type()->name()); + } + st->print("}%s", _larval?" : larval":""); +} +#endif + //==============================TypeVect======================================= // Convenience common pre-built types. const TypeVect *TypeVect::VECTS = NULL; // 32-bit vectors @@ -2384,7 +2639,7 @@ }; //------------------------------make------------------------------------------- -const TypePtr *TypePtr::make(TYPES t, enum PTR ptr, int offset, const TypePtr* speculative, int inline_depth) { +const TypePtr* TypePtr::make(TYPES t, enum PTR ptr, Offset offset, const TypePtr* speculative, int inline_depth) { return (TypePtr*)(new TypePtr(t,ptr,offset, speculative, inline_depth))->hashcons(); } @@ -2398,7 +2653,7 @@ //------------------------------get_con---------------------------------------- intptr_t TypePtr::get_con() const { assert( _ptr == Null, "" ); - return _offset; + return offset(); } //------------------------------meet------------------------------------------- @@ -2467,20 +2722,13 @@ } //------------------------------meet_offset------------------------------------ -int TypePtr::meet_offset( int offset ) const { - // Either is 'TOP' offset? Return the other offset! 
- if( _offset == OffsetTop ) return offset; - if( offset == OffsetTop ) return _offset; - // If either is different, return 'BOTTOM' offset - if( _offset != offset ) return OffsetBot; - return _offset; +Type::Offset TypePtr::meet_offset(int offset) const { + return _offset.meet(Offset(offset)); } //------------------------------dual_offset------------------------------------ -int TypePtr::dual_offset( ) const { - if( _offset == OffsetTop ) return OffsetBot;// Map 'TOP' into 'BOTTOM' - if( _offset == OffsetBot ) return OffsetTop;// Map 'BOTTOM' into 'TOP' - return _offset; // Map everything else into self +Type::Offset TypePtr::dual_offset() const { + return _offset.dual(); } //------------------------------xdual------------------------------------------ @@ -2493,19 +2741,8 @@ } //------------------------------xadd_offset------------------------------------ -int TypePtr::xadd_offset( intptr_t offset ) const { - // Adding to 'TOP' offset? Return 'TOP'! - if( _offset == OffsetTop || offset == OffsetTop ) return OffsetTop; - // Adding to 'BOTTOM' offset? Return 'BOTTOM'! - if( _offset == OffsetBot || offset == OffsetBot ) return OffsetBot; - // Addition overflows or "accidentally" equals to OffsetTop? Return 'BOTTOM'! 
- offset += (intptr_t)_offset; - if (offset != (int)offset || offset == OffsetTop) return OffsetBot; - - // assert( _offset >= 0 && _offset+offset >= 0, "" ); - // It is possible to construct a negative offset during PhaseCCP - - return (int)offset; // Sum valid offsets +Type::Offset TypePtr::xadd_offset(intptr_t offset) const { + return _offset.add(offset); } //------------------------------add_offset------------------------------------- @@ -2517,13 +2754,13 @@ // Structural equality check for Type representations bool TypePtr::eq( const Type *t ) const { const TypePtr *a = (const TypePtr*)t; - return _ptr == a->ptr() && _offset == a->offset() && eq_speculative(a) && _inline_depth == a->_inline_depth; + return _ptr == a->ptr() && _offset == a->_offset && eq_speculative(a) && _inline_depth == a->_inline_depth; } //------------------------------hash------------------------------------------- // Type-specific hashing function. int TypePtr::hash(void) const { - return java_add(java_add((jint)_ptr, (jint)_offset), java_add((jint)hash_speculative(), (jint)_inline_depth)); + return java_add(java_add((jint)_ptr, (jint)offset()), java_add((jint)hash_speculative(), (jint)_inline_depth)); ; } @@ -2783,9 +3020,7 @@ void TypePtr::dump2( Dict &d, uint depth, outputStream *st ) const { if( _ptr == Null ) st->print("NULL"); else st->print("%s *", ptr_msg[_ptr]); - if( _offset == OffsetTop ) st->print("+top"); - else if( _offset == OffsetBot ) st->print("+bot"); - else if( _offset ) st->print("+%d", _offset); + _offset.dump2(st); dump_inline_depth(st); dump_speculative(st); } @@ -2820,11 +3055,11 @@ // constants bool TypePtr::singleton(void) const { // TopPTR, Null, AnyNull, Constant are all singletons - return (_offset != OffsetBot) && !below_centerline(_ptr); + return (_offset != Offset::bottom) && !below_centerline(_ptr); } bool TypePtr::empty(void) const { - return (_offset == OffsetTop) || above_centerline(_ptr); + return (_offset == Offset::top) || above_centerline(_ptr); } 
//============================================================================= @@ -2966,7 +3201,7 @@ const TypeOopPtr *TypeOopPtr::BOTTOM; //------------------------------TypeOopPtr------------------------------------- -TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, +TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, Offset offset, Offset field_offset, int instance_id, const TypePtr* speculative, int inline_depth) : TypePtr(t, ptr, offset, speculative, inline_depth), _const_oop(o), _klass(k), @@ -2976,53 +3211,68 @@ _is_ptr_to_boxed_value(false), _instance_id(instance_id) { if (Compile::current()->eliminate_boxing() && (t == InstPtr) && - (offset > 0) && xk && (k != 0) && k->is_instance_klass()) { - _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset); + (offset.get() > 0) && xk && (k != 0) && k->is_instance_klass()) { + _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset.get()); } #ifdef _LP64 - if (_offset > 0 || _offset == Type::OffsetTop || _offset == Type::OffsetBot) { - if (_offset == oopDesc::klass_offset_in_bytes()) { + if (this->offset() > 0 || this->offset() == Type::OffsetTop || this->offset() == Type::OffsetBot) { + if (this->offset() == oopDesc::klass_offset_in_bytes()) { _is_ptr_to_narrowklass = UseCompressedClassPointers; } else if (klass() == NULL) { // Array with unknown body type assert(this->isa_aryptr(), "only arrays without klass"); _is_ptr_to_narrowoop = UseCompressedOops; - } else if (this->isa_aryptr()) { - _is_ptr_to_narrowoop = (UseCompressedOops && klass()->is_obj_array_klass() && - _offset != arrayOopDesc::length_offset_in_bytes()); + } else if (UseCompressedOops && this->isa_aryptr() && this->offset() != arrayOopDesc::length_offset_in_bytes()) { + if (klass()->is_obj_array_klass()) { + _is_ptr_to_narrowoop = true; + } else if (klass()->is_value_array_klass() && field_offset != Offset::top && field_offset != 
Offset::bottom) { + // Check if the field of the value type array element contains oops + ciValueKlass* vk = klass()->as_value_array_klass()->element_klass()->as_value_klass(); + int foffset = field_offset.get() + vk->first_field_offset(); + ciField* field = vk->get_field_by_offset(foffset, false); + assert(field != NULL, "missing field"); + BasicType bt = field->layout_type(); + _is_ptr_to_narrowoop = (bt == T_OBJECT || bt == T_ARRAY || T_VALUETYPE); + } } else if (klass()->is_instance_klass()) { - ciInstanceKlass* ik = klass()->as_instance_klass(); - ciField* field = NULL; if (this->isa_klassptr()) { // Perm objects don't use compressed references - } else if (_offset == OffsetBot || _offset == OffsetTop) { + } else if (_offset == Offset::bottom || _offset == Offset::top) { // unsafe access _is_ptr_to_narrowoop = UseCompressedOops; } else { // exclude unsafe ops assert(this->isa_instptr(), "must be an instance ptr."); - if (klass() == ciEnv::current()->Class_klass() && - (_offset == java_lang_Class::klass_offset_in_bytes() || - _offset == java_lang_Class::array_klass_offset_in_bytes())) { + (this->offset() == java_lang_Class::klass_offset_in_bytes() || + this->offset() == java_lang_Class::array_klass_offset_in_bytes())) { // Special hidden fields from the Class. 
assert(this->isa_instptr(), "must be an instance ptr."); _is_ptr_to_narrowoop = false; } else if (klass() == ciEnv::current()->Class_klass() && - _offset >= InstanceMirrorKlass::offset_of_static_fields()) { + this->offset() >= InstanceMirrorKlass::offset_of_static_fields()) { // Static fields assert(o != NULL, "must be constant"); - ciInstanceKlass* k = o->as_instance()->java_lang_Class_klass()->as_instance_klass(); - ciField* field = k->get_field_by_offset(_offset, true); - assert(field != NULL, "missing field"); - BasicType basic_elem_type = field->layout_type(); + ciInstanceKlass* ik = o->as_instance()->java_lang_Class_klass()->as_instance_klass(); + BasicType basic_elem_type; + if (ik->is_valuetype() && this->offset() == ik->as_value_klass()->default_value_offset()) { + // Special hidden field that contains the oop of the default value type + basic_elem_type = T_VALUETYPE; + } else { + ciField* field = ik->get_field_by_offset(this->offset(), true); + assert(field != NULL, "missing field"); + basic_elem_type = field->layout_type(); + } _is_ptr_to_narrowoop = UseCompressedOops && (basic_elem_type == T_OBJECT || + basic_elem_type == T_VALUETYPE || basic_elem_type == T_ARRAY); } else { // Instance fields which contains a compressed oop references. 
- field = ik->get_field_by_offset(_offset, false); + ciInstanceKlass* ik = klass()->as_instance_klass(); + ciField* field = ik->get_field_by_offset(this->offset(), false); if (field != NULL) { BasicType basic_elem_type = field->layout_type(); _is_ptr_to_narrowoop = UseCompressedOops && (basic_elem_type == T_OBJECT || + basic_elem_type == T_VALUETYPE || basic_elem_type == T_ARRAY); } else if (klass()->equals(ciEnv::current()->Object_klass())) { // Compile::find_alias_type() cast exactness on all types to verify @@ -3040,13 +3290,13 @@ } //------------------------------make------------------------------------------- -const TypeOopPtr *TypeOopPtr::make(PTR ptr, int offset, int instance_id, - const TypePtr* speculative, int inline_depth) { +const TypeOopPtr *TypeOopPtr::make(PTR ptr, Offset offset, int instance_id, + const TypePtr* speculative, int inline_depth) { assert(ptr != Constant, "no constant generic pointers"); ciKlass* k = Compile::current()->env()->Object_klass(); bool xk = false; ciObject* o = NULL; - return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id, speculative, inline_depth))->hashcons(); + return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, Offset::bottom, instance_id, speculative, inline_depth))->hashcons(); } @@ -3085,7 +3335,7 @@ if (k == NULL) return TypeKlassPtr::OBJECT; else - return TypeKlassPtr::make(xk? Constant: NotNull, k, 0); + return TypeKlassPtr::make(xk? 
Constant: NotNull, k, Offset(0)); } //------------------------------meet------------------------------------------- @@ -3123,7 +3373,7 @@ case AnyPtr: { // Found an AnyPtr type vs self-OopPtr type const TypePtr *tp = t->is_ptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); const TypePtr* speculative = xmeet_speculative(tp); int depth = meet_inline_depth(tp->inline_depth()); @@ -3165,13 +3415,13 @@ const Type *TypeOopPtr::xdual() const { assert(klass() == Compile::current()->env()->Object_klass(), "no klasses here"); assert(const_oop() == NULL, "no constants here"); - return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative(), dual_inline_depth()); + return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), Offset::bottom, dual_instance_id(), dual_speculative(), dual_inline_depth()); } //--------------------------make_from_klass_common----------------------------- // Computes the element-type given a klass. const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_change, bool try_for_exact) { - if (klass->is_instance_klass()) { + if (klass->is_instance_klass() || klass->is_valuetype()) { Compile* C = Compile::current(); Dependencies* deps = C->dependencies(); assert((deps != NULL) == (C->method() != NULL && C->method()->code_size() > 0), "sanity"); @@ -3199,16 +3449,16 @@ } } } - return TypeInstPtr::make(TypePtr::BotPTR, klass, klass_is_exact, NULL, 0); + return TypeInstPtr::make(TypePtr::BotPTR, klass, klass_is_exact, NULL, Offset(0)); } else if (klass->is_obj_array_klass()) { - // Element is an object array. Recursively call ourself. - const TypeOopPtr *etype = TypeOopPtr::make_from_klass_common(klass->as_obj_array_klass()->element_klass(), false, try_for_exact); + // Element is an object or value array. Recursively call ourself. 
+ const TypeOopPtr* etype = TypeOopPtr::make_from_klass_common(klass->as_array_klass()->element_klass(), false, try_for_exact); bool xk = etype->klass_is_exact(); const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS); // We used to pass NotNull in here, asserting that the sub-arrays // are all not-null. This is not true in generally, as code can // slam NULLs down in the subarrays. - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, xk, 0); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, xk, Offset(0)); return arr; } else if (klass->is_type_array_klass()) { // Element is an typeArray @@ -3216,7 +3466,12 @@ const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS); // We used to pass NotNull in here, asserting that the array pointer // is not-null. That was not true in general. - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, true, 0); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, true, Offset(0)); + return arr; + } else if (klass->is_value_array_klass()) { + ciValueKlass* vk = klass->as_array_klass()->element_klass()->as_value_klass(); + const TypeAry* arr0 = TypeAry::make(TypeValueType::make(vk), TypeInt::POS); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, true, Offset(0)); return arr; } else { ShouldNotReachHere(); @@ -3229,18 +3484,18 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) { assert(!o->is_null_object(), "null object not yet handled here."); ciKlass* klass = o->klass(); - if (klass->is_instance_klass()) { - // Element is an instance + if (klass->is_instance_klass() || klass->is_valuetype()) { + // Element is an instance or value type if (require_constant) { if (!o->can_be_constant()) return NULL; } else if (!o->should_be_constant()) { - return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0); + return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, Offset(0)); } return 
TypeInstPtr::make(o); } else if (klass->is_obj_array_klass()) { // Element is an object array. Recursively call ourself. const TypeOopPtr *etype = - TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass()); + TypeOopPtr::make_from_klass_raw(klass->as_array_klass()->element_klass()); const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); // We used to pass NotNull in here, asserting that the sub-arrays // are all not-null. This is not true in generally, as code can @@ -3248,9 +3503,9 @@ if (require_constant) { if (!o->can_be_constant()) return NULL; } else if (!o->should_be_constant()) { - return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); + return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, Offset(0)); } - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, Offset(0)); return arr; } else if (klass->is_type_array_klass()) { // Element is an typeArray @@ -3262,9 +3517,22 @@ if (require_constant) { if (!o->can_be_constant()) return NULL; } else if (!o->should_be_constant()) { - return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); + return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, Offset(0)); } - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, Offset(0)); + return arr; + } else if (klass->is_value_array_klass()) { + ciValueKlass* vk = klass->as_array_klass()->element_klass()->as_value_klass(); + const TypeAry* arr0 = TypeAry::make(TypeValueType::make(vk), TypeInt::make(o->as_array()->length())); + // We used to pass NotNull in here, asserting that the sub-arrays + // are all not-null. This is not true in generally, as code can + // slam NULLs down in the subarrays. 
+ if (require_constant) { + if (!o->can_be_constant()) return NULL; + } else if (!o->should_be_constant()) { + return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, Offset(0)); + } + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, Offset(0)); return arr; } @@ -3275,9 +3543,9 @@ //------------------------------get_con---------------------------------------- intptr_t TypeOopPtr::get_con() const { assert( _ptr == Null || _ptr == Constant, "" ); - assert( _offset >= 0, "" ); + assert(offset() >= 0, ""); - if (_offset != 0) { + if (offset() != 0) { // After being ported to the compiler interface, the compiler no longer // directly manipulates the addresses of oops. Rather, it only has a pointer // to a handle at compile time. This handle is embedded in the generated @@ -3370,12 +3638,7 @@ st->print("oopptr:%s", ptr_msg[_ptr]); if( _klass_is_exact ) st->print(":exact"); if( const_oop() ) st->print(INTPTR_FORMAT, p2i(const_oop())); - switch( _offset ) { - case OffsetTop: st->print("+top"); break; - case OffsetBot: st->print("+any"); break; - case 0: break; - default: st->print("+%d",_offset); break; - } + _offset.dump2(st); if (_instance_id == InstanceTop) st->print(",iid=top"); else if (_instance_id != InstanceBot) @@ -3392,7 +3655,7 @@ bool TypeOopPtr::singleton(void) const { // detune optimizer to not generate constant oop + constant offset as a constant! 
// TopPTR, Null, AnyNull, Constant are all singletons - return (_offset == 0) && !below_centerline(_ptr); + return (offset() == 0) && !below_centerline(_ptr); } //------------------------------add_offset------------------------------------- @@ -3484,9 +3747,9 @@ const TypeInstPtr *TypeInstPtr::KLASS; //------------------------------TypeInstPtr------------------------------------- -TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, +TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, Offset off, int instance_id, const TypePtr* speculative, int inline_depth) - : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative, inline_depth), + : TypeOopPtr(InstPtr, ptr, k, xk, o, off, Offset::bottom, instance_id, speculative, inline_depth), _name(k->name()) { assert(k != NULL && (k->is_loaded() || o == NULL), @@ -3498,7 +3761,7 @@ ciKlass* k, bool xk, ciObject* o, - int offset, + Offset offset, int instance_id, const TypePtr* speculative, int inline_depth) { @@ -3585,7 +3848,7 @@ // Compute the MEET of two InstPtrs when at least one is unloaded. // Assume classes are different since called after check for same name/class-loader const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst) const { - int off = meet_offset(tinst->offset()); + Offset off = meet_offset(tinst->offset()); PTR ptr = meet_ptr(tinst->ptr()); int instance_id = meet_instance_id(tinst->instance_id()); const TypePtr* speculative = xmeet_speculative(tinst); @@ -3663,7 +3926,7 @@ case AryPtr: { // All arrays inherit from Object class const TypeAryPtr *tp = t->is_aryptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int instance_id = meet_instance_id(tp->instance_id()); const TypePtr* speculative = xmeet_speculative(tp); @@ -3675,7 +3938,7 @@ // below the centerline when the superclass is exact. We need to // do the same here. 
if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) { - return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative, depth); + return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, tp->field_offset(), instance_id, speculative, depth); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; @@ -3695,7 +3958,7 @@ if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) { // that is, tp's array type is a subtype of my klass return TypeAryPtr::make(ptr, (ptr == Constant ? tp->const_oop() : NULL), - tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative, depth); + tp->ary(), tp->klass(), tp->klass_is_exact(), offset, tp->field_offset(), instance_id, speculative, depth); } } // The other case cannot happen, since I cannot be a subtype of an array. @@ -3711,7 +3974,7 @@ case OopPtr: { // Meeting to OopPtrs // Found a OopPtr type vs self-InstPtr type const TypeOopPtr *tp = t->is_oopptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { case TopPTR: @@ -3736,7 +3999,7 @@ case AnyPtr: { // Meeting to AnyPtrs // Found an AnyPtr type vs self-InstPtr type const TypePtr *tp = t->is_ptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int instance_id = meet_instance_id(InstanceTop); const TypePtr* speculative = xmeet_speculative(tp); @@ -3776,7 +4039,7 @@ case InstPtr: { // Meeting 2 Oops? 
// Found an InstPtr sub-type vs self-InstPtr type const TypeInstPtr *tinst = t->is_instptr(); - int off = meet_offset( tinst->offset() ); + Offset off = meet_offset( tinst->offset() ); PTR ptr = meet_ptr( tinst->ptr() ); int instance_id = meet_instance_id(tinst->instance_id()); const TypePtr* speculative = xmeet_speculative(tinst); @@ -3943,6 +4206,24 @@ return make(ptr, k, false, NULL, off, instance_id, speculative, depth); } // End of case InstPtr + case ValueType: { + const TypeValueType *tv = t->is_valuetype(); + + if (above_centerline(ptr())) { + if (tv->value_klass()->is_subtype_of(_klass)) { + return t; + } else { + return TypeInstPtr::make(NotNull, _klass); + } + } else { + if (tv->value_klass()->is_subtype_of(_klass)) { + return TypeInstPtr::make(ptr(), _klass); + } else { + return TypeInstPtr::make(ptr(), ciEnv::current()->Object_klass()); + } + } + } + } // End of switch return this; // Return the double constant } @@ -4011,11 +4292,7 @@ break; } - if( _offset ) { // Dump offset, if any - if( _offset == OffsetBot ) st->print("+any"); - else if( _offset == OffsetTop ) st->print("+unknown"); - else st->print("+%d", _offset); - } + _offset.dump2(st); st->print(" *"); if (_instance_id == InstanceTop) @@ -4069,18 +4346,18 @@ const TypeAryPtr *TypeAryPtr::DOUBLES; //------------------------------make------------------------------------------- -const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, +const TypeAryPtr* TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, Offset offset, Offset field_offset, int instance_id, const TypePtr* speculative, int inline_depth) { assert(!(k == NULL && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); if (!xk) xk = ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, 
instance_id, false, speculative, inline_depth))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, field_offset, instance_id, false, speculative, inline_depth))->hashcons(); } //------------------------------make------------------------------------------- -const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, +const TypeAryPtr* TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, Offset offset, Offset field_offset, int instance_id, const TypePtr* speculative, int inline_depth, bool is_autobox_cache) { assert(!(k == NULL && ary->_elem->isa_int()), @@ -4089,13 +4366,13 @@ if (!xk) xk = (o != NULL) || ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, speculative, inline_depth))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, field_offset, instance_id, is_autobox_cache, speculative, inline_depth))->hashcons(); } //------------------------------cast_to_ptr_type------------------------------- const Type *TypeAryPtr::cast_to_ptr_type(PTR ptr) const { if( ptr == _ptr ) return this; - return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth); + return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _field_offset, _instance_id, _speculative, _inline_depth, _is_autobox_cache); } @@ -4104,18 +4381,18 @@ if( klass_is_exact == _klass_is_exact ) return this; if (!UseExactTypes) return this; if (_ary->ary_must_be_exact()) return this; // cannot clear xk - return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id, _speculative, _inline_depth); + return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _field_offset, 
_instance_id, _speculative, _inline_depth, _is_autobox_cache); } //-----------------------------cast_to_instance_id---------------------------- const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const { if( instance_id == _instance_id ) return this; - return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative, _inline_depth); + return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, _field_offset, instance_id, _speculative, _inline_depth, _is_autobox_cache); } const TypeOopPtr *TypeAryPtr::cast_to_nonconst() const { if (const_oop() == NULL) return this; - return make(NotNull, NULL, _ary, klass(), _klass_is_exact, _offset, _instance_id, _speculative, _inline_depth); + return make(NotNull, NULL, _ary, klass(), _klass_is_exact, _offset, _field_offset, _instance_id, _speculative, _inline_depth); } @@ -4182,7 +4459,7 @@ new_size = narrow_size_type(new_size); if (new_size == size()) return this; const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable()); - return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth); + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _field_offset, _instance_id, _speculative, _inline_depth, _is_autobox_cache); } //------------------------------cast_to_stable--------------------------------- @@ -4200,7 +4477,7 @@ const TypeAry* new_ary = TypeAry::make(elem, size(), stable); - return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth); + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _field_offset, _instance_id, _speculative, _inline_depth, _is_autobox_cache); } //-----------------------------stable_dimension-------------------------------- @@ -4222,7 +4499,7 @@ TypePtr::PTR ptr_type = cache ? 
TypePtr::NotNull : TypePtr::AnyNull; etype = etype->cast_to_ptr_type(TypePtr::NotNull)->is_oopptr(); const TypeAry* new_ary = TypeAry::make(etype, size(), is_stable()); - return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth, cache); + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _field_offset, _instance_id, _speculative, _inline_depth, cache); } //------------------------------eq--------------------------------------------- @@ -4231,13 +4508,14 @@ const TypeAryPtr *p = t->is_aryptr(); return _ary == p->_ary && // Check array - TypeOopPtr::eq(p); // Check sub-parts + TypeOopPtr::eq(p) &&// Check sub-parts + _field_offset == p->_field_offset; } //------------------------------hash------------------------------------------- // Type-specific hashing function. int TypeAryPtr::hash(void) const { - return (intptr_t)_ary + TypeOopPtr::hash(); + return (intptr_t)_ary + TypeOopPtr::hash() + _field_offset.get(); } //------------------------------meet------------------------------------------- @@ -4270,7 +4548,7 @@ case OopPtr: { // Meeting to OopPtrs // Found a OopPtr type vs self-AryPtr type const TypeOopPtr *tp = t->is_oopptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int depth = meet_inline_depth(tp->inline_depth()); const TypePtr* speculative = xmeet_speculative(tp); @@ -4279,7 +4557,7 @@ case AnyNull: { int instance_id = meet_instance_id(InstanceTop); return make(ptr, (ptr == Constant ? 
const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); + _ary, _klass, _klass_is_exact, offset, _field_offset, instance_id, speculative, depth); } case BotPTR: case NotNull: { @@ -4293,7 +4571,7 @@ case AnyPtr: { // Meeting two AnyPtrs // Found an AnyPtr type vs self-AryPtr type const TypePtr *tp = t->is_ptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); const TypePtr* speculative = xmeet_speculative(tp); int depth = meet_inline_depth(tp->inline_depth()); @@ -4309,7 +4587,7 @@ case AnyNull: { int instance_id = meet_instance_id(InstanceTop); return make(ptr, (ptr == Constant ? const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); + _ary, _klass, _klass_is_exact, offset, _field_offset, instance_id, speculative, depth); } default: ShouldNotReachHere(); } @@ -4321,7 +4599,8 @@ case AryPtr: { // Meeting 2 references? const TypeAryPtr *tap = t->is_aryptr(); - int off = meet_offset(tap->offset()); + Offset off = meet_offset(tap->offset()); + Offset field_off = meet_field_offset(tap->field_offset()); const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary(); PTR ptr = meet_ptr(tap->ptr()); int instance_id = meet_instance_id(tap->instance_id()); @@ -4356,7 +4635,7 @@ if (above_centerline(ptr)) { tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable); } - return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot, speculative, depth); + return make(NotNull, NULL, tary, lazy_klass, false, off, field_off, InstanceBot, speculative, depth); } bool xk = false; @@ -4369,7 +4648,7 @@ } else { xk = (tap->_klass_is_exact | this->_klass_is_exact); } - return make(ptr, const_oop(), tary, lazy_klass, xk, off, instance_id, speculative, depth); + return make(ptr, const_oop(), tary, lazy_klass, xk, off, field_off, instance_id, speculative, depth); case Constant: { ciObject* o = const_oop(); if( _ptr == 
Constant ) { @@ -4388,7 +4667,7 @@ // Only precise for identical arrays xk = this->_klass_is_exact && (klass() == tap->klass()); } - return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, instance_id, speculative, depth); + return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, field_off, instance_id, speculative, depth); } case NotNull: case BotPTR: @@ -4397,7 +4676,7 @@ xk = tap->_klass_is_exact; else xk = (tap->_klass_is_exact & this->_klass_is_exact) && (klass() == tap->klass()); // Only precise for identical arrays - return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, instance_id, speculative, depth); + return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, field_off, instance_id, speculative, depth); default: ShouldNotReachHere(); } } @@ -4405,7 +4684,7 @@ // All arrays inherit from Object class case InstPtr: { const TypeInstPtr *tp = t->is_instptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int instance_id = meet_instance_id(tp->instance_id()); const TypePtr* speculative = xmeet_speculative(tp); @@ -4417,12 +4696,12 @@ // below the centerline when the superclass is exact. We need to // do the same here. 
if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) { - return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); + return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, _field_offset, instance_id, speculative, depth); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; instance_id = InstanceBot; - return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative, depth); + return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative, depth); } case Constant: case NotNull: @@ -4437,7 +4716,7 @@ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) { // that is, my array type is a subtype of 'tp' klass return make(ptr, (ptr == Constant ? const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); + _ary, _klass, _klass_is_exact, offset, _field_offset, instance_id, speculative, depth); } } // The other case cannot happen, since t cannot be a subtype of an array. 
@@ -4449,6 +4728,12 @@ default: typerr(t); } } + + case ValueType: { + // All value types inherit from Object + return TypeInstPtr::make(ptr(), ciEnv::current()->Object_klass()); + } + } return this; // Lint noise } @@ -4456,7 +4741,16 @@ //------------------------------xdual------------------------------------------ // Dual: compute field-by-field dual const Type *TypeAryPtr::xdual() const { - return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative(), dual_inline_depth()); + return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(), _klass, _klass_is_exact, dual_offset(), dual_field_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative(), dual_inline_depth()); +} + +Type::Offset TypeAryPtr::meet_field_offset(const Type::Offset offset) const { + return _field_offset.meet(offset); +} + +//------------------------------dual_offset------------------------------------ +Type::Offset TypeAryPtr::dual_field_offset() const { + return _field_offset.dual(); } //----------------------interface_vs_oop--------------------------------------- @@ -4493,16 +4787,21 @@ break; } - if( _offset != 0 ) { + if (elem()->isa_valuetype()) { + st->print("("); + _field_offset.dump2(st); + st->print(")"); + } + if (offset() != 0) { int header_size = objArrayOopDesc::header_size() * wordSize; - if( _offset == OffsetTop ) st->print("+undefined"); - else if( _offset == OffsetBot ) st->print("+any"); - else if( _offset < header_size ) st->print("+%d", _offset); + if( _offset == Offset::top ) st->print("+undefined"); + else if( _offset == Offset::bottom ) st->print("+any"); + else if( offset() < header_size ) st->print("+%d", offset()); else { BasicType basic_elem_type = elem()->basic_type(); int array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); int elem_size = type2aelembytes(basic_elem_type); - st->print("[%d]", (_offset - array_base)/elem_size); + 
st->print("[%d]", (offset() - array_base)/elem_size); } } st->print(" *"); @@ -4523,7 +4822,7 @@ //------------------------------add_offset------------------------------------- const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const { - return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset), _inline_depth); + return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _field_offset, _instance_id, add_offset_speculative(offset), _inline_depth, _is_autobox_cache); } const Type *TypeAryPtr::remove_speculative() const { @@ -4531,23 +4830,75 @@ return this; } assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth"); - return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL, _inline_depth); + return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _field_offset, _instance_id, NULL, _inline_depth, _is_autobox_cache); } const TypePtr *TypeAryPtr::with_inline_depth(int depth) const { if (!UseInlineDepthForSpeculativeTypes) { return this; } - return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, _speculative, depth); + return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _field_offset, _instance_id, _speculative, depth, _is_autobox_cache); +} + +const TypeAryPtr* TypeAryPtr::with_field_offset(int offset) const { + return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, Offset(offset), _instance_id, _speculative, _inline_depth, _is_autobox_cache); +} + +const TypePtr* TypeAryPtr::add_field_offset_and_offset(intptr_t offset) const { + int adj = 0; + if (offset != Type::OffsetBot && offset != Type::OffsetTop) { + const Type* elemtype = elem(); + 
if (elemtype->isa_valuetype()) { + if (_offset.get() != OffsetBot && _offset.get() != OffsetTop) { + adj = _offset.get(); + offset += _offset.get(); + } + uint header = arrayOopDesc::base_offset_in_bytes(T_OBJECT); + if (_field_offset.get() != OffsetBot && _field_offset.get() != OffsetTop) { + offset += _field_offset.get(); + if (_offset.get() == OffsetBot || _offset.get() == OffsetTop) { + offset += header; + } + } + if (offset >= (intptr_t)header || offset < 0) { + // Try to get the field of the value type array element we are pointing to + ciKlass* arytype_klass = klass(); + ciValueArrayKlass* vak = arytype_klass->as_value_array_klass(); + ciValueKlass* vk = vak->element_klass()->as_value_klass(); + int shift = vak->log2_element_size(); + int mask = (1 << shift) - 1; + intptr_t field_offset = ((offset - header) & mask); + ciField* field = vk->get_field_by_offset(field_offset + vk->first_field_offset(), false); + if (field == NULL) { + // This may happen with nested AddP(base, AddP(base, base, offset), longcon(16)) + return add_offset(offset); + } else { + return with_field_offset(field_offset)->add_offset(offset - field_offset - adj); + } + } + } + } + return add_offset(offset - adj); +} + +// Return offset incremented by field_offset for flattened value type arrays +const int TypeAryPtr::flattened_offset() const { + int offset = _offset.get(); + if (offset != Type::OffsetBot && offset != Type::OffsetTop && + _field_offset != Offset::bottom && _field_offset != Offset::top) { + offset += _field_offset.get(); + } + return offset; } const TypePtr *TypeAryPtr::with_instance_id(int instance_id) const { assert(is_known_instance(), "should be known"); - return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, instance_id, _speculative, _inline_depth); + return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _field_offset, instance_id, _speculative, _inline_depth); } 
//============================================================================= + //------------------------------hash------------------------------------------- // Type-specific hashing function. int TypeNarrowPtr::hash(void) const { @@ -4636,12 +4987,14 @@ case KlassPtr: case NarrowOop: case NarrowKlass: - case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: return this; + case ValueType: + return t->xmeet(this); + default: // All else is a mistake typerr(t); @@ -4720,7 +5073,7 @@ bool TypeMetadataPtr::singleton(void) const { // detune optimizer to not generate constant metadata + constant offset as a constant! // TopPTR, Null, AnyNull, Constant are all singletons - return (_offset == 0) && !below_centerline(_ptr); + return (offset() == 0) && !below_centerline(_ptr); } //------------------------------add_offset------------------------------------- @@ -4740,9 +5093,9 @@ //------------------------------get_con---------------------------------------- intptr_t TypeMetadataPtr::get_con() const { assert( _ptr == Null || _ptr == Constant, "" ); - assert( _offset >= 0, "" ); + assert(offset() >= 0, ""); - if (_offset != 0) { + if (offset() != 0) { // After being ported to the compiler interface, the compiler no longer // directly manipulates the addresses of oops. Rather, it only has a pointer // to a handle at compile time. This handle is embedded in the generated @@ -4793,7 +5146,7 @@ case AnyPtr: { // Found an AnyPtr type vs self-OopPtr type const TypePtr *tp = t->is_ptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { case Null: @@ -4819,7 +5172,7 @@ case MetadataPtr: { const TypeMetadataPtr *tp = t->is_metadataptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR tptr = tp->ptr(); PTR ptr = meet_ptr(tptr); ciMetadata* md = (tptr == TopPTR) ? 
metadata() : tp->metadata(); @@ -4852,11 +5205,11 @@ void TypeMetadataPtr::dump2( Dict &d, uint depth, outputStream *st ) const { st->print("metadataptr:%s", ptr_msg[_ptr]); if( metadata() ) st->print(INTPTR_FORMAT, p2i(metadata())); - switch( _offset ) { + switch (offset()) { case OffsetTop: st->print("+top"); break; case OffsetBot: st->print("+any"); break; case 0: break; - default: st->print("+%d",_offset); break; + default: st->print("+%d",offset()); break; } } #endif @@ -4866,20 +5219,20 @@ // Convenience common pre-built type. const TypeMetadataPtr *TypeMetadataPtr::BOTTOM; -TypeMetadataPtr::TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset): +TypeMetadataPtr::TypeMetadataPtr(PTR ptr, ciMetadata* metadata, Offset offset): TypePtr(MetadataPtr, ptr, offset), _metadata(metadata) { } const TypeMetadataPtr* TypeMetadataPtr::make(ciMethod* m) { - return make(Constant, m, 0); + return make(Constant, m, Offset(0)); } const TypeMetadataPtr* TypeMetadataPtr::make(ciMethodData* m) { - return make(Constant, m, 0); + return make(Constant, m, Offset(0)); } //------------------------------make------------------------------------------- // Create a meta data constant -const TypeMetadataPtr *TypeMetadataPtr::make(PTR ptr, ciMetadata* m, int offset) { +const TypeMetadataPtr* TypeMetadataPtr::make(PTR ptr, ciMetadata* m, Offset offset) { assert(m == NULL || !m->is_klass(), "wrong type"); return (TypeMetadataPtr*)(new TypeMetadataPtr(ptr, m, offset))->hashcons(); } @@ -4893,34 +5246,28 @@ const TypeKlassPtr *TypeKlassPtr::OBJECT_OR_NULL; //------------------------------TypeKlassPtr----------------------------------- -TypeKlassPtr::TypeKlassPtr( PTR ptr, ciKlass* klass, int offset ) +TypeKlassPtr::TypeKlassPtr( PTR ptr, ciKlass* klass, Offset offset ) : TypePtr(KlassPtr, ptr, offset), _klass(klass), _klass_is_exact(ptr == Constant) { } //------------------------------make------------------------------------------- // ptr to klass 'k', if Constant, or possibly to a 
sub-klass if not a Constant -const TypeKlassPtr *TypeKlassPtr::make( PTR ptr, ciKlass* k, int offset ) { - assert( k != NULL, "Expect a non-NULL klass"); - assert(k->is_instance_klass() || k->is_array_klass(), "Incorrect type of klass oop"); - TypeKlassPtr *r = - (TypeKlassPtr*)(new TypeKlassPtr(ptr, k, offset))->hashcons(); - - return r; +const TypeKlassPtr* TypeKlassPtr::make(PTR ptr, ciKlass* k, Offset offset) { + assert(k == NULL || k->is_instance_klass() || k->is_array_klass(), "Incorrect type of klass oop"); + return (TypeKlassPtr*)(new TypeKlassPtr(ptr, k, offset))->hashcons(); } //------------------------------eq--------------------------------------------- // Structural equality check for Type representations bool TypeKlassPtr::eq( const Type *t ) const { const TypeKlassPtr *p = t->is_klassptr(); - return - klass()->equals(p->klass()) && - TypePtr::eq(p); + return klass() == p->klass() && TypePtr::eq(p); } //------------------------------hash------------------------------------------- // Type-specific hashing function. int TypeKlassPtr::hash(void) const { - return java_add((jint)klass()->hash(), (jint)TypePtr::hash()); + return java_add(klass() != NULL ? klass()->hash() : (jint)0, (jint)TypePtr::hash()); } //------------------------------singleton-------------------------------------- @@ -4929,7 +5276,7 @@ bool TypeKlassPtr::singleton(void) const { // detune optimizer to not generate constant klass + constant offset as a constant! // TopPTR, Null, AnyNull, Constant are all singletons - return (_offset == 0) && !below_centerline(_ptr); + return (offset() == 0) && !below_centerline(_ptr); } // Do not allow interface-vs.-noninterface joins to collapse to top. 
@@ -4941,7 +5288,7 @@ const TypeKlassPtr* ktkp = kills->isa_klassptr(); if (ft->empty()) { - if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) + if (!empty() && ktkp != NULL && ktkp->is_loaded() && ktkp->klass()->is_interface()) return kills; // Uplift to interface return Type::TOP; // Canonical empty value @@ -4964,7 +5311,6 @@ ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const { // Compute _klass based on element type. ciKlass* k_ary = NULL; - const TypeInstPtr *tinst; const TypeAryPtr *tary; const Type* el = elem(); if (el->isa_narrowoop()) { @@ -4972,9 +5318,11 @@ } // Get element klass - if ((tinst = el->isa_instptr()) != NULL) { - // Compute array klass from element klass - k_ary = ciObjArrayKlass::make(tinst->klass()); + if (el->isa_instptr()) { + // Compute object array klass from element klass + k_ary = ciArrayKlass::make(el->is_oopptr()->klass()); + } else if (el->isa_valuetype()) { + k_ary = ciArrayKlass::make(el->is_valuetype()->value_klass()); } else if ((tary = el->isa_aryptr()) != NULL) { // Compute array klass from element klass ciKlass* k_elem = tary->klass(); @@ -5039,7 +5387,7 @@ // TypeAryPtr::OOPS->klass() are not common enough to matter. ((TypeAryPtr*)this)->_klass = k_ary; if (UseCompressedOops && k_ary != NULL && k_ary->is_obj_array_klass() && - _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes()) { + offset() != 0 && offset() != arrayOopDesc::length_offset_in_bytes()) { ((TypeAryPtr*)this)->_is_ptr_to_narrowoop = true; } } @@ -5074,6 +5422,7 @@ // It will be NotNull, and exact if and only if the klass type is exact. 
const TypeOopPtr* TypeKlassPtr::as_instance_type() const { ciKlass* k = klass(); + assert(k != NULL, "klass should not be NULL"); bool xk = klass_is_exact(); //return TypeInstPtr::make(TypePtr::NotNull, k, xk, NULL, 0); const TypeOopPtr* toop = TypeOopPtr::make_from_klass_raw(k); @@ -5113,7 +5462,7 @@ case AnyPtr: { // Meeting to AnyPtrs // Found an AnyPtr type vs self-KlassPtr type const TypePtr *tp = t->is_ptr(); - int offset = meet_offset(tp->offset()); + Offset offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { case TopPTR: @@ -5154,9 +5503,17 @@ case KlassPtr: { // Meet two KlassPtr types const TypeKlassPtr *tkls = t->is_klassptr(); - int off = meet_offset(tkls->offset()); + Offset off = meet_offset(tkls->offset()); PTR ptr = meet_ptr(tkls->ptr()); + if (klass() == NULL || tkls->klass() == NULL) { + ciKlass* k = NULL; + if (ptr == Constant) { + k = (klass() == NULL) ? tkls->klass() : klass(); + } + return make(ptr, k, off); + } + // Check for easy case; klasses are equal (and perhaps not loaded!) // If we have constants, then we created oops so classes are loaded // and we can handle the constants further down. This case handles @@ -5222,9 +5579,9 @@ //------------------------------get_con---------------------------------------- intptr_t TypeKlassPtr::get_con() const { assert( _ptr == Null || _ptr == Constant, "" ); - assert( _offset >= 0, "" ); + assert(offset() >= 0, ""); - if (_offset != 0) { + if (offset() != 0) { // After being ported to the compiler interface, the compiler no longer // directly manipulates the addresses of oops. Rather, it only has a pointer // to a handle at compile time. 
This handle is embedded in the generated @@ -5247,11 +5604,11 @@ st->print("precise "); case NotNull: { - const char *name = klass()->name()->as_utf8(); - if( name ) { + if (klass() != NULL) { + const char* name = klass()->name()->as_utf8(); st->print("klass %s: " INTPTR_FORMAT, name, p2i(klass())); } else { - ShouldNotReachHere(); + st->print("klass BOTTOM"); } } case BotPTR: @@ -5265,11 +5622,7 @@ break; } - if( _offset ) { // Dump offset, if any - if( _offset == OffsetBot ) { st->print("+any"); } - else if( _offset == OffsetTop ) { st->print("+unknown"); } - else { st->print("+%d", _offset); } - } + _offset.dump2(st); st->print(" *"); } @@ -5281,8 +5634,13 @@ // Convenience common pre-built types. //------------------------------make------------------------------------------- -const TypeFunc *TypeFunc::make( const TypeTuple *domain, const TypeTuple *range ) { - return (TypeFunc*)(new TypeFunc(domain,range))->hashcons(); +const TypeFunc *TypeFunc::make(const TypeTuple *domain_sig, const TypeTuple* domain_cc, + const TypeTuple *range_sig, const TypeTuple *range_cc) { + return (TypeFunc*)(new TypeFunc(domain_sig, domain_cc, range_sig, range_cc))->hashcons(); +} + +const TypeFunc *TypeFunc::make(const TypeTuple *domain, const TypeTuple *range) { + return make(domain, domain, range, range); } //------------------------------make------------------------------------------- @@ -5290,14 +5648,16 @@ Compile* C = Compile::current(); const TypeFunc* tf = C->last_tf(method); // check cache if (tf != NULL) return tf; // The hit rate here is almost 50%. 
- const TypeTuple *domain; - if (method->is_static()) { - domain = TypeTuple::make_domain(NULL, method->signature()); - } else { - domain = TypeTuple::make_domain(method->holder(), method->signature()); - } - const TypeTuple *range = TypeTuple::make_range(method->signature()); - tf = TypeFunc::make(domain, range); + // Value types are not passed/returned by reference, instead each field of + // the value type is passed/returned as an argument. We maintain two views of + // the argument/return list here: one based on the signature (with a value + // type argument/return as a single slot), one based on the actual calling + // convention (with a value type argument/return as a list of its fields). + const TypeTuple* domain_sig = TypeTuple::make_domain(method, false); + const TypeTuple* domain_cc = TypeTuple::make_domain(method, method->has_scalarized_args()); + const TypeTuple* range_sig = TypeTuple::make_range(method->signature(), false); + const TypeTuple* range_cc = TypeTuple::make_range(method->signature(), ValueTypeReturnedAsFields); + tf = TypeFunc::make(domain_sig, domain_cc, range_sig, range_cc); C->set_last_tf(method, tf); // fill cache return tf; } @@ -5333,29 +5693,31 @@ // Structural equality check for Type representations bool TypeFunc::eq( const Type *t ) const { const TypeFunc *a = (const TypeFunc*)t; - return _domain == a->_domain && - _range == a->_range; + return _domain_sig == a->_domain_sig && + _domain_cc == a->_domain_cc && + _range_sig == a->_range_sig && + _range_cc == a->_range_cc; } //------------------------------hash------------------------------------------- // Type-specific hashing function. 
int TypeFunc::hash(void) const { - return (intptr_t)_domain + (intptr_t)_range; + return (intptr_t)_domain_sig + (intptr_t)_domain_cc + (intptr_t)_range_sig + (intptr_t)_range_cc; } //------------------------------dump2------------------------------------------ // Dump Function Type #ifndef PRODUCT void TypeFunc::dump2( Dict &d, uint depth, outputStream *st ) const { - if( _range->cnt() <= Parms ) + if( _range_sig->cnt() <= Parms ) st->print("void"); else { uint i; - for (i = Parms; i < _range->cnt()-1; i++) { - _range->field_at(i)->dump2(d,depth,st); + for (i = Parms; i < _range_sig->cnt()-1; i++) { + _range_sig->field_at(i)->dump2(d,depth,st); st->print("/"); } - _range->field_at(i)->dump2(d,depth,st); + _range_sig->field_at(i)->dump2(d,depth,st); } st->print(" "); st->print("( "); @@ -5364,11 +5726,11 @@ return; } d.Insert((void*)this,(void*)this); // Stop recursion - if (Parms < _domain->cnt()) - _domain->field_at(Parms)->dump2(d,depth-1,st); - for (uint i = Parms+1; i < _domain->cnt(); i++) { + if (Parms < _domain_sig->cnt()) + _domain_sig->field_at(Parms)->dump2(d,depth-1,st); + for (uint i = Parms+1; i < _domain_sig->cnt(); i++) { st->print(", "); - _domain->field_at(i)->dump2(d,depth-1,st); + _domain_sig->field_at(i)->dump2(d,depth-1,st); } st->print(" )"); } @@ -5388,8 +5750,8 @@ BasicType TypeFunc::return_type() const{ - if (range()->cnt() == TypeFunc::Parms) { + if (range_sig()->cnt() == TypeFunc::Parms) { return T_VOID; } - return range()->field_at(TypeFunc::Parms)->basic_type(); + return range_sig()->field_at(TypeFunc::Parms)->basic_type(); } --- old/src/hotspot/share/opto/type.hpp 2019-03-11 14:27:00.518354403 +0100 +++ new/src/hotspot/share/opto/type.hpp 2019-03-11 14:27:00.246354407 +0100 @@ -25,8 +25,10 @@ #ifndef SHARE_OPTO_TYPE_HPP #define SHARE_OPTO_TYPE_HPP +#include "ci/ciValueKlass.hpp" #include "opto/adlcVMDeps.hpp" #include "runtime/handles.hpp" +#include "runtime/sharedRuntime.hpp" // Portions of code courtesy of Clifford Click @@ -52,6 
+54,7 @@ class TypeNarrowKlass; class TypeAry; class TypeTuple; +class TypeValueType; class TypeVect; class TypeVectS; class TypeVectD; @@ -92,6 +95,7 @@ VectorX, // 128bit Vector types VectorY, // 256bit Vector types VectorZ, // 512bit Vector types + ValueType, // Value type AnyPtr, // Any old raw, klass, inst, or array pointer RawPtr, // Raw (non-oop) pointers @@ -123,6 +127,30 @@ OffsetBot = -2000000001 // any possible offset }; + class Offset { + private: + const int _offset; + + public: + explicit Offset(int offset) : _offset(offset) {} + + const Offset meet(const Offset other) const; + const Offset dual() const; + const Offset add(intptr_t offset) const; + bool operator==(const Offset& other) const { + return _offset == other._offset; + } + bool operator!=(const Offset& other) const { + return _offset != other._offset; + } + int get() const { return _offset; } + + void dump2(outputStream *st) const; + + static const Offset top; + static const Offset bottom; + }; + // Min and max WIDEN values. 
enum WIDEN { WidenMin = 0, @@ -273,9 +301,6 @@ bool is_ptr_to_narrowoop() const; bool is_ptr_to_narrowklass() const; - bool is_ptr_to_boxing_obj() const; - - // Convenience access float getf() const; double getd() const; @@ -308,6 +333,8 @@ const TypeInstPtr *is_instptr() const; // Instance const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr const TypeAryPtr *is_aryptr() const; // Array oop + const TypeValueType* isa_valuetype() const; // Returns NULL if not Value Type + const TypeValueType* is_valuetype() const; // Value Type const TypeMetadataPtr *isa_metadataptr() const; // Returns NULL if not oop ptr type const TypeMetadataPtr *is_metadataptr() const; // Java-style GC'd pointer @@ -317,6 +344,9 @@ virtual bool is_finite() const; // Has a finite value virtual bool is_nan() const; // Is not a number (NaN) + bool is_valuetypeptr() const; + ciValueKlass* value_klass() const; + // Returns this ptr type or the equivalent ptr type for this compressed pointer. const TypePtr* make_ptr() const; @@ -663,8 +693,8 @@ } static const TypeTuple *make( uint cnt, const Type **fields ); - static const TypeTuple *make_range(ciSignature *sig); - static const TypeTuple *make_domain(ciInstanceKlass* recv, ciSignature *sig); + static const TypeTuple *make_range(ciSignature* sig, bool ret_vt_fields = false); + static const TypeTuple *make_domain(ciMethod* method, bool vt_fields_as_args = false); // Subroutine call type with space allocated for argument types // Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly @@ -715,6 +745,9 @@ bool ary_must_be_exact() const; // true if arrays of such are never generic virtual const Type* remove_speculative() const; virtual const Type* cleanup_speculative() const; + + bool is_value_type_array() const { return _elem->isa_valuetype() != NULL; } + #ifdef ASSERT // One type is interface, the other is oop virtual bool interface_vs_oop(const Type *t) const; @@ -724,6 +757,41 @@ #endif }; + 
+//------------------------------TypeValue--------------------------------------- +// Class of Value Type Types +class TypeValueType : public Type { +private: + ciValueKlass* _vk; + bool _larval; + +protected: + TypeValueType(ciValueKlass* vk, bool larval) + : Type(ValueType), + _vk(vk), _larval(larval) { + } + +public: + static const TypeValueType* make(ciValueKlass* vk, bool larval = false); + ciValueKlass* value_klass() const { return _vk; } + bool larval() const { return _larval; } + + virtual bool eq(const Type* t) const; + virtual int hash() const; // Type specific hashing + virtual bool singleton(void) const; // TRUE if type is a singleton + virtual bool empty(void) const; // TRUE if type is vacuous + + virtual const Type* xmeet(const Type* t) const; + virtual const Type* xdual() const; // Compute dual right now. + + virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return false; } + virtual bool would_improve_ptr(ProfilePtrKind ptr_kind) const { return false; } + +#ifndef PRODUCT + virtual void dump2(Dict &d, uint, outputStream* st) const; // Specialized per-Type dumping +#endif +}; + //------------------------------TypeVect--------------------------------------- // Class of Vector Types class TypeVect : public Type { @@ -803,7 +871,7 @@ public: enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR }; protected: - TypePtr(TYPES t, PTR ptr, int offset, + TypePtr(TYPES t, PTR ptr, Offset offset, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom) : Type(t), _speculative(speculative), _inline_depth(inline_depth), _offset(offset), @@ -846,13 +914,13 @@ #endif public: - const int _offset; // Offset into oop, with TOP & BOT + const Offset _offset; // Offset into oop, with TOP & BOT const PTR _ptr; // Pointer equivalence class - const int offset() const { return _offset; } + const int offset() const { return _offset.get(); } const PTR ptr() const { return _ptr; } - static const TypePtr *make(TYPES t, 
PTR ptr, int offset, + static const TypePtr* make(TYPES t, PTR ptr, Offset offset, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom); @@ -861,8 +929,10 @@ virtual intptr_t get_con() const; - int xadd_offset( intptr_t offset ) const; + Offset xadd_offset(intptr_t offset) const; virtual const TypePtr *add_offset( intptr_t offset ) const; + virtual const int flattened_offset() const { return offset(); } + virtual bool eq(const Type *t) const; virtual int hash() const; // Type specific hashing @@ -870,8 +940,8 @@ virtual bool empty(void) const; // TRUE if type is vacuous virtual const Type *xmeet( const Type *t ) const; virtual const Type *xmeet_helper( const Type *t ) const; - int meet_offset( int offset ) const; - int dual_offset( ) const; + Offset meet_offset(int offset) const; + Offset dual_offset() const; virtual const Type *xdual() const; // Compute dual right now. // meet, dual and join over pointer equivalence sets @@ -916,7 +986,7 @@ // include the stack pointer, top of heap, card-marking area, handles, etc. class TypeRawPtr : public TypePtr { protected: - TypeRawPtr( PTR ptr, address bits ) : TypePtr(RawPtr,ptr,0), _bits(bits){} + TypeRawPtr(PTR ptr, address bits) : TypePtr(RawPtr,ptr,Offset(0)), _bits(bits){} public: virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -947,8 +1017,8 @@ // Some kind of oop (Java pointer), either instance or array. class TypeOopPtr : public TypePtr { protected: - TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, - const TypePtr* speculative, int inline_depth); + TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, Offset offset, Offset field_offset, + int instance_id, const TypePtr* speculative, int inline_depth); public: virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -1007,7 +1077,7 @@ bool require_constant = false); // Make a generic (unclassed) pointer to an oop. 
- static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, + static const TypeOopPtr* make(PTR ptr, Offset offset, int instance_id, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom); @@ -1022,7 +1092,10 @@ bool is_ptr_to_boxed_value() const { return _is_ptr_to_boxed_value; } bool is_known_instance() const { return _instance_id > 0; } int instance_id() const { return _instance_id; } - bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } + bool is_known_instance_field() const { return is_known_instance() && _offset.get() >= 0; } + + virtual bool can_be_value_type() const { return EnableValhalla && can_be_value_type_raw(); } + virtual bool can_be_value_type_raw() const { return _klass == NULL || _klass->is_valuetype() || ((_klass->is_java_lang_Object() || _klass->is_interface()) && !klass_is_exact()); } virtual intptr_t get_con() const; @@ -1062,7 +1135,7 @@ // Class of Java object pointers, pointing either to non-array Java instances // or to a Klass* (including array klasses). class TypeInstPtr : public TypeOopPtr { - TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, + TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, Offset offset, int instance_id, const TypePtr* speculative, int inline_depth); virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -1076,30 +1149,30 @@ // Make a pointer to a constant oop. static const TypeInstPtr *make(ciObject* o) { - return make(TypePtr::Constant, o->klass(), true, o, 0, InstanceBot); + return make(TypePtr::Constant, o->klass(), true, o, Offset(0), InstanceBot); } // Make a pointer to a constant oop with offset. - static const TypeInstPtr *make(ciObject* o, int offset) { + static const TypeInstPtr* make(ciObject* o, Offset offset) { return make(TypePtr::Constant, o->klass(), true, o, offset, InstanceBot); } // Make a pointer to some value of type klass. 
static const TypeInstPtr *make(PTR ptr, ciKlass* klass) { - return make(ptr, klass, false, NULL, 0, InstanceBot); + return make(ptr, klass, false, NULL, Offset(0), InstanceBot); } // Make a pointer to some non-polymorphic value of exactly type klass. static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) { - return make(ptr, klass, true, NULL, 0, InstanceBot); + return make(ptr, klass, true, NULL, Offset(0), InstanceBot); } // Make a pointer to some value of type klass with offset. - static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) { + static const TypeInstPtr *make(PTR ptr, ciKlass* klass, Offset offset) { return make(ptr, klass, false, NULL, offset, InstanceBot); } // Make a pointer to an oop. - static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, + static const TypeInstPtr* make(PTR ptr, ciKlass* k, bool xk, ciObject* o, Offset offset, int instance_id = InstanceBot, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom); @@ -1146,12 +1219,13 @@ //------------------------------TypeAryPtr------------------------------------- // Class of Java array pointers class TypeAryPtr : public TypeOopPtr { - TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, - int offset, int instance_id, bool is_autobox_cache, - const TypePtr* speculative, int inline_depth) - : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative, inline_depth), + TypeAryPtr(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, + Offset offset, Offset field_offset, int instance_id, bool is_autobox_cache, + const TypePtr* speculative, int inline_depth) + : TypeOopPtr(AryPtr, ptr, k, xk, o, offset, field_offset, instance_id, speculative, inline_depth), _ary(ary), - _is_autobox_cache(is_autobox_cache) + _is_autobox_cache(is_autobox_cache), + _field_offset(field_offset) { #ifdef ASSERT if (k != NULL) { @@ -1174,6 +1248,12 @@ virtual int hash() const; // Type specific hashing const TypeAry *_ary; 
// Array we point into const bool _is_autobox_cache; + // For flattened value type arrays, each field of the value type in + // the array has its own memory slice so we need to keep track of + // which field is accessed + const Offset _field_offset; + Offset meet_field_offset(const Type::Offset offset) const; + Offset dual_field_offset() const; ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const; @@ -1187,15 +1267,18 @@ bool is_autobox_cache() const { return _is_autobox_cache; } - static const TypeAryPtr *make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, + static const TypeAryPtr* make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, Offset offset, + Offset field_offset = Offset::bottom, int instance_id = InstanceBot, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom); // Constant pointer to array - static const TypeAryPtr *make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, + static const TypeAryPtr* make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, Offset offset, + Offset field_offset = Offset::bottom, int instance_id = InstanceBot, const TypePtr* speculative = NULL, - int inline_depth = InlineDepthBottom, bool is_autobox_cache = false); + int inline_depth = InlineDepthBottom, + bool is_autobox_cache = false); // Return a 'ptr' version of this type virtual const Type *cast_to_ptr_type(PTR ptr) const; @@ -1226,6 +1309,13 @@ const TypeAryPtr* cast_to_autobox_cache(bool cache) const; + const int flattened_offset() const; + const Offset field_offset() const { return _field_offset; } + const TypeAryPtr* with_field_offset(int offset) const; + const TypePtr* add_field_offset_and_offset(intptr_t offset) const; + + virtual bool can_be_value_type() const { return false; } + // Convenience common pre-built types. 
static const TypeAryPtr *RANGE; static const TypeAryPtr *OOPS; @@ -1257,7 +1347,7 @@ // Some kind of metadata, either Method*, MethodData* or CPCacheOop class TypeMetadataPtr : public TypePtr { protected: - TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset); + TypeMetadataPtr(PTR ptr, ciMetadata* metadata, Offset offset); // Do not allow interface-vs.-noninterface joins to collapse to top. virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; public: @@ -1269,7 +1359,7 @@ ciMetadata* _metadata; public: - static const TypeMetadataPtr* make(PTR ptr, ciMetadata* m, int offset); + static const TypeMetadataPtr* make(PTR ptr, ciMetadata* m, Offset offset); static const TypeMetadataPtr* make(ciMethod* m); static const TypeMetadataPtr* make(ciMethodData* m); @@ -1296,7 +1386,7 @@ //------------------------------TypeKlassPtr----------------------------------- // Class of Java Klass pointers class TypeKlassPtr : public TypePtr { - TypeKlassPtr( PTR ptr, ciKlass* klass, int offset ); + TypeKlassPtr(PTR ptr, ciKlass* klass, Offset offset); protected: virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; @@ -1306,47 +1396,23 @@ virtual bool singleton(void) const; // TRUE if type is a singleton private: - static const TypeKlassPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact); - ciKlass* _klass; // Does the type exclude subclasses of the klass? (Inexact == polymorphic.) bool _klass_is_exact; public: - ciSymbol* name() const { return klass()->name(); } - ciKlass* klass() const { return _klass; } bool klass_is_exact() const { return _klass_is_exact; } - bool is_loaded() const { return klass()->is_loaded(); } - - // Creates a type given a klass. Correctly handles multi-dimensional arrays - // Respects UseUniqueSubclasses. - // If the klass is final, the resulting type will be exact. 
- static const TypeKlassPtr* make_from_klass(ciKlass* klass) { - return make_from_klass_common(klass, true, false); - } - // Same as before, but will produce an exact type, even if - // the klass is not final, as long as it has exactly one implementation. - static const TypeKlassPtr* make_from_klass_unique(ciKlass* klass) { - return make_from_klass_common(klass, true, true); - } - // Same as before, but does not respects UseUniqueSubclasses. - // Use this only for creating array element types. - static const TypeKlassPtr* make_from_klass_raw(ciKlass* klass) { - return make_from_klass_common(klass, false, false); - } - - // Make a generic (unclassed) pointer to metadata. - static const TypeKlassPtr* make(PTR ptr, int offset); + bool is_loaded() const { return klass() != NULL && klass()->is_loaded(); } // ptr to klass 'k' - static const TypeKlassPtr *make( ciKlass* k ) { return make( TypePtr::Constant, k, 0); } + static const TypeKlassPtr* make(ciKlass* k) { return make( TypePtr::Constant, k, Offset(0)); } // ptr to klass 'k' with offset - static const TypeKlassPtr *make( ciKlass* k, int offset ) { return make( TypePtr::Constant, k, offset); } + static const TypeKlassPtr* make(ciKlass* k, Offset offset) { return make( TypePtr::Constant, k, offset); } // ptr to klass 'k' or sub-klass - static const TypeKlassPtr *make( PTR ptr, ciKlass* k, int offset); + static const TypeKlassPtr* make(PTR ptr, ciKlass* k, Offset offset); virtual const Type *cast_to_ptr_type(PTR ptr) const; @@ -1493,14 +1559,26 @@ //------------------------------TypeFunc--------------------------------------- // Class of Array Types class TypeFunc : public Type { - TypeFunc( const TypeTuple *domain, const TypeTuple *range ) : Type(Function), _domain(domain), _range(range) {} + TypeFunc(const TypeTuple *domain_sig, const TypeTuple *domain_cc, const TypeTuple *range_sig, const TypeTuple *range_cc) + : Type(Function), _domain_sig(domain_sig), _domain_cc(domain_cc), _range_sig(range_sig), 
_range_cc(range_cc) {} virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing virtual bool singleton(void) const; // TRUE if type is a singleton virtual bool empty(void) const; // TRUE if type is vacuous - const TypeTuple* const _domain; // Domain of inputs - const TypeTuple* const _range; // Range of results + // Domains of inputs: value type arguments are not passed by + // reference, instead each field of the value type is passed as an + // argument. We maintain 2 views of the argument list here: one + // based on the signature (with a value type argument as a single + // slot), one based on the actual calling convention (with a value + // type argument as a list of its fields). + const TypeTuple* const _domain_sig; + const TypeTuple* const _domain_cc; + // Range of results. Similar to domains: a value type result can be + // returned in registers in which case range_cc lists all fields and + // is the actual calling convention. + const TypeTuple* const _range_sig; + const TypeTuple* const _range_cc; public: // Constants are shared among ADLC and VM @@ -1514,11 +1592,15 @@ // Accessors: - const TypeTuple* domain() const { return _domain; } - const TypeTuple* range() const { return _range; } + const TypeTuple* domain_sig() const { return _domain_sig; } + const TypeTuple* domain_cc() const { return _domain_cc; } + const TypeTuple* range_sig() const { return _range_sig; } + const TypeTuple* range_cc() const { return _range_cc; } static const TypeFunc *make(ciMethod* method); static const TypeFunc *make(ciSignature signature, const Type* extra); + static const TypeFunc *make(const TypeTuple* domain_sig, const TypeTuple* domain_cc, + const TypeTuple* range_sig, const TypeTuple* range_cc); static const TypeFunc *make(const TypeTuple* domain, const TypeTuple* range); virtual const Type *xmeet( const Type *t ) const; @@ -1526,6 +1608,8 @@ BasicType return_type() const; + bool returns_value_type_as_fields() const { return range_sig() 
!= range_cc(); } + #ifndef PRODUCT virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping #endif @@ -1675,6 +1759,15 @@ return (TypeAryPtr*)this; } +inline const TypeValueType* Type::isa_valuetype() const { + return (_base == ValueType) ? (TypeValueType*)this : NULL; +} + +inline const TypeValueType* Type::is_valuetype() const { + assert(_base == ValueType, "Not a value type"); + return (TypeValueType*)this; +} + inline const TypeNarrowOop *Type::is_narrowoop() const { // OopPtr is the first and KlassPtr the last, with no non-oops between. assert(_base == NarrowOop, "Not a narrow oop" ) ; @@ -1741,11 +1834,14 @@ return false; } -inline bool Type::is_ptr_to_boxing_obj() const { - const TypeInstPtr* tp = isa_instptr(); - return (tp != NULL) && (tp->offset() == 0) && - tp->klass()->is_instance_klass() && - tp->klass()->as_instance_klass()->is_box_klass(); +inline bool Type::is_valuetypeptr() const { + return isa_instptr() != NULL && is_instptr()->klass()->is_valuetype(); +} + + +inline ciValueKlass* Type::value_klass() const { + assert(is_valuetypeptr(), "must be a value type ptr"); + return is_instptr()->klass()->as_value_klass(); } @@ -1774,6 +1870,7 @@ #define AndXNode AndLNode #define OrXNode OrLNode #define CmpXNode CmpLNode +#define CmpUXNode CmpULNode #define SubXNode SubLNode #define LShiftXNode LShiftLNode // For object size computation: @@ -1793,6 +1890,8 @@ #define Op_SubX Op_SubL #define Op_XorX Op_XorL #define Op_URShiftX Op_URShiftL +#define Op_LoadX Op_LoadL +#define Op_StoreX Op_StoreL // conversions #define ConvI2X(x) ConvI2L(x) #define ConvL2X(x) (x) @@ -1821,6 +1920,7 @@ #define AndXNode AndINode #define OrXNode OrINode #define CmpXNode CmpINode +#define CmpUXNode CmpUNode #define SubXNode SubINode #define LShiftXNode LShiftINode // For object size computation: @@ -1840,6 +1940,8 @@ #define Op_SubX Op_SubI #define Op_XorX Op_XorI #define Op_URShiftX Op_URShiftI +#define Op_LoadX Op_LoadI +#define Op_StoreX 
Op_StoreI // conversions #define ConvI2X(x) (x) #define ConvL2X(x) ConvL2I(x) --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/opto/valuetypenode.cpp 2019-03-11 14:27:00.846354399 +0100 @@ -0,0 +1,908 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "ci/ciValueKlass.hpp" +#include "opto/addnode.hpp" +#include "opto/castnode.hpp" +#include "opto/graphKit.hpp" +#include "opto/rootnode.hpp" +#include "opto/valuetypenode.hpp" +#include "opto/phaseX.hpp" + +// Clones the values type to handle control flow merges involving multiple value types. +// The inputs are replaced by PhiNodes to represent the merged values for the given region. 
+ValueTypeBaseNode* ValueTypeBaseNode::clone_with_phis(PhaseGVN* gvn, Node* region) { + assert(!has_phi_inputs(region), "already cloned with phis"); + ValueTypeBaseNode* vt = clone()->as_ValueTypeBase(); + + // Create a PhiNode for merging the oop values + const Type* phi_type = Type::get_const_type(value_klass()); + PhiNode* oop = PhiNode::make(region, vt->get_oop(), phi_type); + gvn->set_type(oop, phi_type); + vt->set_oop(oop); + + // Create a PhiNode each for merging the field values + for (uint i = 0; i < vt->field_count(); ++i) { + ciType* type = vt->field_type(i); + Node* value = vt->field_value(i); + if (type->is_valuetype() && value->isa_ValueType()) { + // Handle flattened value type fields recursively + value = value->as_ValueType()->clone_with_phis(gvn, region); + } else { + phi_type = Type::get_const_type(type); + value = PhiNode::make(region, value, phi_type); + gvn->set_type(value, phi_type); + } + vt->set_field_value(i, value); + } + gvn->set_type(vt, vt->bottom_type()); + return vt; +} + +// Checks if the inputs of the ValueBaseTypeNode were replaced by PhiNodes +// for the given region (see ValueBaseTypeNode::clone_with_phis). 
+bool ValueTypeBaseNode::has_phi_inputs(Node* region) { + // Check oop input + bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region; +#ifdef ASSERT + if (result) { + // Check all field value inputs for consistency + for (uint i = Oop; i < field_count(); ++i) { + Node* n = in(i); + if (n->is_ValueTypeBase()) { + assert(n->as_ValueTypeBase()->has_phi_inputs(region), "inconsistent phi inputs"); + } else { + assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs"); + } + } + } +#endif + return result; +} + +// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis' +ValueTypeBaseNode* ValueTypeBaseNode::merge_with(PhaseGVN* gvn, const ValueTypeBaseNode* other, int pnum, bool transform) { + // Merge oop inputs + PhiNode* phi = get_oop()->as_Phi(); + phi->set_req(pnum, other->get_oop()); + if (transform) { + set_oop(gvn->transform(phi)); + gvn->record_for_igvn(phi); + } + // Merge field values + for (uint i = 0; i < field_count(); ++i) { + Node* val1 = field_value(i); + Node* val2 = other->field_value(i); + if (val1->is_ValueType()) { + val1->as_ValueType()->merge_with(gvn, val2->as_ValueType(), pnum, transform); + } else { + assert(val1->is_Phi(), "must be a phi node"); + assert(!val2->is_ValueType(), "inconsistent merge values"); + val1->set_req(pnum, val2); + } + if (transform) { + set_field_value(i, gvn->transform(val1)); + gvn->record_for_igvn(val1); + } + } + return this; +} + +// Adds a new merge path to a valuetype node with phi inputs +void ValueTypeBaseNode::add_new_path(Node* region) { + assert(has_phi_inputs(region), "must have phi inputs"); + + PhiNode* phi = get_oop()->as_Phi(); + phi->add_req(NULL); + assert(phi->req() == region->req(), "must be same size as region"); + + for (uint i = 0; i < field_count(); ++i) { + Node* val = field_value(i); + if (val->is_ValueType()) { + val->as_ValueType()->add_new_path(region); + } else { + val->as_Phi()->add_req(NULL); + assert(val->req() 
== region->req(), "must be same size as region"); + } + } +} + +Node* ValueTypeBaseNode::field_value(uint index) const { + assert(index < field_count(), "index out of bounds"); + return in(Values + index); +} + +// Get the value of the field at the given offset. +// If 'recursive' is true, flattened value type fields will be resolved recursively. +Node* ValueTypeBaseNode::field_value_by_offset(int offset, bool recursive) const { + // If the field at 'offset' belongs to a flattened value type field, 'index' refers to the + // corresponding ValueTypeNode input and 'sub_offset' is the offset in flattened value type. + int index = value_klass()->field_index_by_offset(offset); + int sub_offset = offset - field_offset(index); + Node* value = field_value(index); + assert(value != NULL, "field value not found"); + if (recursive && value->is_ValueType()) { + ValueTypeNode* vt = value->as_ValueType(); + if (field_is_flattened(index)) { + // Flattened value type field + sub_offset += vt->value_klass()->first_field_offset(); // Add header size + return vt->field_value_by_offset(sub_offset, recursive); + } else { + assert(sub_offset == 0, "should not have a sub offset"); + return vt; + } + } + assert(!(recursive && value->is_ValueType()), "should not be a value type"); + assert(sub_offset == 0, "offset mismatch"); + return value; +} + +void ValueTypeBaseNode::set_field_value(uint index, Node* value) { + assert(index < field_count(), "index out of bounds"); + set_req(Values + index, value); +} + +void ValueTypeBaseNode::set_field_value_by_offset(int offset, Node* value) { + set_field_value(field_index(offset), value); +} + +int ValueTypeBaseNode::field_offset(uint index) const { + assert(index < field_count(), "index out of bounds"); + return value_klass()->declared_nonstatic_field_at(index)->offset(); +} + +uint ValueTypeBaseNode::field_index(int offset) const { + uint i = 0; + for (; i < field_count() && field_offset(i) != offset; i++) { } + assert(i < field_count(), "field 
not found"); + return i; +} + +ciType* ValueTypeBaseNode::field_type(uint index) const { + assert(index < field_count(), "index out of bounds"); + return value_klass()->declared_nonstatic_field_at(index)->type(); +} + +bool ValueTypeBaseNode::field_is_flattened(uint index) const { + assert(index < field_count(), "index out of bounds"); + ciField* field = value_klass()->declared_nonstatic_field_at(index); + assert(!field->is_flattened() || field->type()->is_valuetype(), "must be a value type"); + return field->is_flattened(); +} + +bool ValueTypeBaseNode::field_is_flattenable(uint index) const { + assert(index < field_count(), "index out of bounds"); + ciField* field = value_klass()->declared_nonstatic_field_at(index); + assert(!field->is_flattenable() || field->type()->is_valuetype(), "must be a value type"); + return field->is_flattenable(); +} + +int ValueTypeBaseNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) { + ciValueKlass* vk = value_klass(); + uint nfields = vk->nof_nonstatic_fields(); + JVMState* jvms = sfpt->jvms(); + int start = jvms->debug_start(); + int end = jvms->debug_end(); + // Replace safepoint edge by SafePointScalarObjectNode and add field values + assert(jvms != NULL, "missing JVMS"); + uint first_ind = (sfpt->req() - jvms->scloff()); + SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(value_ptr(), +#ifdef ASSERT + NULL, +#endif + first_ind, nfields); + sobj->init_req(0, igvn->C->root()); + // Iterate over the value type fields in order of increasing + // offset and add the field values to the safepoint. 
+ for (uint j = 0; j < nfields; ++j) { + int offset = vk->nonstatic_field_at(j)->offset(); + Node* value = field_value_by_offset(offset, true /* include flattened value type fields */); + if (value->is_ValueType()) { + // Add value type field to the worklist to process later + worklist.push(value); + } + sfpt->add_req(value); + } + jvms->set_endoff(sfpt->req()); + sobj = igvn->transform(sobj)->as_SafePointScalarObject(); + igvn->rehash_node_delayed(sfpt); + return sfpt->replace_edges_in_range(this, sobj, start, end); +} + +void ValueTypeBaseNode::make_scalar_in_safepoints(PhaseIterGVN* igvn) { + // Process all safepoint uses and scalarize value type + Unique_Node_List worklist; + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + SafePointNode* sfpt = fast_out(i)->isa_SafePoint(); + if (sfpt != NULL && !sfpt->is_CallLeaf() && (!sfpt->is_Call() || sfpt->as_Call()->has_debug_use(this))) { + int nb = 0; + if (is_allocated(igvn) && get_oop()->is_Con()) { + // Value type is allocated with a constant oop, link it directly + nb = sfpt->replace_edges_in_range(this, get_oop(), sfpt->jvms()->debug_start(), sfpt->jvms()->debug_end()); + igvn->rehash_node_delayed(sfpt); + } else { + nb = make_scalar_in_safepoint(igvn, worklist, sfpt); + } + --i; imax -= nb; + } + } + // Now scalarize non-flattened fields + for (uint i = 0; i < worklist.size(); ++i) { + Node* vt = worklist.at(i); + vt->as_ValueType()->make_scalar_in_safepoints(igvn); + } +} + +const TypePtr* ValueTypeBaseNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const { + const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr(); + const TypePtr* adr_type = NULL; + bool is_array = ary_type != NULL; + if ((decorators & C2_MISMATCHED) != 0) { + adr_type = TypeRawPtr::BOTTOM; + } else if (is_array) { + // In the case of a flattened value type array, each field has its own slice + adr_type = 
ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot); + } else { + ciField* field = holder->get_field_by_offset(offset, false); + assert(field != NULL, "field not found"); + adr_type = gvn.C->alias_type(field)->adr_type(); + } + return adr_type; +} + +void ValueTypeBaseNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) { + // Initialize the value type by loading its field values from + // memory and adding the values as input edges to the node. + for (uint i = 0; i < field_count(); ++i) { + int offset = holder_offset + field_offset(i); + Node* value = NULL; + ciType* ft = field_type(i); + if (field_is_flattened(i)) { + // Recursively load the flattened value type field + value = ValueTypeNode::make_from_flattened(kit, ft->as_value_klass(), base, ptr, holder, offset, decorators); + } else { + const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr(); + bool is_array = (oop_ptr->isa_aryptr() != NULL); + if (base->is_Con() && !is_array) { + // If the oop to the value type is constant (static final field), we can + // also treat the fields as constants because the value type is immutable. 
+ ciObject* constant_oop = oop_ptr->const_oop(); + ciField* field = holder->get_field_by_offset(offset, false); + assert(field != NULL, "field not found"); + ciConstant constant = constant_oop->as_instance()->field_value(field); + const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true); + assert(con_type != NULL, "type not found"); + value = kit->gvn().transform(kit->makecon(con_type)); + } else { + // Load field value from memory + const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn()); + Node* adr = kit->basic_plus_adr(base, ptr, offset); + BasicType bt = type2field[ft->basic_type()]; + assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent"); + const Type* val_type = Type::get_const_type(ft); + if (is_array) { + decorators |= IS_ARRAY; + } + value = kit->access_load_at(base, adr, adr_type, val_type, bt, decorators); + } + if (field_is_flattenable(i)) { + // Loading a non-flattened but flattenable value type from memory + if (ft->as_value_klass()->is_scalarizable()) { + value = ValueTypeNode::make_from_oop(kit, value, ft->as_value_klass()); + } else { + value = kit->null2default(value, ft->as_value_klass()); + } + } + } + set_field_value(i, value); + } +} + +void ValueTypeBaseNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) const { + // The value type is embedded into the object without an oop header. Subtract the + // offset of the first field to account for the missing header when storing the values. 
+ if (holder == NULL) { + holder = value_klass(); + } + holder_offset -= value_klass()->first_field_offset(); + store(kit, base, ptr, holder, holder_offset, false, decorators); +} + +void ValueTypeBaseNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, bool deoptimize_on_exception, DecoratorSet decorators) const { + // Write field values to memory + for (uint i = 0; i < field_count(); ++i) { + int offset = holder_offset + field_offset(i); + Node* value = field_value(i); + ciType* ft = field_type(i); + if (field_is_flattened(i)) { + // Recursively store the flattened value type field + if (!value->is_ValueType()) { + assert(!kit->gvn().type(value)->maybe_null(), "should never be null"); + value = ValueTypeNode::make_from_oop(kit, value, ft->as_value_klass()); + } + value->as_ValueType()->store_flattened(kit, base, ptr, holder, offset, decorators); + } else { + // Store field value to memory + const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn()); + Node* adr = kit->basic_plus_adr(base, ptr, offset); + BasicType bt = type2field[ft->basic_type()]; + assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent"); + const Type* val_type = Type::get_const_type(ft); + const TypeAryPtr* ary_type = kit->gvn().type(base)->isa_aryptr(); + if (ary_type != NULL) { + decorators |= IS_ARRAY; + } + kit->access_store_at(base, adr, adr_type, value, val_type, bt, decorators, deoptimize_on_exception); + } + } +} + +ValueTypeBaseNode* ValueTypeBaseNode::allocate(GraphKit* kit, bool deoptimize_on_exception) { + // Check if value type is already allocated + Node* null_ctl = kit->top(); + Node* not_null_oop = kit->null_check_oop(get_oop(), &null_ctl); + if (null_ctl->is_top()) { + // Value type is allocated + return this; + } + assert(!is_allocated(&kit->gvn()), "should not be allocated"); + RegionNode* region = new RegionNode(3); + + // Oop is non-NULL, use it + 
region->init_req(1, kit->control()); + PhiNode* oop = PhiNode::make(region, not_null_oop, value_ptr()); + PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO); + PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM); + + { + // Oop is NULL, allocate and initialize buffer + PreserveJVMState pjvms(kit); + kit->set_control(null_ctl); + kit->kill_dead_locals(); + ciValueKlass* vk = value_klass(); + Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); + Node* alloc_oop = kit->new_instance(klass_node, NULL, NULL, deoptimize_on_exception, this); + store(kit, alloc_oop, alloc_oop, vk, 0, deoptimize_on_exception); + region->init_req(2, kit->control()); + oop ->init_req(2, alloc_oop); + io ->init_req(2, kit->i_o()); + mem ->init_req(2, kit->merged_memory()); + } + + // Update GraphKit + kit->set_control(kit->gvn().transform(region)); + kit->set_i_o(kit->gvn().transform(io)); + kit->set_all_memory(kit->gvn().transform(mem)); + kit->record_for_igvn(region); + kit->record_for_igvn(oop); + kit->record_for_igvn(io); + kit->record_for_igvn(mem); + + // Use cloned ValueTypeNode to propagate oop from now on + Node* res_oop = kit->gvn().transform(oop); + ValueTypeBaseNode* vt = clone()->as_ValueTypeBase(); + vt->set_oop(res_oop); + vt = kit->gvn().transform(vt)->as_ValueTypeBase(); + kit->replace_in_map(this, vt); + return vt; +} + +bool ValueTypeBaseNode::is_allocated(PhaseGVN* phase) const { + Node* oop = get_oop(); + const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type(); + return !oop_type->maybe_null(); +} + +// When a call returns multiple values, it has several result +// projections, one per field. Replacing the result of the call by a +// value type node (after late inlining) requires that for each result +// projection, we find the corresponding value type field. 
+void ValueTypeBaseNode::replace_call_results(GraphKit* kit, Node* call, Compile* C) { + ciValueKlass* vk = value_klass(); + for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { + ProjNode* pn = call->fast_out(i)->as_Proj(); + uint con = pn->_con; + if (con >= TypeFunc::Parms+1) { + uint field_nb = con - (TypeFunc::Parms+1); + int extra = 0; + for (uint j = 0; j < field_nb - extra; j++) { + ciField* f = vk->nonstatic_field_at(j); + BasicType bt = f->type()->basic_type(); + if (bt == T_LONG || bt == T_DOUBLE) { + extra++; + } + } + ciField* f = vk->nonstatic_field_at(field_nb - extra); + Node* field = field_value_by_offset(f->offset(), true); + if (field->is_ValueType()) { + assert(f->is_flattened(), "should be flattened"); + field = field->as_ValueType()->allocate(kit)->get_oop(); + } + C->gvn_replace_by(pn, field); + C->initial_gvn()->hash_delete(pn); + pn->set_req(0, C->top()); + --i; --imax; + } + } +} + +ValueTypeNode* ValueTypeNode::make_uninitialized(PhaseGVN& gvn, ciValueKlass* vk) { + // Create a new ValueTypeNode with uninitialized values and NULL oop + return new ValueTypeNode(vk, gvn.zerocon(T_VALUETYPE)); +} + +Node* ValueTypeNode::default_oop(PhaseGVN& gvn, ciValueKlass* vk) { + // Returns the constant oop of the default value type allocation + return gvn.makecon(TypeInstPtr::make(vk->default_value_instance())); +} + +ValueTypeNode* ValueTypeNode::make_default(PhaseGVN& gvn, ciValueKlass* vk) { + // Create a new ValueTypeNode with default values + ValueTypeNode* vt = new ValueTypeNode(vk, default_oop(gvn, vk)); + for (uint i = 0; i < vt->field_count(); ++i) { + ciType* field_type = vt->field_type(i); + Node* value = NULL; + if (field_type->is_valuetype() && vt->field_is_flattenable(i)) { + ciValueKlass* field_klass = field_type->as_value_klass(); + if (field_klass->is_scalarizable() || vt->field_is_flattened(i)) { + value = ValueTypeNode::make_default(gvn, field_klass); + } else { + value = default_oop(gvn, field_klass); + } + } else 
{ + value = gvn.zerocon(field_type->basic_type()); + } + vt->set_field_value(i, value); + } + vt = gvn.transform(vt)->as_ValueType(); + assert(vt->is_default(gvn), "must be the default value type"); + return vt; +} + +bool ValueTypeNode::is_default(PhaseGVN& gvn) const { + for (uint i = 0; i < field_count(); ++i) { + Node* value = field_value(i); + if (!gvn.type(value)->is_zero_type() && + !(value->is_ValueType() && value->as_ValueType()->is_default(gvn)) && + !(field_type(i)->is_valuetype() && value == default_oop(gvn, field_type(i)->as_value_klass()))) { + return false; + } + } + return true; +} + +ValueTypeNode* ValueTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciValueKlass* vk) { + PhaseGVN& gvn = kit->gvn(); + + // Create and initialize a ValueTypeNode by loading all field + // values from a heap-allocated version and also save the oop. + ValueTypeNode* vt = new ValueTypeNode(vk, oop); + + if (oop->isa_ValueTypePtr()) { + // Can happen with late inlining + ValueTypePtrNode* vtptr = oop->as_ValueTypePtr(); + vt->set_oop(vtptr->get_oop()); + for (uint i = Oop+1; i < vtptr->req(); ++i) { + vt->init_req(i, vtptr->in(i)); + } + } else if (gvn.type(oop)->maybe_null()) { + // Add a null check because the oop may be null + Node* null_ctl = kit->top(); + Node* not_null_oop = kit->null_check_oop(oop, &null_ctl); + if (kit->stopped()) { + // Constant null + kit->set_control(null_ctl); + return make_default(gvn, vk); + } + vt->set_oop(not_null_oop); + vt->load(kit, not_null_oop, not_null_oop, vk, /* holder_offset */ 0); + + if (null_ctl != kit->top()) { + // Return default value type if oop is null + ValueTypeNode* def = make_default(gvn, vk); + Node* region = new RegionNode(3); + region->init_req(1, kit->control()); + region->init_req(2, null_ctl); + + vt = vt->clone_with_phis(&gvn, region)->as_ValueType(); + vt->merge_with(&gvn, def, 2, true); + kit->set_control(gvn.transform(region)); + } + } else { + // Oop can never be null + Node* init_ctl = kit->control(); + 
vt->load(kit, oop, oop, vk, /* holder_offset */ 0); + assert(init_ctl != kit->control() || oop->is_Con() || oop->is_CheckCastPP() || oop->Opcode() == Op_ValueTypePtr || + vt->is_loaded(&gvn) == oop, "value type should be loaded"); + } + + assert(vt->is_allocated(&gvn), "value type should be allocated"); + return gvn.transform(vt)->as_ValueType(); +} + +// GraphKit wrapper for the 'make_from_flattened' method +ValueTypeNode* ValueTypeNode::make_from_flattened(GraphKit* kit, ciValueKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset, DecoratorSet decorators) { + // Create and initialize a ValueTypeNode by loading all field values from + // a flattened value type field at 'holder_offset' or from a value type array. + ValueTypeNode* vt = make_uninitialized(kit->gvn(), vk); + // The value type is flattened into the object without an oop header. Subtract the + // offset of the first field to account for the missing header when loading the values. + holder_offset -= vk->first_field_offset(); + vt->load(kit, obj, ptr, holder, holder_offset, decorators); + assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened value type oop"); + return kit->gvn().transform(vt)->as_ValueType(); +} + +ValueTypeNode* ValueTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ExtendedSignature& sig, ciValueKlass* vk, uint& base_input, bool in) { + ValueTypeNode* vt = ValueTypeNode::make_uninitialized(kit->gvn(), vk); + vt->initialize_fields(kit, multi, sig, base_input, 0, in); + return kit->gvn().transform(vt)->as_ValueType(); +} + +ValueTypeNode* ValueTypeNode::make_larval(GraphKit* kit, bool allocate) const { + ciValueKlass* vk = value_klass(); + ValueTypeNode* res = clone()->as_ValueType(); + if (allocate) { + Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); + Node* alloc_oop = kit->new_instance(klass_node, NULL, NULL, false); + AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn()); + alloc->_larval = 
true; + + store(kit, alloc_oop, alloc_oop, vk, 0, false); + res->set_oop(alloc_oop); + } + res->set_type(TypeValueType::make(vk, true)); + res = kit->gvn().transform(res)->as_ValueType(); + return res; +} + +ValueTypeNode* ValueTypeNode::finish_larval(GraphKit* kit) const { + Node* obj = get_oop(); + Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); + Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markOopDesc::larval_mask_in_place))); + kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered); + + ciValueKlass* vk = value_klass(); + ValueTypeNode* res = clone()->as_ValueType(); + res->set_type(TypeValueType::make(vk, false)); + res = kit->gvn().transform(res)->as_ValueType(); + return res; +} + +Node* ValueTypeNode::is_loaded(PhaseGVN* phase, ciValueKlass* vk, Node* base, int holder_offset) { + if (vk == NULL) { + vk = value_klass(); + } + if (field_count() == 0) { + assert(is_allocated(phase), "must be allocated"); + return get_oop(); + } + for (uint i = 0; i < field_count(); ++i) { + int offset = holder_offset + field_offset(i); + Node* value = field_value(i); + if (value->is_ValueType()) { + ValueTypeNode* vt = value->as_ValueType(); + if (field_is_flattened(i)) { + // Check value type field load recursively + base = vt->is_loaded(phase, vk, base, offset - vt->value_klass()->first_field_offset()); + if (base == NULL) { + return NULL; + } + continue; + } else { + value = vt->get_oop(); + if (value->Opcode() == Op_CastPP) { + // Skip CastPP + value = value->in(1); + } + } + } + if (value->isa_DecodeN()) { + // Skip DecodeN + value = value->in(1); + } + if (value->isa_Load()) { + // Check if base and offset of field load matches value type layout + intptr_t loffset = 0; + Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), 
phase, loffset); + if (lbase == NULL || (lbase != base && base != NULL) || loffset != offset) { + return NULL; + } else if (base == NULL) { + // Set base and check if pointer type matches + base = lbase; + const TypeInstPtr* vtptr = phase->type(base)->isa_instptr(); + if (vtptr == NULL || !vtptr->klass()->equals(vk)) { + return NULL; + } + } + } else { + return NULL; + } + } + return base; +} + +Node* ValueTypeNode::allocate_fields(GraphKit* kit) { + ValueTypeNode* vt = clone()->as_ValueType(); + for (uint i = 0; i < field_count(); i++) { + ValueTypeNode* value = field_value(i)->isa_ValueType(); + if (field_is_flattened(i)) { + // Flattened value type field + vt->set_field_value(i, value->allocate_fields(kit)); + } else if (value != NULL){ + // Non-flattened value type field + vt->set_field_value(i, value->allocate(kit)); + } + } + vt = kit->gvn().transform(vt)->as_ValueType(); + kit->replace_in_map(this, vt); + return vt; +} + +Node* ValueTypeNode::tagged_klass(PhaseGVN& gvn) { + ciValueKlass* vk = value_klass(); + const TypeKlassPtr* tk = TypeKlassPtr::make(vk); + intptr_t bits = tk->get_con(); + set_nth_bit(bits, 0); + return gvn.makecon(TypeRawPtr::make((address)bits)); +} + +void ValueTypeNode::pass_fields(GraphKit* kit, Node* n, ExtendedSignature& sig, uint& base_input, int base_offset) { + for (uint i = 0; i < field_count(); i++) { + int sig_offset = (*sig)._offset; + uint idx = field_index(sig_offset - base_offset); + Node* arg = field_value(idx); + + if (field_is_flattened(idx)) { + // Flattened value type field + arg->as_ValueType()->pass_fields(kit, n, sig, base_input, sig_offset - value_klass()->first_field_offset()); + } else { + if (arg->is_ValueType()) { + // Non-flattened value type field + assert(field_is_flattenable(idx), "must be flattenable"); + ValueTypeNode* vt = arg->as_ValueType(); + assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "value type field should be allocated on return"); + arg = vt->allocate(kit)->get_oop(); + } + 
// Initialize call/return arguments + BasicType bt = field_type(i)->basic_type(); + n->init_req(base_input++, arg); + if (type2size[bt] == 2) { + n->init_req(base_input++, kit->top()); + } + // Skip reserved arguments + while (SigEntry::next_is_reserved(sig, bt)) { + n->init_req(base_input++, kit->top()); + if (type2size[bt] == 2) { + n->init_req(base_input++, kit->top()); + } + } + } + } +} + +void ValueTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, ExtendedSignature& sig, uint& base_input, int base_offset, bool in) { + PhaseGVN& gvn = kit->gvn(); + for (uint i = 0; i < field_count(); i++) { + int sig_offset = (*sig)._offset; + uint idx = field_index(sig_offset - base_offset); + ciType* type = field_type(idx); + + Node* parm = NULL; + if (field_is_flattened(idx)) { + // Flattened value type field + ValueTypeNode* vt = ValueTypeNode::make_uninitialized(gvn, type->as_value_klass()); + vt->initialize_fields(kit, multi, sig, base_input, sig_offset - value_klass()->first_field_offset(), in); + parm = gvn.transform(vt); + } else { + if (multi->is_Start()) { + assert(in, "return from start?"); + parm = gvn.transform(new ParmNode(multi->as_Start(), base_input)); + } else if (in) { + parm = multi->as_Call()->in(base_input); + } else { + parm = gvn.transform(new ProjNode(multi->as_Call(), base_input)); + } + if (field_is_flattenable(idx)) { + // Non-flattened but flattenable value type + if (type->as_value_klass()->is_scalarizable()) { + parm = ValueTypeNode::make_from_oop(kit, parm, type->as_value_klass()); + } else { + parm = kit->null2default(parm, type->as_value_klass()); + } + } + base_input += type2size[type->basic_type()]; + // Skip reserved arguments + BasicType bt = type->basic_type(); + while (SigEntry::next_is_reserved(sig, bt)) { + base_input += type2size[bt]; + } + } + assert(parm != NULL, "should never be null"); + set_field_value(idx, parm); + gvn.record_for_igvn(parm); + } +} + +Node* ValueTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) { + 
Node* oop = get_oop(); + if (is_default(*phase) && (!oop->is_Con() || phase->type(oop)->is_zero_type())) { + // Use the pre-allocated oop for default value types + set_oop(default_oop(*phase, value_klass())); + return this; + } else if (oop->isa_ValueTypePtr()) { + // Can happen with late inlining + ValueTypePtrNode* vtptr = oop->as_ValueTypePtr(); + set_oop(vtptr->get_oop()); + for (uint i = Oop+1; i < vtptr->req(); ++i) { + set_req(i, vtptr->in(i)); + } + return this; + } + + if (!is_allocated(phase)) { + // Save base oop if fields are loaded from memory and the value + // type is not buffered (in this case we should not use the oop). + Node* base = is_loaded(phase); + if (base != NULL) { + set_oop(base); + assert(is_allocated(phase), "should now be allocated"); + return this; + } + } + + if (can_reshape) { + PhaseIterGVN* igvn = phase->is_IterGVN(); + + if (is_default(*phase)) { + // Search for users of the default value type + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + Node* user = fast_out(i); + AllocateNode* alloc = user->isa_Allocate(); + if (alloc != NULL && alloc->result_cast() != NULL && alloc->in(AllocateNode::ValueNode) == this) { + // Found an allocation of the default value type. + // If the code in StoreNode::Identity() that removes useless stores was not yet + // executed or ReduceFieldZeroing is disabled, there can still be initializing + // stores (only zero-type or default value stores, because value types are immutable). 
+ Node* res = alloc->result_cast(); + for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax; j++) { + AddPNode* addp = res->fast_out(j)->isa_AddP(); + if (addp != NULL) { + for (DUIterator_Fast kmax, k = addp->fast_outs(kmax); k < kmax; k++) { + StoreNode* store = addp->fast_out(k)->isa_Store(); + if (store != NULL && store->outcnt() != 0) { + // Remove the useless store + igvn->replace_in_uses(store, store->in(MemNode::Memory)); + } + } + } + } + // Replace allocation by pre-allocated oop + igvn->replace_node(res, default_oop(*phase, value_klass())); + } else if (user->is_ValueType()) { + // Add value type user to worklist to give it a chance to get optimized as well + igvn->_worklist.push(user); + } + } + } + + if (is_allocated(igvn)) { + // Value type is heap allocated, search for safepoint uses + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + Node* out = fast_out(i); + if (out->is_SafePoint()) { + // Let SafePointNode::Ideal() take care of re-wiring the + // safepoint to the oop input instead of the value type node. + igvn->rehash_node_delayed(out); + } + } + } + } + return NULL; +} + +// Search for multiple allocations of this value type +// and try to replace them by dominating allocations. +void ValueTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase) { + assert(EliminateAllocations, "allocation elimination should be enabled"); + // Search for allocations of this value type + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + AllocateNode* alloc = fast_out(i)->isa_Allocate(); + if (alloc != NULL && alloc->result_cast() != NULL && alloc->in(AllocateNode::ValueNode) == this) { + assert(!is_default(*igvn), "default value type allocation"); + Node* res_dom = NULL; + if (is_allocated(igvn)) { + // The value type is already allocated but still connected to an AllocateNode. 
+ // This can happen with late inlining when we first allocate a value type argument + // but later decide to inline the call with the callee code also allocating. + res_dom = get_oop(); + } else { + // Search for a dominating allocation of the same value type + for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) { + Node* out2 = fast_out(j); + if (alloc != out2 && out2->is_Allocate() && out2->in(AllocateNode::ValueNode) == this && + phase->is_dominator(out2, alloc)) { + AllocateNode* alloc_dom = out2->as_Allocate(); + assert(alloc->in(AllocateNode::KlassNode) == alloc_dom->in(AllocateNode::KlassNode), "klasses should match"); + res_dom = alloc_dom->result_cast(); + break; + } + } + } + if (res_dom != NULL) { + // Move users to dominating allocation + Node* res = alloc->result_cast(); + igvn->replace_node(res, res_dom); + // The result of the dominated allocation is now unused and will be + // removed later in AllocateNode::Ideal() to not confuse loop opts. + igvn->record_for_igvn(alloc); +#ifdef ASSERT + if (PrintEliminateAllocations) { + tty->print("++++ Eliminated: %d Allocate ", alloc->_idx); + dump_spec(tty); + tty->cr(); + } +#endif + } + } + } + + // Process users + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + Node* out = fast_out(i); + if (out->is_ValueType()) { + // Recursively process value type users + out->as_ValueType()->remove_redundant_allocations(igvn, phase); + } else if (out->isa_Allocate() != NULL) { + // Allocate users should be linked + assert(out->in(AllocateNode::ValueNode) == this, "should be linked"); + } else { +#ifdef ASSERT + // The value type should not have any other users at this time + out->dump(); + assert(false, "unexpected user of value type"); +#endif + } + } +} + +ValueTypePtrNode* ValueTypePtrNode::make_from_value_type(GraphKit* kit, ValueTypeNode* vt, bool deoptimize_on_exception) { + Node* oop = vt->allocate(kit, deoptimize_on_exception)->get_oop(); + ValueTypePtrNode* vtptr = new 
ValueTypePtrNode(vt->value_klass(), oop); + for (uint i = Oop+1; i < vt->req(); i++) { + vtptr->init_req(i, vt->in(i)); + } + return kit->gvn().transform(vtptr)->as_ValueTypePtr(); +} + +ValueTypePtrNode* ValueTypePtrNode::make_from_oop(GraphKit* kit, Node* oop) { + // Create and initialize a ValueTypePtrNode by loading all field + // values from a heap-allocated version and also save the oop. + ciValueKlass* vk = kit->gvn().type(oop)->value_klass(); + ValueTypePtrNode* vtptr = new ValueTypePtrNode(vk, oop); + vtptr->load(kit, oop, oop, vk); + return kit->gvn().transform(vtptr)->as_ValueTypePtr(); +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/src/hotspot/share/opto/valuetypenode.hpp 2019-03-11 14:27:01.478354390 +0100 @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
 *
 */

#ifndef SHARE_VM_OPTO_VALUETYPENODE_HPP
#define SHARE_VM_OPTO_VALUETYPENODE_HPP

#include "opto/node.hpp"
#include "opto/connode.hpp"

class GraphKit;

// Common base of the two IR representations of a value type:
// ValueTypeNode (scalarized value) and ValueTypePtrNode (pointer form).
// Inputs are: a control input, the heap oop (possibly NULL_PTR if the value
// is not allocated), followed by one input per declared non-static field.
class ValueTypeBaseNode : public TypeNode {
protected:
  ValueTypeBaseNode(const Type* t, int nb_fields)
    : TypeNode(t, nb_fields) {
    init_class_id(Class_ValueTypeBase);
    // Registered with the compilation so all value type nodes can be
    // post-processed (e.g. scalarized in safepoints) later.
    Compile::current()->add_value_type(this);
  }

  enum { Control,    // Control input
         Oop,        // Oop of TypeInstPtr
         Values      // Nodes corresponding to values of the value type's fields.
                     // Nodes are connected in increasing order of the index of the field they correspond to.
  };

  virtual const TypeInstPtr* value_ptr() const = 0;
  // Get the klass defining the field layout of the value type
  virtual ciValueKlass* value_klass() const = 0;

  int make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt);

  const TypePtr* field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const;

public:
  // Support for control flow merges
  bool has_phi_inputs(Node* region);
  ValueTypeBaseNode* clone_with_phis(PhaseGVN* gvn, Node* region);
  ValueTypeBaseNode* merge_with(PhaseGVN* gvn, const ValueTypeBaseNode* other, int pnum, bool transform);
  void add_new_path(Node* region);

  // Get oop for heap allocated value type (may be TypePtr::NULL_PTR)
  Node* get_oop() const    { return in(Oop); }
  void  set_oop(Node* oop) { set_req(Oop, oop); }

  // Value type fields
  uint field_count() const { return req() - Values; }
  Node* field_value(uint index) const;
  Node* field_value_by_offset(int offset, bool recursive = false) const;
  void set_field_value(uint index, Node* value);
  void set_field_value_by_offset(int offset, Node* value);
  int field_offset(uint index) const;
  uint field_index(int offset) const;
  ciType* field_type(uint index) const;
  bool field_is_flattened(uint index) const;
  bool field_is_flattenable(uint index) const;

  // Replace ValueTypeNodes in debug info at safepoints with SafePointScalarObjectNodes
  void make_scalar_in_safepoints(PhaseIterGVN* igvn);

  // Store the value type as a flattened (headerless) representation
  void store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0, DecoratorSet decorators = IN_HEAP | MO_UNORDERED) const;
  // Store the field values to memory
  void store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0, bool deoptimize_on_exception = false, DecoratorSet decorators = IN_HEAP | MO_UNORDERED) const;
  // Initialize the value type by loading its field values from memory
  void load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0, DecoratorSet decorators = IN_HEAP | MO_UNORDERED);

  // Allocates the value type (if not yet allocated)
  ValueTypeBaseNode* allocate(GraphKit* kit, bool deoptimize_on_exception = false);
  bool is_allocated(PhaseGVN* phase) const;

  void replace_call_results(GraphKit* kit, Node* call, Compile* C);
};

//------------------------------ValueTypeNode-------------------------------------
// Node representing a value type in C2 IR
class ValueTypeNode : public ValueTypeBaseNode {
  friend class ValueTypeBaseNode;
  friend class ValueTypePtrNode;
private:
  ValueTypeNode(ciValueKlass* vk, Node* oop)
    : ValueTypeBaseNode(TypeValueType::make(vk), Values + vk->nof_declared_nonstatic_fields()) {
    init_class_id(Class_ValueType);
    init_req(Oop, oop);
  }

  // Checks if the value type is loaded from memory and if so returns the oop
  Node* is_loaded(PhaseGVN* phase, ciValueKlass* vk = NULL, Node* base = NULL, int holder_offset = 0);

  // Checks if the value type fields are all set to default values
  bool is_default(PhaseGVN& gvn) const;

  const TypeInstPtr* value_ptr() const { return TypeInstPtr::make(TypePtr::BotPTR, value_klass()); }
  ciValueKlass* value_klass() const { return type()->is_valuetype()->value_klass(); }

public:
  // Create uninitialized
  static ValueTypeNode* make_uninitialized(PhaseGVN& gvn, ciValueKlass* vk);
  // Create with default field values
  static ValueTypeNode* make_default(PhaseGVN& gvn, ciValueKlass* vk);
  // Create and initialize by loading the field values from an oop
  static ValueTypeNode* make_from_oop(GraphKit* kit, Node* oop, ciValueKlass* vk);
  // Create and initialize by loading the field values from a flattened field or array
  static ValueTypeNode* make_from_flattened(GraphKit* kit, ciValueKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0, DecoratorSet decorators = IN_HEAP | MO_UNORDERED);
  // Create and initialize with the inputs or outputs of a MultiNode (method entry or call)
  static ValueTypeNode* make_from_multi(GraphKit* kit, MultiNode* multi, ExtendedSignature& sig, ciValueKlass* vk, uint& base_input, bool in);

  // Larval = in-construction buffer state (presumably mirrors the runtime's
  // larval mark-word state used by Unsafe.makePrivateBuffer — TODO confirm)
  ValueTypeNode* make_larval(GraphKit* kit, bool allocate) const;
  ValueTypeNode* finish_larval(GraphKit* kit) const;

  // Returns the constant oop of the default value type allocation
  static Node* default_oop(PhaseGVN& gvn, ciValueKlass* vk);

  // Allocate all non-flattened value type fields
  Node* allocate_fields(GraphKit* kit);

  Node* tagged_klass(PhaseGVN& gvn);
  // Pass value type as fields at a call or return
  void pass_fields(GraphKit* kit, Node* n, ExtendedSignature& sig, uint& base_input, int base_offset = 0);
  // Initialize the value type fields with the inputs or outputs of a MultiNode
  void initialize_fields(GraphKit* kit, MultiNode* multi, ExtendedSignature& sig, uint& base_input, int base_offset, bool in);

  // Allocation optimizations
  void remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase);

  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
};

//------------------------------ValueTypePtrNode-------------------------------------
// Node representing a value type as a pointer in C2 IR
class ValueTypePtrNode : public ValueTypeBaseNode {
private:
  const TypeInstPtr* value_ptr() const { return type()->isa_instptr(); }
  ciValueKlass* value_klass() const { return value_ptr()->value_klass(); }

  ValueTypePtrNode(ciValueKlass* vk, Node* oop)
    : ValueTypeBaseNode(TypeInstPtr::make(TypePtr::NotNull, vk), Values + vk->nof_declared_nonstatic_fields()) {
    init_class_id(Class_ValueTypePtr);
    init_req(Oop, oop);
  }

public:
  // Create and initialize with the values of a ValueTypeNode
  static ValueTypePtrNode* make_from_value_type(GraphKit* kit, ValueTypeNode* vt, bool deoptimize_on_exception = false);
  // Create and initialize by loading the field values from an oop
  static ValueTypePtrNode* make_from_oop(GraphKit* kit, Node* oop);

  virtual int Opcode() const;
};

#endif // SHARE_VM_OPTO_VALUETYPENODE_HPP
#include "classfile/classLoaderData.hpp" +# include "ci/ciValueArrayKlass.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" #include "gc/shared/collectedHeap.hpp" --- old/src/hotspot/share/prims/jni.cpp 2019-03-11 14:27:02.982354369 +0100 +++ new/src/hotspot/share/prims/jni.cpp 2019-03-11 14:27:02.710354373 +0100 @@ -874,6 +874,7 @@ virtual void get_float () = 0; virtual void get_double () = 0; virtual void get_object () = 0; + virtual void get_valuetype() = 0; JNI_ArgumentPusher(Symbol* signature) : SignatureIterator(signature) { this->_return_type = T_ILLEGAL; @@ -894,6 +895,7 @@ inline void do_float() { if (!is_return_type()) get_float(); } inline void do_double() { if (!is_return_type()) get_double(); } inline void do_object(int begin, int end) { if (!is_return_type()) get_object(); } + inline void do_valuetype(int begin, int end) { if (!is_return_type()) get_valuetype(); } inline void do_array(int begin, int end) { if (!is_return_type()) get_object(); } // do_array uses get_object -- there is no get_array inline void do_void() { } @@ -925,6 +927,7 @@ inline void get_float() { _arguments->push_float((jfloat)va_arg(_ap, jdouble)); } // float is coerced to double w/ va_arg inline void get_double() { _arguments->push_double(va_arg(_ap, jdouble)); } inline void get_object() { _arguments->push_jobject(va_arg(_ap, jobject)); } + inline void get_valuetype() { _arguments->push_jobject(va_arg(_ap, jobject)); } inline void set_ap(va_list rap) { va_copy(_ap, rap); @@ -1014,6 +1017,8 @@ inline void get_float() { _arguments->push_float((_ap++)->f); } inline void get_double() { _arguments->push_double((_ap++)->d);} inline void get_object() { _arguments->push_jobject((_ap++)->l); } + // value types are implemented with oops too + inline void get_valuetype() { _arguments->push_jobject((_ap++)->l); } inline void set_ap(const jvalue *rap) { _ap = rap; } @@ -1108,7 +1113,7 @@ JavaCalls::call(result, method, &java_args, CHECK); // Convert result - 
if (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY) { + if (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY || result->get_type() == T_VALUETYPE) { result->set_jobject(JNIHandles::make_local(env, (oop) result->get_jobject())); } } @@ -1171,7 +1176,7 @@ JavaCalls::call(result, method, &java_args, CHECK); // Convert result - if (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY) { + if (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY || result->get_type() == T_VALUETYPE) { result->set_jobject(JNIHandles::make_local(env, (oop) result->get_jobject())); } } --- old/src/hotspot/share/prims/jniCheck.cpp 2019-03-11 14:27:03.470354363 +0100 +++ new/src/hotspot/share/prims/jniCheck.cpp 2019-03-11 14:27:03.262354366 +0100 @@ -273,7 +273,8 @@ if (!id->find_local_field(&fd)) ReportJNIFatalError(thr, fatal_static_field_not_found); if ((fd.field_type() != ftype) && - !(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) { + !(fd.field_type() == T_ARRAY && ftype == T_OBJECT) && + !(fd.field_type() == T_VALUETYPE && ftype == T_OBJECT)) { ReportJNIFatalError(thr, fatal_static_field_mismatch); } } @@ -310,7 +311,8 @@ ReportJNIFatalError(thr, fatal_instance_field_not_found); if ((fd.field_type() != ftype) && - !(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) { + !(fd.field_type() == T_ARRAY && ftype == T_OBJECT) && + !(fd.field_type() == T_VALUETYPE && ftype == T_OBJECT)) { ReportJNIFatalError(thr, fatal_instance_field_mismatch); } } --- old/src/hotspot/share/prims/jvm.cpp 2019-03-11 14:27:03.918354356 +0100 +++ new/src/hotspot/share/prims/jvm.cpp 2019-03-11 14:27:03.710354359 +0100 @@ -52,6 +52,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueArrayKlass.hpp" #include "prims/jvm_misc.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" @@ -683,6 +684,7 @@ ResourceMark rm(THREAD); 
THROW_MSG_0(vmSymbols::java_lang_CloneNotSupportedException(), klass->external_name()); } + assert(!EnableValhalla || !obj->klass()->is_value(), "Clone disallowed on value type"); // Make shallow object copy const int size = obj->size(); @@ -765,7 +767,7 @@ JVMWrapper("JVM_FindPrimitiveClass"); oop mirror = NULL; BasicType t = name2type(utf); - if (t != T_ILLEGAL && t != T_OBJECT && t != T_ARRAY) { + if (t != T_ILLEGAL && t != T_OBJECT && t != T_ARRAY && t != T_VALUETYPE) { mirror = Universe::java_mirror(t); } if (mirror == NULL) { @@ -2236,6 +2238,48 @@ return JNIHandles::make_local(env, asd); JVM_END +// Arrays support ///////////////////////////////////////////////////////////// + +JVM_ENTRY(jboolean, JVM_ArrayIsAccessAtomic(JNIEnv *env, jclass unused, jobject array)) + JVMWrapper("JVM_ArrayIsAccessAtomic"); + oop o = JNIHandles::resolve(array); + Klass* k = o->klass(); + if ((o == NULL) || (!k->is_array_klass())) { + THROW_0(vmSymbols::java_lang_IllegalArgumentException()); + } + if (k->is_valueArray_klass()) { + return ValueArrayKlass::cast(k)->is_atomic(); + } + return true; +JVM_END + +JVM_ENTRY(jobject, JVM_ArrayEnsureAccessAtomic(JNIEnv *env, jclass unused, jobject array)) + JVMWrapper("JVM_ArrayEnsureAccessAtomic"); + oop o = JNIHandles::resolve(array); + Klass* k = o->klass(); + if ((o == NULL) || (!k->is_array_klass())) { + THROW_0(vmSymbols::java_lang_IllegalArgumentException()); + } + if (k->is_valueArray_klass()) { + ValueArrayKlass* vk = ValueArrayKlass::cast(k); + if (!vk->is_atomic()) { + /** + * Need to decide how to implement: + * + * 1) Change to objArrayOop layout, therefore oop->klass() differs so + * then "[Qfoo;" klass needs to subclass "[Qfoo;" to pass through + * "checkcast" & "instanceof" + * + * 2) Use extra header in the valueArrayOop to flag atomicity required and + * possibly per instance lock structure. 
Said info, could be placed in + * "trailer" rather than disturb the current arrayOop + */ + Unimplemented(); + } + } + return array; +JVM_END + // Verification //////////////////////////////////////////////////////////////////////////////// // Reflection for the verifier ///////////////////////////////////////////////////////////////// @@ -2266,7 +2310,7 @@ ConstantPool* cp = InstanceKlass::cast(k)->constants(); for (int index = cp->length() - 1; index >= 0; index--) { constantTag tag = cp->tag_at(index); - types[index] = (tag.is_unresolved_klass()) ? JVM_CONSTANT_Class : tag.value(); + types[index] = tag.is_unresolved_klass() ? JVM_CONSTANT_Class : tag.value(); } } JVM_END --- old/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp 2019-03-11 14:27:04.374354350 +0100 +++ new/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp 2019-03-11 14:27:04.166354353 +0100 @@ -264,7 +264,7 @@ address scopes_data = nm->scopes_data_begin(); for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) { - ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute(), pcd->rethrow_exception(), pcd->return_oop()); + ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute(), pcd->rethrow_exception(), pcd->return_oop(), pcd->return_vt()); ScopeDesc *sd = &sc0; while( !sd->is_top() ) { sd = sd->sender(); } int bci = sd->bci(); --- old/src/hotspot/share/prims/jvmtiExport.cpp 2019-03-11 14:27:04.802354344 +0100 +++ new/src/hotspot/share/prims/jvmtiExport.cpp 2019-03-11 14:27:04.586354347 +0100 @@ -1584,7 +1584,7 @@ if (!exception_exit) { oop oop_result; BasicType type = current_frame.interpreter_frame_result(&oop_result, &value); - if (type == T_OBJECT || type == T_ARRAY) { + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) { result = Handle(thread, oop_result); } } --- old/src/hotspot/share/prims/jvmtiImpl.cpp 2019-03-11 14:27:05.242354338 +0100 +++ new/src/hotspot/share/prims/jvmtiImpl.cpp 2019-03-11 14:27:05.034354341 +0100 @@ -672,6 +672,7 @@ 
slot_type = T_INT; break; case T_ARRAY: + case T_VALUETYPE: slot_type = T_OBJECT; break; default: --- old/src/hotspot/share/prims/jvmtiRedefineClasses.cpp 2019-03-11 14:27:05.690354332 +0100 +++ new/src/hotspot/share/prims/jvmtiRedefineClasses.cpp 2019-03-11 14:27:05.474354335 +0100 @@ -559,8 +559,7 @@ // At this stage, String could be here, but not StringIndex case JVM_CONSTANT_StringIndex: // fall through - // At this stage JVM_CONSTANT_UnresolvedClassInError should not be - // here + // At this stage JVM_CONSTANT_UnresolvedClassInError should not be here case JVM_CONSTANT_UnresolvedClassInError: // fall through default: --- old/src/hotspot/share/prims/methodHandles.cpp 2019-03-11 14:27:07.026354313 +0100 +++ new/src/hotspot/share/prims/methodHandles.cpp 2019-03-11 14:27:06.814354316 +0100 @@ -335,6 +335,12 @@ oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) { int flags = (jushort)( fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ); flags |= IS_FIELD | ((fd.is_static() ? 
JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT); + if (fd.is_flattenable()) { + flags |= JVM_ACC_FLATTENABLE; + } + if (fd.is_flattened()) { + flags |= JVM_ACC_FIELD_FLATTENED; + } if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT); int vmindex = fd.offset(); // determines the field uniquely when combined with static bit @@ -569,7 +575,7 @@ if (is_subword_type(bt)) { bsig = vmSymbols::int_signature(); } else { - assert(bt == T_OBJECT || bt == T_ARRAY, "is_basic_type_signature was false"); + assert(bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE, "is_basic_type_signature was false"); bsig = vmSymbols::object_signature(); } } else { @@ -588,7 +594,7 @@ if (arg_pos == keep_arg_pos) { buffer.write((char*) ss.raw_bytes(), (int) ss.raw_length()); - } else if (bt == T_OBJECT || bt == T_ARRAY) { + } else if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) { buffer.write(OBJ_SIG, OBJ_SIG_LEN); } else { if (is_subword_type(bt)) --- old/src/hotspot/share/prims/unsafe.cpp 2019-03-11 14:27:07.470354307 +0100 +++ new/src/hotspot/share/prims/unsafe.cpp 2019-03-11 14:27:07.262354310 +0100 @@ -30,13 +30,19 @@ #include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" #include "oops/access.inline.hpp" #include "oops/fieldStreams.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/valueArrayOop.inline.hpp" #include "prims/unsafe.hpp" #include "runtime/atomic.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -144,7 +150,6 @@ return byte_offset; } - ///// Data read/writes on the Java heap and in native (off-heap) memory /** @@ -229,11 +234,11 @@ 
GuardUnsafeAccess guard(_thread); RawAccess<>::store(addr(), normalize_for_write(x)); } else { + assert(!_obj->is_value() || _obj->mark()->is_larval_state(), "must be an object instance or a larval value"); HeapAccess<>::store_at(_obj, _offset, normalize_for_write(x)); } } - T get_volatile() { if (_obj == NULL) { GuardUnsafeAccess guard(_thread); @@ -255,6 +260,68 @@ } }; +#ifdef ASSERT +/* + * Get the field descriptor of the field of the given object at the given offset. + */ +static bool get_field_descriptor(oop p, jlong offset, fieldDescriptor* fd) { + bool found = false; + Klass* k = p->klass(); + if (k->is_instance_klass()) { + InstanceKlass* ik = InstanceKlass::cast(k); + found = ik->find_field_from_offset((int)offset, false, fd); + if (!found && ik->is_mirror_instance_klass()) { + Klass* k2 = java_lang_Class::as_Klass(p); + if (k2->is_instance_klass()) { + ik = InstanceKlass::cast(k2); + found = ik->find_field_from_offset((int)offset, true, fd); + } + } + } + return found; +} +#endif // ASSERT + +static void assert_and_log_unsafe_value_access(oop p, jlong offset, ValueKlass* vk) { + Klass* k = p->klass(); +#ifdef ASSERT + if (k->is_instance_klass()) { + assert_field_offset_sane(p, offset); + fieldDescriptor fd; + bool found = get_field_descriptor(p, offset, &fd); + if (found) { + assert(found, "value field not found"); + assert(fd.is_flattened(), "field not flat"); + } else { + if (log_is_enabled(Trace, valuetypes)) { + log_trace(valuetypes)("not a field in %s at offset " SIZE_FORMAT_HEX, + p->klass()->external_name(), offset); + } + } + } else if (k->is_valueArray_klass()) { + ValueArrayKlass* vak = ValueArrayKlass::cast(k); + int index = (offset - vak->array_header_in_bytes()) / vak->element_byte_size(); + address dest = (address)((valueArrayOop)p)->value_at_addr(index, vak->layout_helper()); + assert(dest == ((address)p) + offset, "invalid offset"); + } else { + ShouldNotReachHere(); + } +#endif // ASSERT + if (log_is_enabled(Trace, valuetypes)) { + if 
(k->is_valueArray_klass()) { + ValueArrayKlass* vak = ValueArrayKlass::cast(k); + int index = (offset - vak->array_header_in_bytes()) / vak->element_byte_size(); + address dest = (address)((valueArrayOop)p)->value_at_addr(index, vak->layout_helper()); + log_trace(valuetypes)("%s array type %s index %d element size %d offset " SIZE_FORMAT_HEX " at " INTPTR_FORMAT, + p->klass()->external_name(), vak->external_name(), + index, vak->element_byte_size(), offset, p2i(dest)); + } else { + log_trace(valuetypes)("%s field type %s at offset " SIZE_FORMAT_HEX, + p->klass()->external_name(), vk->external_name(), offset); + } + } +} + // These functions allow a null base pointer with an arbitrary address. // But if the base pointer is non-null, the offset should make some sense. // That is, it should be in the range [0, MAX_OBJECT_SIZE]. @@ -269,9 +336,73 @@ oop x = JNIHandles::resolve(x_h); oop p = JNIHandles::resolve(obj); assert_field_offset_sane(p, offset); + assert(!p->is_value() || p->mark()->is_larval_state(), "must be an object instance or a larval value"); HeapAccess::oop_store_at(p, offset, x); } UNSAFE_END +UNSAFE_ENTRY(jlong, Unsafe_ValueHeaderSize(JNIEnv *env, jobject unsafe, jclass c)) { + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(c)); + ValueKlass* vk = ValueKlass::cast(k); + return vk->first_field_offset(); +} UNSAFE_END + +UNSAFE_ENTRY(jboolean, Unsafe_IsFlattenedArray(JNIEnv *env, jobject unsafe, jclass c)) { + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(c)); + return k->is_valueArray_klass(); +} UNSAFE_END + +UNSAFE_ENTRY(jobject, Unsafe_UninitializedDefaultValue(JNIEnv *env, jobject unsafe, jclass vc)) { + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(vc)); + ValueKlass* vk = ValueKlass::cast(k); + oop v = vk->default_value(); + return JNIHandles::make_local(env, v); +} UNSAFE_END + +UNSAFE_ENTRY(jobject, Unsafe_GetValue(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jclass vc)) { + 
oop base = JNIHandles::resolve(obj); + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(vc)); + ValueKlass* vk = ValueKlass::cast(k); + assert_and_log_unsafe_value_access(base, offset, vk); + Handle base_h(THREAD, base); + oop v = vk->allocate_instance(CHECK_NULL); // allocate instance + vk->initialize(CHECK_NULL); // If field is a default value, value class might not be initialized yet + vk->value_store(((address)(oopDesc*)base_h()) + offset, + vk->data_for_oop(v), + true, true); + return JNIHandles::make_local(env, v); +} UNSAFE_END + +UNSAFE_ENTRY(void, Unsafe_PutValue(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jclass vc, jobject value)) { + oop base = JNIHandles::resolve(obj); + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(vc)); + ValueKlass* vk = ValueKlass::cast(k); + assert(!base->is_value() || base->mark()->is_larval_state(), "must be an object instance or a larval value"); + assert_and_log_unsafe_value_access(base, offset, vk); + oop v = JNIHandles::resolve(value); + vk->value_store(vk->data_for_oop(v), + ((address)(oopDesc*)base) + offset, true, true); +} UNSAFE_END + +UNSAFE_ENTRY(jobject, Unsafe_MakePrivateBuffer(JNIEnv *env, jobject unsafe, jobject value)) { + oop v = JNIHandles::resolve_non_null(value); + assert(v->is_value(), "must be a value instance"); + Handle vh(THREAD, v); + ValueKlass* vk = ValueKlass::cast(v->klass()); + instanceOop new_value = vk->allocate_instance(CHECK_NULL); + vk->value_store(vk->data_for_oop(vh()), vk->data_for_oop(new_value), true, false); + markOop mark = new_value->mark(); + new_value->set_mark(mark->enter_larval_state()); + return JNIHandles::make_local(env, new_value); +} UNSAFE_END + +UNSAFE_ENTRY(jobject, Unsafe_FinishPrivateBuffer(JNIEnv *env, jobject unsafe, jobject value)) { + oop v = JNIHandles::resolve(value); + assert(v->mark()->is_larval_state(), "must be a larval value"); + markOop mark = v->mark(); + v->set_mark(mark->exit_larval_state()); + return 
JNIHandles::make_local(env, v); +} UNSAFE_END + UNSAFE_ENTRY(jobject, Unsafe_GetReferenceVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { oop p = JNIHandles::resolve(obj); assert_field_offset_sane(p, offset); @@ -573,6 +704,11 @@ base = tak->array_header_in_bytes(); assert(base == arrayOopDesc::base_offset_in_bytes(tak->element_type()), "array_header_size semantics ok"); scale = (1 << tak->log2_element_size()); + } else if (k->is_valueArray_klass()) { + ValueArrayKlass* vak = ValueArrayKlass::cast(k); + ValueKlass* vklass = vak->element_klass(); + base = vak->array_header_in_bytes(); + scale = vak->element_byte_size(); } else { ShouldNotReachHere(); } @@ -1038,10 +1174,10 @@ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) #define DECLARE_GETPUTOOP(Type, Desc) \ - {CC "get" #Type, CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type)}, \ - {CC "put" #Type, CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type)}, \ - {CC "get" #Type "Volatile", CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type##Volatile)}, \ - {CC "put" #Type "Volatile", CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type##Volatile)} + {CC "get" #Type, CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type)}, \ + {CC "put" #Type, CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type)}, \ + {CC "get" #Type "Volatile", CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type##Volatile)}, \ + {CC "put" #Type "Volatile", CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type##Volatile)} static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = { @@ -1050,6 +1186,14 @@ {CC "getReferenceVolatile", CC "(" OBJ "J)" OBJ, FN_PTR(Unsafe_GetReferenceVolatile)}, {CC "putReferenceVolatile", CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_PutReferenceVolatile)}, + {CC "isFlattenedArray", CC "(" CLS ")Z", FN_PTR(Unsafe_IsFlattenedArray)}, + {CC "getValue", CC "(" OBJ "J" CLS ")" OBJ, FN_PTR(Unsafe_GetValue)}, + {CC "putValue", CC "(" OBJ "J" CLS OBJ ")V", FN_PTR(Unsafe_PutValue)}, + {CC "uninitializedDefaultValue", CC "(" CLS ")" OBJ, 
FN_PTR(Unsafe_UninitializedDefaultValue)}, + {CC "makePrivateBuffer", CC "(" OBJ ")" OBJ, FN_PTR(Unsafe_MakePrivateBuffer)}, + {CC "finishPrivateBuffer", CC "(" OBJ ")" OBJ, FN_PTR(Unsafe_FinishPrivateBuffer)}, + {CC "valueHeaderSize", CC "(" CLS ")J", FN_PTR(Unsafe_ValueHeaderSize)}, + {CC "getUncompressedObject", CC "(" ADR ")" OBJ, FN_PTR(Unsafe_GetUncompressedObject)}, DECLARE_GETPUTOOP(Boolean, Z), --- old/src/hotspot/share/prims/whitebox.cpp 2019-03-11 14:27:07.906354301 +0100 +++ new/src/hotspot/share/prims/whitebox.cpp 2019-03-11 14:27:07.690354304 +0100 @@ -39,16 +39,18 @@ #include "memory/heapShared.inline.hpp" #include "memory/metaspaceShared.hpp" #include "memory/metadataFactory.hpp" -#include "memory/iterator.hpp" +#include "memory/iterator.inline.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "memory/oopFactory.hpp" #include "oops/array.hpp" +#include "oops/compressedOops.inline.hpp" #include "oops/constantPool.inline.hpp" #include "oops/method.inline.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/resolvedMethodTable.hpp" #include "prims/wbtestmethods/parserTests.hpp" @@ -1769,6 +1771,98 @@ return ConstantPool::encode_invokedynamic_index(index); WB_END +WB_ENTRY(jobjectArray, WB_getObjectsViaKlassOopMaps(JNIEnv* env, jobject wb, jobject thing)) + oop aoop = JNIHandles::resolve(thing); + if (!aoop->is_instance()) { + return NULL; + } + instanceHandle ih(THREAD, (instanceOop) aoop); + InstanceKlass* klass = InstanceKlass::cast(aoop->klass()); + if (klass->nonstatic_oop_map_count() == 0) { + return NULL; + } + const OopMapBlock* map = klass->start_of_nonstatic_oop_maps(); + const OopMapBlock* const end = map + klass->nonstatic_oop_map_count(); + int oop_count = 0; + while (map < end) { + oop_count += map->count(); + map++; + } + + objArrayOop result_array = + 
oopFactory::new_objArray(SystemDictionary::Object_klass(), oop_count, CHECK_NULL); + map = klass->start_of_nonstatic_oop_maps(); + instanceOop ioop = ih(); + int index = 0; + while (map < end) { + int offset = map->offset(); + for (unsigned int j = 0; j < map->count(); j++) { + result_array->obj_at_put(index++, ioop->obj_field(offset)); + offset += heapOopSize; + } + map++; + } + return (jobjectArray)JNIHandles::make_local(env, result_array); +WB_END + +class CollectOops : public BasicOopIterateClosure { + public: + GrowableArray* array; + + objArrayOop create_results(TRAPS) { + objArrayOop result_array = + oopFactory::new_objArray(SystemDictionary::Object_klass(), array->length(), CHECK_NULL); + for (int i = 0 ; i < array->length(); i++) { + result_array->obj_at_put(i, array->at(i)()); + } + return result_array; + } + + jobjectArray create_jni_result(JNIEnv* env, TRAPS) { + return (jobjectArray)JNIHandles::make_local(env, create_results(THREAD)); + } + + void add_oop(oop o) { + // Value might be oop, but JLS can't see as Object, just iterate through it... 
+ if (o != NULL && o->is_value()) { + o->oop_iterate(this); + } else { + array->append(Handle(Thread::current(), o)); + } + } + + void do_oop(oop* o) { add_oop(*o); } + void do_oop(narrowOop* v) { add_oop(CompressedOops::decode(*v)); } +}; + + +WB_ENTRY(jobjectArray, WB_getObjectsViaOopIterator(JNIEnv* env, jobject wb, jobject thing)) + ResourceMark rm(THREAD); + GrowableArray* array = new GrowableArray(128); + CollectOops collectOops; + collectOops.array = array; + + JNIHandles::resolve(thing)->oop_iterate(&collectOops); + + return collectOops.create_jni_result(env, THREAD); +WB_END + +WB_ENTRY(jobjectArray, WB_getObjectsViaFrameOopIterator(JNIEnv* env, jobject wb, jint depth)) + ResourceMark rm(THREAD); + GrowableArray* array = new GrowableArray(128); + CollectOops collectOops; + collectOops.array = array; + StackFrameStream sfs(thread); + while (depth > 0) { // Skip the native WB API frame + sfs.next(); + frame* f = sfs.current(); + f->oops_do(&collectOops, NULL, sfs.register_map()); + depth--; + } + return collectOops.create_jni_result(env, THREAD); +WB_END + + WB_ENTRY(void, WB_ClearInlineCaches(JNIEnv* env, jobject wb, jboolean preserve_static_stubs)) VM_ClearICs clear_ics(preserve_static_stubs == JNI_TRUE); VMThread::execute(&clear_ics); @@ -2294,6 +2388,12 @@ CC"(Ljava/lang/Class;I)I", (void*)&WB_ConstantPoolRemapInstructionOperandFromCache}, {CC"encodeConstantPoolIndyIndex0", CC"(I)I", (void*)&WB_ConstantPoolEncodeIndyIndex}, + {CC"getObjectsViaKlassOopMaps0", + CC"(Ljava/lang/Object;)[Ljava/lang/Object;", (void*)&WB_getObjectsViaKlassOopMaps}, + {CC"getObjectsViaOopIterator0", + CC"(Ljava/lang/Object;)[Ljava/lang/Object;",(void*)&WB_getObjectsViaOopIterator}, + {CC"getObjectsViaFrameOopIterator", + CC"(I)[Ljava/lang/Object;", (void*)&WB_getObjectsViaFrameOopIterator}, {CC"getMethodBooleanOption", CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Boolean;", (void*)&WB_GetMethodBooleaneOption}, --- old/src/hotspot/share/runtime/arguments.cpp 
2019-03-11 14:27:08.354354295 +0100 +++ new/src/hotspot/share/runtime/arguments.cpp 2019-03-11 14:27:08.138354298 +0100 @@ -2051,6 +2051,43 @@ status = status && GCArguments::check_args_consistency(); + if (LP64_ONLY(false &&) !FLAG_IS_DEFAULT(ValueTypePassFieldsAsArgs)) { + FLAG_SET_CMDLINE(bool, ValueTypePassFieldsAsArgs, false); + warning("ValueTypePassFieldsAsArgs is not supported on this platform"); + } + + if (LP64_ONLY(false &&) !FLAG_IS_DEFAULT(ValueTypeReturnedAsFields)) { + FLAG_SET_CMDLINE(bool, ValueTypeReturnedAsFields, false); + warning("ValueTypeReturnedAsFields is not supported on this platform"); + } + + if (EnableValhalla) { + if (!EnableValhallaC1) { + // C1 support for value types is incomplete. Don't use it by default. + if (!FLAG_IS_DEFAULT(TieredCompilation)) { + warning("TieredCompilation disabled because value types are not supported by C1"); + } + FLAG_SET_CMDLINE(bool, TieredCompilation, false); + } else { + if (TieredStopAtLevel > 1) { + warning("C1 doesn't work with C2 yet. Forcing TieredStopAtLevel=1"); + FLAG_SET_CMDLINE(intx, TieredStopAtLevel, 1); + } + if (ValueTypePassFieldsAsArgs) { + warning("C1 doesn't work with ValueTypePassFieldsAsArgs yet. Forcing ValueTypePassFieldsAsArgs=false"); + FLAG_SET_CMDLINE(bool, ValueTypePassFieldsAsArgs, false); + } + if (ValueTypeReturnedAsFields) { + warning("C1 doesn't work with ValueTypeReturnedAsFields yet. 
Forcing ValueTypeReturnedAsFields=false"); + FLAG_SET_CMDLINE(bool, ValueTypeReturnedAsFields, false); + } + } + } else { + FLAG_SET_CMDLINE(bool, ValueArrayFlatten, false); + } + if (!EnableValhalla && ACmpOnValues != 3) { + FLAG_SET_CMDLINE(uint, ACmpOnValues, 0); + } return status; } @@ -2927,6 +2964,12 @@ } } + if (EnableValhalla) { + if (!create_property("valhalla.enableValhalla", "true", InternalProperty)) { + return JNI_ENOMEM; + } + } + // PrintSharedArchiveAndExit will turn on // -Xshare:on // -Xlog:class+path=info @@ -3917,6 +3960,12 @@ warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used"); } + if (!EnableValhalla || is_interpreter_only()) { + // Disable calling convention optimizations if value types are not supported + ValueTypePassFieldsAsArgs = false; + ValueTypeReturnedAsFields = false; + } + #ifndef PRODUCT if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) { if (use_vm_log()) { --- old/src/hotspot/share/runtime/biasedLocking.cpp 2019-03-11 14:27:08.802354289 +0100 +++ new/src/hotspot/share/runtime/biasedLocking.cpp 2019-03-11 14:27:08.594354292 +0100 @@ -49,7 +49,9 @@ static GrowableArray* _preserved_mark_stack = NULL; static void enable_biased_locking(InstanceKlass* k) { - k->set_prototype_header(markOopDesc::biased_locking_prototype()); + if (!k->is_value()) { + k->set_prototype_header(markOopDesc::biased_locking_prototype()); + } } class VM_EnableBiasedLocking: public VM_Operation { --- old/src/hotspot/share/runtime/deoptimization.cpp 2019-03-11 14:27:09.246354283 +0100 +++ new/src/hotspot/share/runtime/deoptimization.cpp 2019-03-11 14:27:09.038354286 +0100 @@ -42,6 +42,9 @@ #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueArrayKlass.hpp" +#include "oops/valueArrayOop.hpp" +#include "oops/valueKlass.hpp" #include "oops/verifyOopClosure.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/biasedLocking.hpp" @@ -218,26 
+221,42 @@ // is set during method compilation (see Compile::Process_OopMap_Node()). // If the previous frame was popped or if we are dispatching an exception, // we don't have an oop result. - bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt); - Handle return_value; + ScopeDesc* scope = chunk->at(0)->scope(); + bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt); + // In case of the return of multiple values, we must take care + // of all oop return values. + GrowableArray return_oops; + ValueKlass* vk = NULL; + if (save_oop_result && scope->return_vt()) { + vk = ValueKlass::returned_value_klass(map); + if (vk != NULL) { + vk->save_oop_fields(map, return_oops); + save_oop_result = false; + } + } if (save_oop_result) { // Reallocation may trigger GC. If deoptimization happened on return from // call which returns oop we need to save it since it is not in oopmap. 
oop result = deoptee.saved_oop_result(&map); assert(oopDesc::is_oop_or_null(result), "must be oop"); - return_value = Handle(thread, result); + return_oops.push(Handle(thread, result)); assert(Universe::heap()->is_in_or_null(result), "must be heap pointer"); if (TraceDeoptimization) { ttyLocker ttyl; tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread)); } } - if (objects != NULL) { + if (objects != NULL || vk != NULL) { + bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci(); JRT_BLOCK - realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD); + if (vk != NULL) { + realloc_failures = realloc_value_type_result(vk, map, return_oops, THREAD); + } + if (objects != NULL) { + realloc_failures = realloc_failures || realloc_objects(thread, &deoptee, objects, THREAD); + reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD); + } JRT_END - bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci(); - reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal); #ifndef PRODUCT if (TraceDeoptimization) { ttyLocker ttyl; @@ -246,9 +265,10 @@ } #endif } - if (save_oop_result) { + if (save_oop_result || vk != NULL) { // Restore result. - deoptee.set_saved_oop_result(&map, return_value()); + assert(return_oops.length() == 1, "no value type"); + deoptee.set_saved_oop_result(&map, return_oops.pop()()); } #if !INCLUDE_JVMCI } @@ -485,7 +505,7 @@ caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); } - // If the sender is deoptimized the we must retrieve the address of the handler + // If the sender is deoptimized we must retrieve the address of the handler // since the frame will "magically" show the original pc before the deopt // and we'd undo the deopt. 
@@ -807,6 +827,10 @@ if (k->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(k); obj = ik->allocate_instance(THREAD); + } else if (k->is_valueArray_klass()) { + ValueArrayKlass* ak = ValueArrayKlass::cast(k); + // Value type array must be zeroed because not all memory is reassigned + obj = ak->allocate(sv->field_size(), THREAD); } else if (k->is_typeArray_klass()) { TypeArrayKlass* ak = TypeArrayKlass::cast(k); assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); @@ -836,6 +860,21 @@ return failures; } +// We're deoptimizing at the return of a call, value type fields are +// in registers. When we go back to the interpreter, it will expect a +// reference to a value type instance. Allocate and initialize it from +// the register values here. +bool Deoptimization::realloc_value_type_result(ValueKlass* vk, const RegisterMap& map, GrowableArray& return_oops, TRAPS) { + oop new_vt = vk->realloc_result(map, return_oops, THREAD); + if (new_vt == NULL) { + CLEAR_PENDING_EXCEPTION; + THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true); + } + return_oops.clear(); + return_oops.push(Handle(THREAD, new_vt)); + return false; +} + // restore elements of an eliminated type array void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { int index = 0; @@ -946,10 +985,12 @@ public: int _offset; BasicType _type; + InstanceKlass* _klass; public: ReassignedField() { _offset = 0; _type = T_ILLEGAL; + _klass = NULL; } }; @@ -959,9 +1000,9 @@ // Restore fields of an eliminated instance object using the same field order // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true) -static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) { +static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, 
int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) { if (klass->superklass() != NULL) { - svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal); + svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal, 0, CHECK_0); } GrowableArray* fields = new GrowableArray(); @@ -970,6 +1011,15 @@ ReassignedField field; field._offset = fs.offset(); field._type = FieldType::basic_type(fs.signature()); + if (field._type == T_VALUETYPE) { + field._type = T_OBJECT; + } + if (fs.is_flattened()) { + // Resolve klass of flattened value type field + Klass* vk = klass->get_value_field_klass(fs.index()); + field._klass = ValueKlass::cast(vk); + field._type = T_VALUETYPE; + } fields->append(field); } } @@ -978,14 +1028,24 @@ intptr_t val; ScopeValue* scope_field = sv->field_at(svIndex); StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field); - int offset = fields->at(i)._offset; + int offset = base_offset + fields->at(i)._offset; BasicType type = fields->at(i)._type; switch (type) { - case T_OBJECT: case T_ARRAY: + case T_OBJECT: + case T_ARRAY: assert(value->type() == T_OBJECT, "Agreement."); obj->obj_field_put(offset, value->get_obj()()); break; + case T_VALUETYPE: { + // Recursively re-assign flattened value type fields + InstanceKlass* vk = fields->at(i)._klass; + assert(vk != NULL, "must be resolved"); + offset -= ValueKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header + svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0); + continue; // Continue because we don't need to increment svIndex + } + // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. case T_INT: case T_FLOAT: { // 4 bytes. 
assert(value->type() == T_INT, "Agreement."); @@ -1066,8 +1126,22 @@ return svIndex; } +// restore fields of an eliminated value type array +void Deoptimization::reassign_value_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, valueArrayOop obj, ValueArrayKlass* vak, TRAPS) { + ValueKlass* vk = vak->element_klass(); + assert(vk->flatten_array(), "should only be used for flattened value type arrays"); + // Adjust offset to omit oop header + int base_offset = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE) - ValueKlass::cast(vk)->first_field_offset(); + // Initialize all elements of the flattened value type array + for (int i = 0; i < sv->field_size(); i++) { + ScopeValue* val = sv->field_at(i); + int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper())); + reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, false /* skip_internal */, offset, CHECK); + } +} + // restore fields of all eliminated objects and arrays -void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures, bool skip_internal) { +void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures, bool skip_internal, TRAPS) { for (int i = 0; i < objects->length(); i++) { ObjectValue* sv = (ObjectValue*) objects->at(i); Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); @@ -1082,7 +1156,10 @@ if (k->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(k); - reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal); + reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK); + } else if (k->is_valueArray_klass()) { + ValueArrayKlass* vak = ValueArrayKlass::cast(k); + reassign_value_array_elements(fr, reg_map, sv, (valueArrayOop) obj(), vak, CHECK); } else if (k->is_typeArray_klass()) { TypeArrayKlass* ak = TypeArrayKlass::cast(k); 
reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type()); --- old/src/hotspot/share/runtime/deoptimization.hpp 2019-03-11 14:27:09.698354277 +0100 +++ new/src/hotspot/share/runtime/deoptimization.hpp 2019-03-11 14:27:09.470354280 +0100 @@ -160,9 +160,11 @@ // Support for restoring non-escaping objects static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray* objects, TRAPS); + static bool realloc_value_type_result(ValueKlass* vk, const RegisterMap& map, GrowableArray& return_oops, TRAPS); static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type); static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj); - static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures, bool skip_internal); + static void reassign_value_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, valueArrayOop obj, ValueArrayKlass* vak, TRAPS); + static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures, bool skip_internal, TRAPS); static void relock_objects(GrowableArray* monitors, JavaThread* thread, bool realloc_failures); static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array); NOT_PRODUCT(static void print_objects(GrowableArray* objects, bool realloc_failures);) --- old/src/hotspot/share/runtime/fieldDescriptor.cpp 2019-03-11 14:27:10.142354270 +0100 +++ new/src/hotspot/share/runtime/fieldDescriptor.cpp 2019-03-11 14:27:09.918354274 +0100 @@ -32,6 +32,7 @@ #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" +#include "oops/valueKlass.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/signature.hpp" @@ -147,8 +148,10 @@ } void fieldDescriptor::print_on_for(outputStream* st, oop obj) { - 
print_on(st); BasicType ft = field_type(); + if (ft != T_VALUETYPE) { + print_on(st); + } jint as_int = 0; switch (ft) { case T_BYTE: @@ -204,6 +207,30 @@ st->print_cr("NULL"); } break; + case T_VALUETYPE: + if (is_flattened()) { + // Resolve klass of flattened value type field + Thread* THREAD = Thread::current(); + ResourceMark rm(THREAD); + SignatureStream ss(signature(), false); + Klass* k = ss.as_klass(Handle(THREAD, field_holder()->class_loader()), + Handle(THREAD, field_holder()->protection_domain()), + SignatureStream::ReturnNull, THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "can resolve klass?"); + ValueKlass* vk = ValueKlass::cast(k); + int field_offset = offset() - vk->first_field_offset(); + obj = (oop)((address)obj + field_offset); + // Print flattened fields of the value type field + st->print_cr("Flattened value type '%s':", vk->name()->as_C_string()); + FieldPrinter print_field(st, obj); + vk->do_nonstatic_fields(&print_field); + return; // Do not print underlying representation + } else { + st->print(" "); + NOT_LP64(as_int = obj->int_field(offset())); + obj->obj_field(offset())->print_value_on(st); + } + break; default: ShouldNotReachHere(); break; @@ -211,7 +238,7 @@ // Print a hint as to the underlying integer representation. 
This can be wrong for // pointers on an LP64 machine #ifdef _LP64 - if ((ft == T_OBJECT || ft == T_ARRAY) && UseCompressedOops) { + if ((ft == T_OBJECT || ft == T_ARRAY || ft == T_VALUETYPE) && UseCompressedOops) { st->print(" (%x)", obj->int_field(offset())); } else // <- intended @@ -221,6 +248,7 @@ } else if (as_int < 0 || as_int > 9) { st->print(" (%x)", as_int); } + st->cr(); } #endif /* PRODUCT */ --- old/src/hotspot/share/runtime/fieldDescriptor.hpp 2019-03-11 14:27:10.570354264 +0100 +++ new/src/hotspot/share/runtime/fieldDescriptor.hpp 2019-03-11 14:27:10.362354267 +0100 @@ -94,6 +94,8 @@ bool is_stable() const { return access_flags().is_stable(); } bool is_volatile() const { return access_flags().is_volatile(); } bool is_transient() const { return access_flags().is_transient(); } + inline bool is_flattened() const; + inline bool is_flattenable() const; bool is_synthetic() const { return access_flags().is_synthetic(); } --- old/src/hotspot/share/runtime/fieldDescriptor.inline.hpp 2019-03-11 14:27:11.010354258 +0100 +++ new/src/hotspot/share/runtime/fieldDescriptor.inline.hpp 2019-03-11 14:27:10.794354261 +0100 @@ -79,4 +79,7 @@ return FieldType::basic_type(signature()); } -#endif // SHARE_RUNTIME_FIELDDESCRIPTOR_INLINE_HPP +inline bool fieldDescriptor::is_flattened() const { return field()->is_flattened(); } +inline bool fieldDescriptor::is_flattenable() const { return field()->is_flattenable(); } + +#endif // SHARE_RUNTIME_FIELDDESCRIPTOR_INLINE_HPP \ No newline at end of file --- old/src/hotspot/share/runtime/fieldType.cpp 2019-03-11 14:27:11.442354252 +0100 +++ new/src/hotspot/share/runtime/fieldType.cpp 2019-03-11 14:27:11.226354255 +0100 @@ -57,15 +57,15 @@ case 'Z': // T_BOOLEAN // If it is an array, the type is the last character return (i + 1 == len); + case 'Q': // fall through case 'L': - // If it is an object, the last character must be a ';' + // If it is a class name, the last character must be a ';' return sig->char_at(len - 1) == ';'; } 
return false; } - BasicType FieldType::get_array_info(Symbol* signature, FieldArrayInfo& fd, TRAPS) { assert(basic_type(signature) == T_ARRAY, "must be array"); int index = 1; @@ -77,7 +77,7 @@ ResourceMark rm; char *element = signature->as_C_string() + index; BasicType element_type = char2type(element[0]); - if (element_type == T_OBJECT) { + if (element_type == T_OBJECT || element_type == T_VALUETYPE) { int len = (int)strlen(element); assert(element[len-1] == ';', "last char should be a semicolon"); element[len-1] = '\0'; // chop off semicolon --- old/src/hotspot/share/runtime/fieldType.hpp 2019-03-11 14:27:11.882354246 +0100 +++ new/src/hotspot/share/runtime/fieldType.hpp 2019-03-11 14:27:11.670354249 +0100 @@ -68,6 +68,10 @@ (signature->char_at(sig_length - 1) == ';')); } + static bool is_valuetype(Symbol* signature) { + return signature->is_Q_signature(); + } + // Parse field and extract array information. Works for T_ARRAY only. static BasicType get_array_info(Symbol* signature, FieldArrayInfo& ai, TRAPS); }; --- old/src/hotspot/share/runtime/frame.cpp 2019-03-11 14:27:12.414354239 +0100 +++ new/src/hotspot/share/runtime/frame.cpp 2019-03-11 14:27:12.138354243 +0100 @@ -36,6 +36,7 @@ #include "oops/method.hpp" #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "oops/verifyOopClosure.hpp" #include "prims/methodHandles.hpp" #include "runtime/frame.inline.hpp" @@ -732,7 +733,7 @@ public: InterpreterFrameClosure(frame* fr, int max_locals, int max_stack, - OopClosure* f) { + OopClosure* f, BufferedValueClosure* bvt_f) { _fr = fr; _max_locals = max_locals; _max_stack = max_stack; @@ -744,7 +745,9 @@ if (offset < _max_locals) { addr = (oop*) _fr->interpreter_frame_local_at(offset); assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame"); - _f->do_oop(addr); + if (_f != NULL) { + _f->do_oop(addr); + } } else { addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals)); // In case of 
exceptions, the expression stack is invalid and the esp will be reset to express @@ -756,7 +759,9 @@ in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address(); } if (in_stack) { - _f->do_oop(addr); + if (_f != NULL) { + _f->do_oop(addr); + } } } } @@ -775,7 +780,7 @@ void set(int size, BasicType type) { _offset -= size; - if (type == T_OBJECT || type == T_ARRAY) oop_offset_do(); + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) oop_offset_do(); } void oop_offset_do() { @@ -828,7 +833,7 @@ void set(int size, BasicType type) { assert (_offset >= 0, "illegal offset"); - if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset); + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) oop_at_offset_do(_offset); _offset -= size; } @@ -929,7 +934,7 @@ } } - InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f); + InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f, NULL); // process locals & expression stack InterpreterOopMap mask; @@ -941,6 +946,23 @@ mask.iterate_oop(&blk); } +void frame::buffered_values_interpreted_do(BufferedValueClosure* f) { + assert(is_interpreted_frame(), "Not an interpreted frame"); + Thread *thread = Thread::current(); + methodHandle m (thread, interpreter_frame_method()); + jint bci = interpreter_frame_bci(); + + assert(m->is_method(), "checking frame value"); + assert(!m->is_native() && bci >= 0 && bci < m->code_size(), + "invalid bci value"); + + InterpreterFrameClosure blk(this, m->max_locals(), m->max_stack(), NULL, f); + + // process locals & expression stack + InterpreterOopMap mask; + m->mask_for(bci, &mask); + mask.iterate_oop(&blk); +} void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) { InterpretedArgumentOopFinder finder(signature, has_receiver, this, f); @@ -979,20 +1001,21 @@ VMRegPair* _regs; // VMReg list of arguments void set(int size, BasicType type) { - if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset(); + 
if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) handle_oop_offset(); _offset += size; } virtual void handle_oop_offset() { // Extract low order register number from register array. // In LP64-land, the high-order bits are valid but unhelpful. + assert(_offset < _arg_size, "out of bounds"); VMReg reg = _regs[_offset].first(); oop *loc = _fr.oopmapreg_to_location(reg, _reg_map); _f->do_oop(loc); } public: - CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map) + CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map) : SignatureInfo(signature) { // initialize CompiledArgumentOopFinder @@ -1002,11 +1025,7 @@ _has_appendix = has_appendix; _fr = fr; _reg_map = (RegisterMap*)reg_map; - _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0); - - int arg_size; - _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size); - assert(arg_size == _arg_size, "wrong arg size"); + _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size); } void oops_do() { --- old/src/hotspot/share/runtime/frame.hpp 2019-03-11 14:27:12.942354232 +0100 +++ new/src/hotspot/share/runtime/frame.hpp 2019-03-11 14:27:12.702354235 +0100 @@ -363,6 +363,7 @@ // Oops-do's void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f); void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true); + void buffered_values_interpreted_do(BufferedValueClosure* f); private: void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); --- old/src/hotspot/share/runtime/globals.hpp 2019-03-11 14:27:13.454354225 +0100 +++ new/src/hotspot/share/runtime/globals.hpp 2019-03-11 14:27:13.214354228 
+0100 @@ -908,6 +908,27 @@ notproduct(bool, PrintFieldLayout, false, \ "Print field layout for each class") \ \ + notproduct(bool, PrintValueLayout, false, \ + "Print field layout for each value type") \ + \ + notproduct(bool, PrintValueArrayLayout, false, \ + "Print array layout for each value type array") \ + \ + product(bool, ValueArrayFlatten, true, \ + "Flatten value array elements, if possible") \ + \ + product(intx, ValueArrayElemMaxFlatSize, -1, \ + "Max size for flattening value array elements, <0 no limit") \ + \ + product(intx, ValueFieldMaxFlatSize, 128, \ + "Max size for flattening value type fields, <0 no limit") \ + \ + product(intx, ValueArrayElemMaxFlatOops, 4, \ + "Max nof embedded object references in a value type to flatten, <0 no limit") \ + \ + product(bool, ValueArrayAtomicAccess, false, \ + "Atomic value array accesses by-default, for all value arrays") \ + \ /* Need to limit the extent of the padding to reasonable size. */\ /* 8K is well beyond the reasonable HW cache line size, even with */\ /* aggressive prefetching, while still leaving the room for segregating */\ @@ -2544,7 +2565,37 @@ "Start flight recording with options")) \ \ experimental(bool, UseFastUnorderedTimeStamps, false, \ - "Use platform unstable time where supported for timestamps only") + "Use platform unstable time where supported for timestamps only") \ + \ + product(bool, EnableValhalla, false, \ + "Enable experimental Valhalla features") \ + \ + product(bool, EnableValhallaC1, false, \ + "Enable C1 compiler for Valhalla") \ + \ + product_pd(bool, ValueTypePassFieldsAsArgs, \ + "Pass each value type field as an argument at calls") \ + \ + product_pd(bool, ValueTypeReturnedAsFields, \ + "Return fields instead of a value type reference") \ + \ + develop(bool, StressValueTypePassFieldsAsArgs, false, \ + "Stress passing each value type field as an argument at calls") \ + \ + develop(bool, StressValueTypeReturnedAsFields, false, \ + "Stress return of fields instead of a 
value type reference") \ + \ + develop(bool, ScalarizeValueTypes, true, \ + "Scalarize value types in compiled code") \ + \ + experimental(uint, ACmpOnValues, 2, \ + "0 = regular acmp" \ + "1 = always false for value, perturbation scheme" \ + "2 = always false for value" \ + "3 = substitutability test") \ + range(0, 3) \ + + #define VM_FLAGS(develop, \ develop_pd, \ --- old/src/hotspot/share/runtime/handles.cpp 2019-03-11 14:27:13.938354218 +0100 +++ new/src/hotspot/share/runtime/handles.cpp 2019-03-11 14:27:13.694354221 +0100 @@ -26,6 +26,8 @@ #include "memory/allocation.inline.hpp" #include "oops/constantPool.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" +#include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/thread.inline.hpp" @@ -96,7 +98,10 @@ // during GC phase 3, a handle may be a forward pointer that // is not yet valid, so loosen the assertion while (bottom < top) { - f->do_oop(bottom++); + if (Universe::heap()->is_in_reserved_or_null(*bottom)) { + f->do_oop(bottom); + } + bottom++; } return handles_visited; } --- old/src/hotspot/share/runtime/handles.hpp 2019-03-11 14:27:14.386354212 +0100 +++ new/src/hotspot/share/runtime/handles.hpp 2019-03-11 14:27:14.170354215 +0100 @@ -29,6 +29,7 @@ #include "oops/oop.hpp" #include "oops/oopsHierarchy.hpp" +class ValueKlass; class InstanceKlass; class Klass; class Thread; @@ -122,6 +123,7 @@ DEF_HANDLE(array , is_array_noinline ) DEF_HANDLE(objArray , is_objArray_noinline ) DEF_HANDLE(typeArray , is_typeArray_noinline ) +DEF_HANDLE(valueArray , is_valueArray_noinline ) //------------------------------------------------------------------------------------------------------------------------ --- old/src/hotspot/share/runtime/handles.inline.hpp 2019-03-11 14:27:14.838354205 +0100 +++ new/src/hotspot/share/runtime/handles.inline.hpp 2019-03-11 14:27:14.606354209 +0100 @@ -52,6 +52,7 @@ DEF_HANDLE_CONSTR(array , is_array_noinline ) DEF_HANDLE_CONSTR(objArray , 
is_objArray_noinline ) DEF_HANDLE_CONSTR(typeArray, is_typeArray_noinline) +DEF_HANDLE_CONSTR(valueArray, is_valueArray_noinline) // Constructor for metadata handles #define DEF_METADATA_HANDLE_FN(name, type) \ --- old/src/hotspot/share/runtime/javaCalls.cpp 2019-03-11 14:27:15.846354192 +0100 +++ new/src/hotspot/share/runtime/javaCalls.cpp 2019-03-11 14:27:15.610354195 +0100 @@ -32,6 +32,7 @@ #include "memory/universe.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "prims/jniCheck.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/handles.inline.hpp" @@ -158,22 +159,24 @@ // Helper methods static BasicType runtime_type_from(JavaValue* result) { switch (result->get_type()) { - case T_BOOLEAN: // fall through - case T_CHAR : // fall through - case T_SHORT : // fall through - case T_INT : // fall through + case T_BOOLEAN : // fall through + case T_CHAR : // fall through + case T_SHORT : // fall through + case T_INT : // fall through #ifndef _LP64 - case T_OBJECT : // fall through - case T_ARRAY : // fall through + case T_OBJECT : // fall through + case T_ARRAY : // fall through + case T_VALUETYPE: // fall through #endif - case T_BYTE : // fall through - case T_VOID : return T_INT; - case T_LONG : return T_LONG; - case T_FLOAT : return T_FLOAT; - case T_DOUBLE : return T_DOUBLE; + case T_BYTE : // fall through + case T_VOID : return T_INT; + case T_LONG : return T_LONG; + case T_FLOAT : return T_FLOAT; + case T_DOUBLE : return T_DOUBLE; #ifdef _LP64 - case T_ARRAY : // fall through - case T_OBJECT: return T_OBJECT; + case T_ARRAY : // fall through + case T_OBJECT : return T_OBJECT; + case T_VALUETYPE: return T_VALUETYPE; #endif default: ShouldNotReachHere(); @@ -397,7 +400,8 @@ // Figure out if the result value is an oop or not (Note: This is a different value // than result_type. result_type will be T_INT of oops. 
(it is about size) BasicType result_type = runtime_type_from(result); - bool oop_result_flag = (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY); + bool oop_result_flag = (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY + || result->get_type() == T_VALUETYPE); // NOTE: if we move the computation of the result_val_address inside // the call to call_stub, the optimizer produces wrong code. @@ -617,6 +621,7 @@ void do_long() { check_long(T_LONG); } void do_void() { check_return_type(T_VOID); } void do_object(int begin, int end) { check_obj(T_OBJECT); } + void do_valuetype(int begin, int end){ check_obj(T_VALUETYPE); } void do_array(int begin, int end) { check_obj(T_OBJECT); } }; --- old/src/hotspot/share/runtime/mutexLocker.cpp 2019-03-11 14:27:16.798354178 +0100 +++ new/src/hotspot/share/runtime/mutexLocker.cpp 2019-03-11 14:27:16.582354181 +0100 @@ -244,7 +244,7 @@ def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_sometimes); def(CodeCache_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); def(RawMonitor_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); - def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for oop_map_cache allocation. + def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never); // used for oop_map_cache allocation. def(MetaspaceExpand_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never); def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always); --- old/src/hotspot/share/runtime/perfMemory.hpp 2019-03-11 14:27:17.226354172 +0100 +++ new/src/hotspot/share/runtime/perfMemory.hpp 2019-03-11 14:27:17.014354175 +0100 @@ -82,7 +82,7 @@ jint name_offset; // offset of the data item name jint vector_length; // length of the vector. 
If 0, then scalar jbyte data_type; // type of the data item - - // 'B','Z','J','I','S','C','D','F','V','L','[' + // 'B','Z','J','I','S','C','D','F','V','L','Q','[' jbyte flags; // flags indicating misc attributes jbyte data_units; // unit of measure for the data type jbyte data_variability; // variability classification of data type --- old/src/hotspot/share/runtime/reflection.cpp 2019-03-11 14:27:17.674354166 +0100 +++ new/src/hotspot/share/runtime/reflection.cpp 2019-03-11 14:27:17.458354169 +0100 @@ -40,6 +40,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" @@ -51,6 +52,7 @@ #include "runtime/signature.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vframe.inline.hpp" +#include "utilities/globalDefinitions.hpp" static void trace_class_resolution(const Klass* to_class) { ResourceMark rm; @@ -92,7 +94,7 @@ if (type == T_VOID) { return NULL; } - if (type == T_OBJECT || type == T_ARRAY) { + if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) { // regular objects are not boxed return (oop) value->l; } @@ -342,7 +344,7 @@ if (k->is_array_klass() && ArrayKlass::cast(k)->dimension() >= MAX_DIM) { THROW_0(vmSymbols::java_lang_IllegalArgumentException()); } - return oopFactory::new_objArray(k, length, THREAD); + return oopFactory::new_array(k, length, THREAD); } } @@ -750,13 +752,24 @@ ); } +// Returns Q-mirror if qtype_if_value is true and k is a ValueKlass; +// otherwise returns java_mirror or L-mirror for ValueKlass +static oop java_mirror(Klass* k, jboolean qtype_if_value) { + if (qtype_if_value && k->is_value()) { + ValueKlass* vk = ValueKlass::cast(InstanceKlass::cast(k)); + return vk->value_mirror(); + } else { + return k->java_mirror(); + } +} + // Utility method converting a single SignatureStream element into java.lang.Class instance static oop 
get_mirror_from_signature(const methodHandle& method, SignatureStream* ss, TRAPS) { - - if (T_OBJECT == ss->type() || T_ARRAY == ss->type()) { + BasicType bt = ss->type(); + if (T_OBJECT == bt || T_ARRAY == bt || T_VALUETYPE == bt) { Symbol* name = ss->as_symbol(CHECK_NULL); oop loader = method->method_holder()->class_loader(); oop protection_domain = method->method_holder()->protection_domain(); @@ -768,13 +781,13 @@ if (log_is_enabled(Debug, class, resolve)) { trace_class_resolution(k); } - return k->java_mirror(); + return java_mirror((Klass*)k, bt == T_VALUETYPE); } - assert(ss->type() != T_VOID || ss->at_return_type(), + assert(bt != T_VOID || ss->at_return_type(), "T_VOID should only appear as return type"); - return java_lang_Class::primitive_mirror(ss->type()); + return java_lang_Class::primitive_mirror(bt); } static objArrayHandle get_parameter_types(const methodHandle& method, @@ -810,7 +823,7 @@ static Handle new_type(Symbol* signature, Klass* k, TRAPS) { // Basic types BasicType type = vmSymbols::signature_type(signature); - if (type != T_OBJECT) { + if (type != T_OBJECT && type != T_VALUETYPE) { return Handle(THREAD, Universe::java_mirror(type)); } @@ -823,8 +836,7 @@ if (log_is_enabled(Debug, class, resolve)) { trace_class_resolution(result); } - - oop nt = result->java_mirror(); + oop nt = java_mirror(result, type == T_VALUETYPE); return Handle(THREAD, nt); } @@ -932,7 +944,14 @@ java_lang_reflect_Field::set_name(rh(), name()); java_lang_reflect_Field::set_type(rh(), type()); // Note the ACC_ANNOTATION bit, which is a per-class access flag, is never set here. 
- java_lang_reflect_Field::set_modifiers(rh(), fd->access_flags().as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS); + int modifiers = fd->access_flags().as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS; + if (fd->is_flattenable()) { + modifiers |= JVM_ACC_FLATTENABLE; + } + if (fd->is_flattened()) { + modifiers |= JVM_ACC_FIELD_FLATTENED; + } + java_lang_reflect_Field::set_modifiers(rh(), modifiers); java_lang_reflect_Field::set_override(rh(), false); if (fd->has_generic_signature()) { Symbol* gs = fd->generic_signature(); @@ -1203,6 +1222,8 @@ BasicType rtype; if (java_lang_Class::is_primitive(return_type_mirror)) { rtype = basic_type_mirror_to_basic_type(return_type_mirror, CHECK_NULL); + } else if (java_lang_Class::value_mirror(return_type_mirror) == return_type_mirror) { + rtype = T_VALUETYPE; } else { rtype = T_OBJECT; } --- old/src/hotspot/share/runtime/safepoint.cpp 2019-03-11 14:27:18.130354160 +0100 +++ new/src/hotspot/share/runtime/safepoint.cpp 2019-03-11 14:27:17.906354163 +0100 @@ -44,6 +44,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" #include "runtime/atomic.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" @@ -1053,16 +1054,40 @@ // return point does not mark the return value as an oop (if it is), so // it needs a handle here to be updated. if( nm->is_at_poll_return(real_return_addr) ) { + ResourceMark rm; // See if return type is an oop. 
- bool return_oop = nm->method()->is_returning_oop(); - Handle return_value; + Method* method = nm->method(); + bool return_oop = method->may_return_oop(); + + GrowableArray return_values; + ValueKlass* vk = NULL; + + if (return_oop && ValueTypeReturnedAsFields) { + SignatureStream ss(method->signature()); + while (!ss.at_return_type()) { + ss.next(); + } + if (ss.type() == T_VALUETYPE) { + // Check if value type is returned as fields + vk = ValueKlass::returned_value_klass(map); + if (vk != NULL) { + // We're at a safepoint at the return of a method that returns + // multiple values. We must make sure we preserve the oop values + // across the safepoint. + assert(vk == method->returned_value_type(thread()), "bad value klass"); + vk->save_oop_fields(map, return_values); + return_oop = false; + } + } + } + if (return_oop) { // The oop result has been saved on the stack together with all // the other registers. In order to preserve it over GCs we need // to keep it in a handle. oop result = caller_fr.saved_oop_result(&map); assert(oopDesc::is_oop_or_null(result), "must be oop"); - return_value = Handle(thread(), result); + return_values.push(Handle(thread(), result)); assert(Universe::heap()->is_in_or_null(result), "must be heap pointer"); } @@ -1071,7 +1096,10 @@ // restore oop result, if any if (return_oop) { - caller_fr.set_saved_oop_result(&map, return_value()); + assert(return_values.length() == 1, "only one return value"); + caller_fr.set_saved_oop_result(&map, return_values.pop()()); + } else if (vk != NULL) { + vk->restore_oop_results(map, return_values); } } --- old/src/hotspot/share/runtime/sharedRuntime.cpp 2019-03-11 14:27:18.622354153 +0100 +++ new/src/hotspot/share/runtime/sharedRuntime.cpp 2019-03-11 14:27:18.378354157 +0100 @@ -44,12 +44,17 @@ #include "jfr/jfrEvents.hpp" #include "logging/log.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include 
"oops/access.hpp" +#include "oops/fieldStreams.hpp" #include "oops/klass.hpp" #include "oops/method.inline.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/valueKlass.hpp" #include "prims/forte.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -1116,6 +1121,12 @@ default: break; } + } else { + assert(attached_method->has_scalarized_args(), "invalid use of attached method"); + if (!attached_method->method_holder()->is_value()) { + // Ignore the attached method in this case to not confuse below code + attached_method = NULL; + } } } @@ -1134,18 +1145,33 @@ // Caller-frame is a compiled frame frame callerFrame = stubFrame.sender(®_map2); - if (attached_method.is_null()) { - methodHandle callee = bytecode.static_target(CHECK_NH); + methodHandle callee = attached_method; + if (callee.is_null()) { + callee = bytecode.static_target(CHECK_NH); if (callee.is_null()) { THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); } } + if (callee->has_scalarized_args() && callee->method_holder()->is_value()) { + // If the receiver is a value type that is passed as fields, no oop is available. + // Resolve the call without receiver null checking. 
+ assert(!attached_method.is_null(), "must have attached method"); + if (bc == Bytecodes::_invokevirtual) { + LinkInfo link_info(attached_method->method_holder(), attached_method->name(), attached_method->signature()); + LinkResolver::resolve_virtual_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH); + } else { + assert(bc == Bytecodes::_invokeinterface, "anything else?"); + LinkInfo link_info(constantPoolHandle(THREAD, caller->constants()), bytecode_index, CHECK_NH); + LinkResolver::resolve_interface_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH); + } + return receiver; // is null + } else { + // Retrieve from a compiled argument list + receiver = Handle(THREAD, callerFrame.retrieve_receiver(®_map2)); - // Retrieve from a compiled argument list - receiver = Handle(THREAD, callerFrame.retrieve_receiver(®_map2)); - - if (receiver.is_null()) { - THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); + if (receiver.is_null()) { + THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); + } } } @@ -1273,10 +1299,16 @@ bool is_nmethod = caller_nm->is_nmethod(); if (is_virtual) { - assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); + Klass* receiver_klass = NULL; + if (ValueTypePassFieldsAsArgs && callee_method->method_holder()->is_value()) { + // If the receiver is a value type that is passed as fields, no oop is available + receiver_klass = callee_method->method_holder(); + } else { + assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); + receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass(); + } bool static_bound = call_info.resolved_method()->can_be_statically_bound(); - Klass* klass = invoke_code == Bytecodes::_invokehandle ? 
NULL : receiver->klass(); - CompiledIC::compute_monomorphic_entry(callee_method, klass, + CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass, is_optimized, static_bound, is_nmethod, virtual_call_info, CHECK_false); } else { @@ -1341,6 +1373,16 @@ // CLEANUP - with lazy deopt shouldn't need this lock nmethodLocker caller_lock(caller_nm); + if (!is_virtual && !is_optimized) { + SimpleScopeDesc ssd(caller_nm, caller_frame.pc()); + Bytecode bc(ssd.method(), ssd.method()->bcp_from(ssd.bci())); + // Substitutability test implementation piggy backs on static call resolution + if (bc.code() == Bytecodes::_if_acmpeq || bc.code() == Bytecodes::_if_acmpne) { + SystemDictionary::ValueBootstrapMethods_klass()->initialize(CHECK_NULL); + return SystemDictionary::ValueBootstrapMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature()); + } + } + // determine call info & receiver // note: a) receiver is NULL for static calls // b) an exception is thrown if receiver is NULL for non-static calls @@ -1428,14 +1470,16 @@ #endif /* ASSERT */ methodHandle callee_method; + bool is_optimized = false; JRT_BLOCK - callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL); + callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, CHECK_NULL); // Return Method* through TLS thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); - return callee_method->verified_code_entry(); + assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); + assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); + return is_optimized ? 
callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry(); JRT_END @@ -1466,14 +1510,16 @@ // Must be compiled to compiled path which is safe to stackwalk methodHandle callee_method; + bool is_optimized = false; JRT_BLOCK // Force resolving of caller (if we called from compiled frame) - callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL); + callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); - return callee_method->verified_code_entry(); + assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); + assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); + return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry(); JRT_END // Handle abstract method call @@ -1516,7 +1562,7 @@ thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1529,8 +1575,8 @@ thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); - return callee_method->verified_code_entry(); + assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); + return callee_method->verified_value_ro_code_entry(); JRT_END @@ -1543,7 +1589,7 @@ thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + 
assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1555,7 +1601,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, - bool& needs_ic_stub_refill, TRAPS) { + bool& needs_ic_stub_refill, bool& is_optimized, TRAPS) { CompiledICLocker ml(caller_nm); CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); bool should_be_mono = false; @@ -1566,6 +1612,7 @@ callee_method->print_short_name(tty); tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code())); } + is_optimized = true; should_be_mono = true; } else if (inline_cache->is_icholder_call()) { CompiledICHolder* ic_oop = inline_cache->cached_icholder(); @@ -1627,7 +1674,7 @@ return true; } -methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) { +methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, TRAPS) { ResourceMark rm(thread); CallInfo call_info; Bytecodes::Code bc; @@ -1647,7 +1694,7 @@ // did this would still be the correct thing to do for it too, hence no ifdef. 
// if (call_info.resolved_method()->can_be_statically_bound()) { - methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle())); + methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_(methodHandle())); if (TraceCallFixup) { RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(®_map); @@ -1702,7 +1749,7 @@ ICRefillVerifier ic_refill_verifier; bool needs_ic_stub_refill = false; bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method, - bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle())); + bc, call_info, needs_ic_stub_refill, is_optimized, CHECK_(methodHandle())); if (successful || !needs_ic_stub_refill) { return callee_method; } else { @@ -1734,7 +1781,7 @@ // sites, and static call sites. Typically used to change a call sites // destination from compiled to interpreted. // -methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) { +methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS) { ResourceMark rm(thread); RegisterMap reg_map(thread, false); frame stub_frame = thread->last_frame(); @@ -1795,6 +1842,7 @@ assert(iter.type() == relocInfo::virtual_call_type || iter.type() == relocInfo::opt_virtual_call_type , "unexpected relocInfo. type"); + is_optimized = (iter.type() == relocInfo::opt_virtual_call_type); } } else { assert(!UseInlineCaches, "relocation info. must exist for this address"); @@ -1820,7 +1868,6 @@ methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle())); - #ifndef PRODUCT Atomic::inc(&_wrong_method_ctr); @@ -2316,14 +2363,27 @@ // Remap BasicTypes that are handled equivalently by the adapters. // These are correct for the current system but someday it might be // necessary to make this mapping platform dependent. 
- static int adapter_encoding(BasicType in) { + static int adapter_encoding(BasicType in, bool is_valuetype) { switch (in) { case T_BOOLEAN: case T_BYTE: case T_SHORT: - case T_CHAR: - // There are all promoted to T_INT in the calling convention - return T_INT; + case T_CHAR: { + if (is_valuetype) { + // Do not widen value type field types + assert(ValueTypePassFieldsAsArgs, "must be enabled"); + return in; + } else { + // They are all promoted to T_INT in the calling convention + return T_INT; + } + } + + case T_VALUETYPE: { + // If value types are passed as fields, return 'in' to differentiate + // between a T_VALUETYPE and a T_OBJECT in the signature. + return ValueTypePassFieldsAsArgs ? in : adapter_encoding(T_OBJECT, false); + } case T_OBJECT: case T_ARRAY: @@ -2349,9 +2409,10 @@ } public: - AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) { + AdapterFingerPrint(const GrowableArray* sig, bool has_ro_adapter = false) { // The fingerprint is based on the BasicType signature encoded // into an array of ints with eight entries per int. + int total_args_passed = (sig != NULL) ? sig->length() : 0; int* ptr; int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int; if (len <= _compact_int_count) { @@ -2369,17 +2430,37 @@ // Now pack the BasicTypes with 8 per int int sig_index = 0; + BasicType prev_sbt = T_ILLEGAL; + int vt_count = 0; for (int index = 0; index < len; index++) { int value = 0; for (int byte = 0; byte < _basic_types_per_int; byte++) { - int bt = ((sig_index < total_args_passed) - ? 
adapter_encoding(sig_bt[sig_index++]) - : 0); + int bt = 0; + if (sig_index < total_args_passed) { + BasicType sbt = sig->at(sig_index++)._bt; + if (ValueTypePassFieldsAsArgs && sbt == T_VALUETYPE) { + // Found start of value type in signature + vt_count++; + if (sig_index == 1 && has_ro_adapter) { + // With a ro_adapter, replace receiver value type delimiter by T_VOID to prevent matching + // with other adapters that have the same value type as first argument and no receiver. + sbt = T_VOID; + } + } else if (ValueTypePassFieldsAsArgs && sbt == T_VOID && + prev_sbt != T_LONG && prev_sbt != T_DOUBLE) { + // Found end of value type in signature + vt_count--; + assert(vt_count >= 0, "invalid vt_count"); + } + bt = adapter_encoding(sbt, vt_count > 0); + prev_sbt = sbt; + } assert((bt & _basic_type_mask) == bt, "must fit in 4 bits"); value = (value << _basic_type_bits) | bt; } ptr[index] = value; } + assert(vt_count == 0, "invalid vt_count"); } ~AdapterFingerPrint() { @@ -2465,9 +2546,9 @@ : BasicHashtable(293, (DumpSharedSpaces ? 
sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { } // Create a new entry suitable for insertion in the table - AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) { + AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_value_entry, address c2i_value_ro_entry, address c2i_unverified_entry) { AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash()); - entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); + entry->init(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry); if (DumpSharedSpaces) { ((CDSAdapterHandlerEntry*)entry)->init(); } @@ -2486,9 +2567,9 @@ } // Find a entry with the same fingerprint if it exists - AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) { + AdapterHandlerEntry* lookup(const GrowableArray* sig, bool has_ro_adapter = false) { NOT_PRODUCT(_lookups++); - AdapterFingerPrint fp(total_args_passed, sig_bt); + AdapterFingerPrint fp(sig, has_ro_adapter); unsigned int hash = fp.compute_hash(); int index = hash_to_index(hash); for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) { @@ -2608,16 +2689,18 @@ // Pass wrong_method_abstract for the c2i transitions to return // AbstractMethodError for invalid invocations. 
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); - _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL), + _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL), StubRoutines::throw_AbstractMethodError_entry(), - wrong_method_abstract, wrong_method_abstract); + wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract); } AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, + address c2i_value_entry, + address c2i_value_ro_entry, address c2i_unverified_entry) { - return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); + return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry); } AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) { @@ -2644,6 +2727,53 @@ return entry; } +static int compute_scalarized_cc(const methodHandle& method, GrowableArray& sig_cc, VMRegPair*& regs_cc, bool scalar_receiver) { + InstanceKlass* holder = method->method_holder(); + sig_cc = GrowableArray(method->size_of_parameters()); + if (!method->is_static()) { + if (holder->is_value() && scalar_receiver) { + sig_cc.appendAll(ValueKlass::cast(holder)->extended_sig()); + } else { + SigEntry::add_entry(&sig_cc, T_OBJECT); + } + } + Thread* THREAD = Thread::current(); + for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { + if (ss.type() == T_VALUETYPE) { + Klass* k = ss.as_klass(Handle(THREAD, holder->class_loader()), + Handle(THREAD, holder->protection_domain()), + SignatureStream::ReturnNull, THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "value klass should have been pre-loaded"); + sig_cc.appendAll(ValueKlass::cast(k)->extended_sig()); + } else { + SigEntry::add_entry(&sig_cc, ss.type()); + } + } + regs_cc = 
NEW_RESOURCE_ARRAY(VMRegPair, sig_cc.length() + 2); + return SharedRuntime::java_calling_convention(&sig_cc, regs_cc); +} + +static int insert_reserved_entry(GrowableArray& sig_cc, VMRegPair*& regs_cc, int ret_off) { + // Find index in signature that belongs to return address slot + BasicType bt = T_ILLEGAL; + int i = 0; + for (uint off = 0; i < sig_cc.length(); ++i) { + if (SigEntry::skip_value_delimiters(&sig_cc, i)) { + VMReg first = regs_cc[off++].first(); + if (first->is_valid() && first->is_stack()) { + // Select a type for the reserved entry that will end up on the stack + bt = sig_cc.at(i)._bt; + if (((int)first->reg2stack() + VMRegImpl::slots_per_word) == ret_off) { + break; // Index of the return address found + } + } + } + } + // Insert reserved entry and re-compute calling convention + SigEntry::insert_reserved_entry(&sig_cc, i, bt); + return SharedRuntime::java_calling_convention(&sig_cc, regs_cc); +} + AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) { // Use customized signature handler. 
Need to lock around updates to // the AdapterHandlerTable (it is not safe for concurrent readers @@ -2652,36 +2782,123 @@ ResourceMark rm; - NOT_PRODUCT(int insts_size); + NOT_PRODUCT(int insts_size = 0); AdapterBlob* new_adapter = NULL; AdapterHandlerEntry* entry = NULL; AdapterFingerPrint* fingerprint = NULL; + { MutexLocker mu(AdapterHandlerLibrary_lock); // make sure data structure is initialized initialize(); - if (method->is_abstract()) { + bool has_value_arg = false; + bool has_value_recv = false; + GrowableArray sig(method->size_of_parameters()); + if (!method->is_static()) { + has_value_recv = method->method_holder()->is_value(); + has_value_arg = has_value_recv; + SigEntry::add_entry(&sig, T_OBJECT); + } + for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + if (bt == T_VALUETYPE) { + has_value_arg = true; + bt = T_OBJECT; + } + SigEntry::add_entry(&sig, bt); + } + + // Process abstract method if it has value type args to set has_scalarized_args accordingly + if (method->is_abstract() && !(ValueTypePassFieldsAsArgs && has_value_arg)) { return _abstract_method_handler; } - // Fill in the signature array, for the calling-convention call. 
- int total_args_passed = method->size_of_parameters(); // All args on stack + // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage + VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sig.length()); + int args_on_stack = SharedRuntime::java_calling_convention(&sig, regs); - BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); - VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); - int i = 0; - if (!method->is_static()) // Pass in receiver first - sig_bt[i++] = T_OBJECT; - for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { - sig_bt[i++] = ss.type(); // Collect remaining bits of signature - if (ss.type() == T_LONG || ss.type() == T_DOUBLE) - sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots + // Now compute the scalarized calling convention if there are value types in the signature + GrowableArray sig_cc = sig; + GrowableArray sig_cc_ro = sig; + VMRegPair* regs_cc = regs; + VMRegPair* regs_cc_ro = regs; + int args_on_stack_cc = args_on_stack; + int args_on_stack_cc_ro = args_on_stack; + + if (ValueTypePassFieldsAsArgs && has_value_arg && !method->is_native()) { + MutexUnlocker mul(AdapterHandlerLibrary_lock); + args_on_stack_cc = compute_scalarized_cc(method, sig_cc, regs_cc, /* scalar_receiver = */ true); + + sig_cc_ro = sig_cc; + regs_cc_ro = regs_cc; + args_on_stack_cc_ro = args_on_stack_cc; + if (has_value_recv || args_on_stack_cc > args_on_stack) { + // For interface calls, we need another entry point / adapter to unpack the receiver + args_on_stack_cc_ro = compute_scalarized_cc(method, sig_cc_ro, regs_cc_ro, /* scalar_receiver = */ false); + } + + // Compute the stack extension that is required to convert between the calling conventions. + // The stack slots at these offsets are occupied by the return address with the unscalarized + // calling convention. Don't use them for arguments with the scalarized calling convention. 
+ int ret_off = args_on_stack_cc - args_on_stack; + int ret_off_ro = args_on_stack_cc - args_on_stack_cc_ro; + assert(ret_off_ro <= 0 || ret_off > 0, "receiver unpacking requires more stack space than expected"); + + if (ret_off > 0) { + // Make sure the stack of the scalarized calling convention with the reserved + // entries (2 slots each) remains 16-byte (4 slots) aligned after stack extension. + int alignment = StackAlignmentInBytes / VMRegImpl::stack_slot_size; + if (ret_off_ro != ret_off && ret_off_ro >= 0) { + ret_off += 4; // Account for two reserved entries (4 slots) + ret_off_ro += 4; + ret_off = align_up(ret_off, alignment); + ret_off_ro = align_up(ret_off_ro, alignment); + // TODO can we avoid wasting a stack slot here? + //assert(ret_off != ret_off_ro, "fail"); + if (ret_off > ret_off_ro) { + swap(ret_off, ret_off_ro); // Sort by offset + } + args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off); + args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off_ro); + } else { + ret_off += 2; // Account for one reserved entry (2 slots) + ret_off = align_up(ret_off, alignment); + args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off); + } + } + + // Upper bound on stack arguments to avoid hitting the argument limit and + // bailing out of compilation ("unsupported incoming calling sequence"). + // TODO we need a reasonable limit (flag?) 
here + if (args_on_stack_cc > 50) { + // Don't scalarize value type arguments + sig_cc = sig; + sig_cc_ro = sig; + regs_cc = regs; + regs_cc_ro = regs; + args_on_stack_cc = args_on_stack; + } else { + method->set_has_scalarized_args(true); + method->set_needs_stack_repair(args_on_stack_cc > args_on_stack); + } + } + + if (method->is_abstract()) { + // Save a C heap allocated version of the signature for abstract methods with scalarized value type arguments + assert(ValueTypePassFieldsAsArgs && has_value_arg, "must have scalarized value type args"); + address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); + entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL), + StubRoutines::throw_AbstractMethodError_entry(), + wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract); + GrowableArray* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(sig_cc_ro.length(), true); + heap_sig->appendAll(&sig_cc_ro); + entry->set_sig_cc(heap_sig); + return entry; } - assert(i == total_args_passed, ""); // Lookup method signature's fingerprint - entry = _adapters->lookup(total_args_passed, sig_bt); + entry = _adapters->lookup(&sig_cc, regs_cc != regs_cc_ro); #ifdef ASSERT AdapterHandlerEntry* shared_entry = NULL; @@ -2696,11 +2913,8 @@ return entry; } - // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage - int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); - // Make a C heap allocated version of the fingerprint to store in the adapter - fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt); + fingerprint = new AdapterFingerPrint(&sig_cc, regs_cc != regs_cc_ro); // StubRoutines::code2() is initialized after this function can be called. 
As a result, // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated @@ -2718,14 +2932,30 @@ MacroAssembler _masm(&buffer); entry = SharedRuntime::generate_i2c2i_adapters(&_masm, - total_args_passed, - comp_args_on_stack, - sig_bt, + args_on_stack, + args_on_stack_cc, + &sig, regs, - fingerprint); + &sig_cc, + regs_cc, + &sig_cc_ro, + regs_cc_ro, + fingerprint, + new_adapter); + + if (regs != regs_cc) { + // Save a C heap allocated version of the scalarized signature and store it in the adapter + GrowableArray* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(sig_cc.length(), true); + heap_sig->appendAll(&sig_cc); + entry->set_sig_cc(heap_sig); + } + #ifdef ASSERT if (VerifyAdapterSharing) { if (shared_entry != NULL) { + if (!shared_entry->compare_code(buf->code_begin(), buffer.insts_size())) { + method->print(); + } assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match"); // Release the one just created and return the original _adapters->free_entry(entry); @@ -2736,7 +2966,6 @@ } #endif - new_adapter = AdapterBlob::create(&buffer); NOT_PRODUCT(insts_size = buffer.insts_size()); } if (new_adapter == NULL) { @@ -2792,6 +3021,8 @@ address base = _i2c_entry; if (base == NULL) base = _c2i_entry; assert(base <= _c2i_entry || _c2i_entry == NULL, ""); + assert(base <= _c2i_value_entry || _c2i_value_entry == NULL, ""); + assert(base <= _c2i_value_ro_entry || _c2i_value_ro_entry == NULL, ""); assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, ""); return base; } @@ -2804,6 +3035,10 @@ _i2c_entry += delta; if (_c2i_entry != NULL) _c2i_entry += delta; + if (_c2i_value_entry != NULL) + _c2i_value_entry += delta; + if (_c2i_value_ro_entry != NULL) + _c2i_value_ro_entry += delta; if (_c2i_unverified_entry != NULL) _c2i_unverified_entry += delta; assert(base_address() == new_base, ""); @@ -2812,6 +3047,9 @@ void AdapterHandlerEntry::deallocate() { delete _fingerprint; + if 
(_sig_cc != NULL) { + delete _sig_cc; + } #ifdef ASSERT if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code); #endif @@ -2883,7 +3121,8 @@ sig_bt[i++] = T_OBJECT; SignatureStream ss(method->signature()); for (; !ss.at_return_type(); ss.next()) { - sig_bt[i++] = ss.type(); // Collect remaining bits of signature + BasicType bt = ss.type(); + sig_bt[i++] = bt; // Collect remaining bits of signature if (ss.type() == T_LONG || ss.type() == T_DOUBLE) sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots } @@ -2998,12 +3237,16 @@ while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_OBJECT; break; + case 'Q': // Value type + while (*s++ != ';'); // Skip signature + sig_bt[cnt++] = T_VALUETYPE; + break; case '[': { // Array do { // Skip optional size while (*s >= '0' && *s <= '9') s++; } while (*s++ == '['); // Nested arrays? // Skip element type - if (s[-1] == 'L') + if (s[-1] == 'L' || s[-1] == 'Q') while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_ARRAY; break; @@ -3144,9 +3387,9 @@ } void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { - st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, + st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iVE: " INTPTR_FORMAT " c2iVROE: " INTPTR_FORMAT " c2iUE: " INTPTR_FORMAT, p2i(this), fingerprint()->as_string(), - p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_unverified_entry())); + p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_value_entry()), p2i(get_c2i_value_ro_entry()), p2i(get_c2i_unverified_entry())); } @@ -3240,3 +3483,210 @@ BarrierSet *bs = BarrierSet::barrier_set(); bs->on_slowpath_allocation_exit(thread, new_obj); } + +// We are at a compiled code to interpreter call. We need backing +// buffers for all value type arguments. Allocate an object array to +// hold them (convenient because once we're done with it we don't have +// to worry about freeing it). 
+JRT_ENTRY(void, SharedRuntime::allocate_value_types(JavaThread* thread, Method* callee_method, bool allocate_receiver)) +{ + assert(ValueTypePassFieldsAsArgs, "no reason to call this"); + ResourceMark rm; + JavaThread* THREAD = thread; + methodHandle callee(callee_method); + + int nb_slots = 0; + InstanceKlass* holder = callee->method_holder(); + allocate_receiver &= !callee->is_static() && holder->is_value(); + if (allocate_receiver) { + nb_slots++; + } + Handle class_loader(THREAD, holder->class_loader()); + Handle protection_domain(THREAD, holder->protection_domain()); + for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) { + if (ss.type() == T_VALUETYPE) { + nb_slots++; + } + } + objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK); + objArrayHandle array(THREAD, array_oop); + int i = 0; + if (allocate_receiver) { + ValueKlass* vk = ValueKlass::cast(holder); + oop res = vk->allocate_instance(CHECK); + array->obj_at_put(i, res); + i++; + } + for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) { + if (ss.type() == T_VALUETYPE) { + Klass* k = ss.as_klass(class_loader, protection_domain, SignatureStream::ReturnNull, THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "can't resolve klass"); + ValueKlass* vk = ValueKlass::cast(k); + oop res = vk->allocate_instance(CHECK); + array->obj_at_put(i, res); + i++; + } + } + thread->set_vm_result(array()); + thread->set_vm_result_2(callee()); // TODO: required to keep callee live? +} +JRT_END + +// Iterate of the array of heap allocated value types and apply the GC post barrier to all reference fields. +// This is called from the C2I adapter after value type arguments are heap allocated and initialized. 
+JRT_LEAF(void, SharedRuntime::apply_post_barriers(JavaThread* thread, objArrayOopDesc* array)) +{ + assert(ValueTypePassFieldsAsArgs, "no reason to call this"); + assert(oopDesc::is_oop(array), "should be oop"); + for (int i = 0; i < array->length(); ++i) { + instanceOop valueOop = (instanceOop)array->obj_at(i); + ValueKlass* vk = ValueKlass::cast(valueOop->klass()); + if (vk->contains_oops()) { + const address dst_oop_addr = ((address) (void*) valueOop); + OopMapBlock* map = vk->start_of_nonstatic_oop_maps(); + OopMapBlock* const end = map + vk->nonstatic_oop_map_count(); + while (map != end) { + address doop_address = dst_oop_addr + map->offset(); + barrier_set_cast(BarrierSet::barrier_set())-> + write_ref_array((HeapWord*) doop_address, map->count()); + map++; + } + } + } +} +JRT_END + +// We're returning from an interpreted method: load each field into a +// register following the calling convention +JRT_LEAF(void, SharedRuntime::load_value_type_fields_in_regs(JavaThread* thread, oopDesc* res)) +{ + assert(res->klass()->is_value(), "only value types here"); + ResourceMark rm; + RegisterMap reg_map(thread); + frame stubFrame = thread->last_frame(); + frame callerFrame = stubFrame.sender(®_map); + assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter"); + + ValueKlass* vk = ValueKlass::cast(res->klass()); + + const Array* sig_vk = vk->extended_sig(); + const Array* regs = vk->return_regs(); + + if (regs == NULL) { + // The fields of the value klass don't fit in registers, bail out + return; + } + + int j = 1; + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID) { + if (sig_vk->at(i-1)._bt == T_LONG || + sig_vk->at(i-1)._bt == T_DOUBLE) { + j++; + } + continue; + } + int off = sig_vk->at(i)._offset; + assert(off > 0, "offset in object should be positive"); + VMRegPair pair = regs->at(j); + address loc = reg_map.location(pair.first()); + switch(bt) 
{ + case T_BOOLEAN: + *(jboolean*)loc = res->bool_field(off); + break; + case T_CHAR: + *(jchar*)loc = res->char_field(off); + break; + case T_BYTE: + *(jbyte*)loc = res->byte_field(off); + break; + case T_SHORT: + *(jshort*)loc = res->short_field(off); + break; + case T_INT: { + *(jint*)loc = res->int_field(off); + break; + } + case T_LONG: +#ifdef _LP64 + *(intptr_t*)loc = res->long_field(off); +#else + Unimplemented(); +#endif + break; + case T_OBJECT: + case T_ARRAY: { + *(oop*)loc = res->obj_field(off); + break; + } + case T_FLOAT: + *(jfloat*)loc = res->float_field(off); + break; + case T_DOUBLE: + *(jdouble*)loc = res->double_field(off); + break; + default: + ShouldNotReachHere(); + } + j++; + } + assert(j == regs->length(), "missed a field?"); + +#ifdef ASSERT + VMRegPair pair = regs->at(0); + address loc = reg_map.location(pair.first()); + assert(*(oopDesc**)loc == res, "overwritten object"); +#endif + + thread->set_vm_result(res); +} +JRT_END + +// We've returned to an interpreted method, the interpreter needs a +// reference to a value type instance. Allocate it and initialize it +// from field's values in registers. 
+JRT_BLOCK_ENTRY(void, SharedRuntime::store_value_type_fields_to_buf(JavaThread* thread, intptr_t res)) +{ + ResourceMark rm; + RegisterMap reg_map(thread); + frame stubFrame = thread->last_frame(); + frame callerFrame = stubFrame.sender(®_map); + +#ifdef ASSERT + ValueKlass* verif_vk = ValueKlass::returned_value_klass(reg_map); +#endif + + if (!is_set_nth_bit(res, 0)) { + // We're not returning with value type fields in registers (the + // calling convention didn't allow it for this value klass) + assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area"); + thread->set_vm_result((oopDesc*)res); + assert(verif_vk == NULL, "broken calling convention"); + return; + } + + clear_nth_bit(res, 0); + ValueKlass* vk = (ValueKlass*)res; + assert(verif_vk == vk, "broken calling convention"); + assert(Metaspace::contains((void*)res), "should be klass"); + + // Allocate handles for every oop field so they are safe in case of + // a safepoint when allocating + GrowableArray handles; + vk->save_oop_fields(reg_map, handles); + + // It's unsafe to safepoint until we are here + JRT_BLOCK; + { + Thread* THREAD = thread; + oop vt = vk->realloc_result(reg_map, handles, CHECK); + thread->set_vm_result(vt); + } + JRT_BLOCK_END; +} +JRT_END + --- old/src/hotspot/share/runtime/sharedRuntime.hpp 2019-03-11 14:27:19.206354145 +0100 +++ new/src/hotspot/share/runtime/sharedRuntime.hpp 2019-03-11 14:27:18.942354149 +0100 @@ -30,6 +30,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" +#include "runtime/signature.hpp" #include "utilities/hashtable.hpp" #include "utilities/macros.hpp" @@ -37,6 +38,7 @@ class AdapterHandlerTable; class AdapterFingerPrint; class vframeStream; +class SigEntry; // Runtime is the base class for various runtime interfaces // (InterpreterRuntime, CompilerRuntime, etc.). 
It provides @@ -84,7 +86,7 @@ enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP }; static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type); static RuntimeStub* generate_resolve_blob(address destination, const char* name); - + static RuntimeStub* generate_return_value_blob(address destination, const char* name); public: static void generate_stubs(void); @@ -329,17 +331,17 @@ static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, - bool& needs_ic_stub_refill, TRAPS); + bool& needs_ic_stub_refill, bool& is_optimized, TRAPS); public: static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; } // Resets a call-site in compiled code so it will get resolved again. - static methodHandle reresolve_call_site(JavaThread *thread, TRAPS); + static methodHandle reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS); // In the code prolog, if the klass comparison fails, the inline cache // misses and the call site is patched to megamorphic - static methodHandle handle_ic_miss_helper(JavaThread* thread, TRAPS); + static methodHandle handle_ic_miss_helper(JavaThread* thread, bool& is_optimized, TRAPS); // Find the method that called us. static methodHandle find_callee_method(JavaThread* thread, TRAPS); @@ -378,6 +380,14 @@ // will be just above it. ( // return value is the maximum number of VMReg stack slots the convention will use. 
static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed, int is_outgoing); + static int java_calling_convention(const GrowableArray* sig, VMRegPair* regs) { + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length()); + int total_args_passed = SigEntry::fill_sig_bt(sig, sig_bt); + return java_calling_convention(sig_bt, regs, total_args_passed, false); + } + static int java_return_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed); + static const uint java_return_convention_max_int; + static const uint java_return_convention_max_float; static void check_member_name_argument_is_last_argument(const methodHandle& method, const BasicType* sig_bt, @@ -425,17 +435,21 @@ // pointer as needed. This means the i2c adapter code doesn't need any special // handshaking path with compiled code to keep the stack walking correct. - static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *_masm, - int total_args_passed, - int max_arg, - const BasicType *sig_bt, - const VMRegPair *regs, - AdapterFingerPrint* fingerprint); + static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *masm, + int comp_args_on_stack, + int comp_args_on_stack_cc, + const GrowableArray* sig, + const VMRegPair* regs, + const GrowableArray* sig_cc, + const VMRegPair* regs_cc, + const GrowableArray* sig_cc_ro, + const VMRegPair* regs_cc_ro, + AdapterFingerPrint* fingerprint, + AdapterBlob*& new_adapter); static void gen_i2c_adapter(MacroAssembler *_masm, - int total_args_passed, int comp_args_on_stack, - const BasicType *sig_bt, + const GrowableArray* sig, const VMRegPair *regs); // OSR support @@ -511,6 +525,9 @@ static address resolve_virtual_call_C (JavaThread *thread); static address resolve_opt_virtual_call_C(JavaThread *thread); + static void load_value_type_fields_in_regs(JavaThread *thread, oopDesc* res); + static void store_value_type_fields_to_buf(JavaThread *thread, intptr_t res); + // arraycopy, the 
non-leaf version. (See StubRoutines for all the leaf calls.) static void slow_arraycopy_C(oopDesc* src, jint src_pos, oopDesc* dest, jint dest_pos, @@ -521,9 +538,12 @@ static address handle_wrong_method(JavaThread* thread); static address handle_wrong_method_abstract(JavaThread* thread); static address handle_wrong_method_ic_miss(JavaThread* thread); + static void allocate_value_types(JavaThread* thread, Method* callee, bool allocate_receiver); + static void apply_post_barriers(JavaThread* thread, objArrayOopDesc* array); static address handle_unsafe_access(JavaThread* thread, address next_pc); + static BufferedValueTypeBlob* generate_buffered_value_type_adapter(const ValueKlass* vk); #ifndef PRODUCT // Collect and print inline cache miss statistics @@ -638,8 +658,13 @@ AdapterFingerPrint* _fingerprint; address _i2c_entry; address _c2i_entry; + address _c2i_value_entry; + address _c2i_value_ro_entry; address _c2i_unverified_entry; + // Support for scalarized value type calling convention + const GrowableArray* _sig_cc; + #ifdef ASSERT // Captures code and signature used to generate this adapter when // verifying adapter equivalence. 
@@ -647,11 +672,14 @@ int _saved_code_length; #endif - void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) { + void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_value_entry, address c2i_value_ro_entry, address c2i_unverified_entry) { _fingerprint = fingerprint; _i2c_entry = i2c_entry; _c2i_entry = c2i_entry; + _c2i_value_entry = c2i_value_entry; + _c2i_value_ro_entry = c2i_value_ro_entry; _c2i_unverified_entry = c2i_unverified_entry; + _sig_cc = NULL; #ifdef ASSERT _saved_code = NULL; _saved_code_length = 0; @@ -666,10 +694,16 @@ public: address get_i2c_entry() const { return _i2c_entry; } address get_c2i_entry() const { return _c2i_entry; } + address get_c2i_value_entry() const { return _c2i_value_entry; } + address get_c2i_value_ro_entry() const { return _c2i_value_ro_entry; } address get_c2i_unverified_entry() const { return _c2i_unverified_entry; } address base_address(); void relocate(address new_base); + // Support for scalarized value type calling convention + void set_sig_cc(const GrowableArray* sig) { _sig_cc = sig; } + const GrowableArray* get_sig_cc() const { return _sig_cc; } + AdapterFingerPrint* fingerprint() const { return _fingerprint; } AdapterHandlerEntry* next() { @@ -712,7 +746,7 @@ public: static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, - address i2c_entry, address c2i_entry, address c2i_unverified_entry); + address i2c_entry, address c2i_entry, address c2i_value_entry, address c2i_value_ro_entry, address c2i_unverified_entry); static void create_native_wrapper(const methodHandle& method); static AdapterHandlerEntry* get_adapter(const methodHandle& method); --- old/src/hotspot/share/runtime/signature.cpp 2019-03-11 14:27:19.714354138 +0100 +++ new/src/hotspot/share/runtime/signature.cpp 2019-03-11 14:27:19.450354142 +0100 @@ -31,6 +31,7 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include 
"oops/typeArrayKlass.hpp" +#include "oops/valueKlass.hpp" #include "runtime/signature.hpp" // Implementation of SignatureIterator @@ -40,7 +41,7 @@ // Signature = "(" {Parameter} ")" ReturnType. // Parameter = FieldType. // ReturnType = FieldType | "V". -// FieldType = "B" | "C" | "D" | "F" | "I" | "J" | "S" | "Z" | "L" ClassName ";" | "[" FieldType. +// FieldType = "B" | "C" | "D" | "F" | "I" | "J" | "S" | "Z" | "L" ClassName ";" | "Q" ValueClassName ";" | "[" FieldType. // ClassName = string. @@ -89,13 +90,22 @@ if (_parameter_index < 0 ) _return_type = T_OBJECT; size = T_OBJECT_size; break; + case 'Q': + { int begin = ++_index; + Symbol* sig = _signature; + while (sig->char_at(_index++) != ';') ; + do_valuetype(begin, _index); + } + if (_parameter_index < 0 ) _return_type = T_VALUETYPE; + size = T_VALUETYPE_size; + break; case '[': { int begin = ++_index; Symbol* sig = _signature; while (sig->char_at(_index) == '[') { _index++; } - if (sig->char_at(_index) == 'L') { + if (sig->char_at(_index) == 'L' || sig->char_at(_index) == 'Q') { while (sig->char_at(_index++) != ';') ; } else { _index++; @@ -232,6 +242,7 @@ _index++; } break; + case 'Q': case 'L': { while (sig->char_at(_index++) != ';') ; @@ -243,7 +254,7 @@ while (sig->char_at(_index) == '[') { _index++; } - if (sig->char_at(_index) == 'L') { + if (sig->char_at(_index) == 'L' || sig->char_at(_index) == 'Q' ) { while (sig->char_at(_index++) != ';') ; } else { _index++; @@ -307,6 +318,12 @@ while (sig->char_at(_end++) != ';'); break; } + case 'Q': { + _type = T_VALUETYPE; + Symbol* sig = _signature; + while (sig->char_at(_end++) != ';'); + break; + } case '[': { _type = T_ARRAY; Symbol* sig = _signature; @@ -341,7 +358,8 @@ bool SignatureStream::is_object() const { return _type == T_OBJECT - || _type == T_ARRAY; + || _type == T_ARRAY + || _type == T_VALUETYPE; } bool SignatureStream::is_array() const { @@ -353,10 +371,12 @@ int begin = _begin; int end = _end; - if ( _signature->char_at(_begin) == 'L' - && 
_signature->char_at(_end-1) == ';') { + if (_type == T_OBJECT || _type == T_VALUETYPE) { begin++; end--; + if (begin == end) { + return vmSymbols::java_lang_Object(); + } } // Save names for cleaning up reference count at the end of @@ -384,7 +404,7 @@ return Universe::java_mirror(type()); Klass* klass = as_klass(class_loader, protection_domain, failure_mode, CHECK_NULL); if (klass == NULL) return NULL; - return klass->java_mirror(); + return _type == T_VALUETYPE ? ValueKlass::cast(InstanceKlass::cast(klass))->value_mirror() : klass->java_mirror(); } Symbol* SignatureStream::as_symbol_or_null() { @@ -394,10 +414,12 @@ int begin = _begin; int end = _end; - if ( _signature->char_at(_begin) == 'L' - && _signature->char_at(_end-1) == ';') { + if (_type == T_OBJECT || _type == T_VALUETYPE) { begin++; end--; + if (begin == end) { + return vmSymbols::java_lang_Object(); + } } char* buffer = NEW_RESOURCE_ARRAY(char, end - begin); @@ -475,6 +497,7 @@ case 'B': case 'C': case 'D': case 'F': case 'I': case 'J': case 'S': case 'Z': case 'V': return index + 1; + case 'Q': // fall through case 'L': for (index = index + 1; index < limit; ++index) { char c = type[index]; @@ -499,3 +522,87 @@ return false; } } + +// Adds an argument to the signature +void SigEntry::add_entry(GrowableArray* sig, BasicType bt, int offset) { + sig->append(SigEntry(bt, offset)); + if (bt == T_LONG || bt == T_DOUBLE) { + sig->append(SigEntry(T_VOID, offset)); // Longs and doubles take two stack slots + } +} + +// Inserts a reserved argument at position 'i' +void SigEntry::insert_reserved_entry(GrowableArray* sig, int i, BasicType bt) { + if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) { + // Treat this as INT to not confuse the GC + bt = T_INT; + } else if (bt == T_LONG || bt == T_DOUBLE) { + // Longs and doubles take two stack slots + sig->insert_before(i, SigEntry(T_VOID, SigEntry::ReservedOffset)); + } + sig->insert_before(i, SigEntry(bt, SigEntry::ReservedOffset)); +} + +// Returns true if 
the argument at index 'i' is a reserved argument +bool SigEntry::is_reserved_entry(const GrowableArray* sig, int i) { + return sig->at(i)._offset == SigEntry::ReservedOffset; +} + +// Returns true if the argument at index 'i' is not a value type delimiter +bool SigEntry::skip_value_delimiters(const GrowableArray* sig, int i) { + return (sig->at(i)._bt != T_VALUETYPE && + (sig->at(i)._bt != T_VOID || sig->at(i-1)._bt == T_LONG || sig->at(i-1)._bt == T_DOUBLE)); +} + +// Fill basic type array from signature array +int SigEntry::fill_sig_bt(const GrowableArray* sig, BasicType* sig_bt) { + int count = 0; + for (int i = 0; i < sig->length(); i++) { + if (skip_value_delimiters(sig, i)) { + sig_bt[count++] = sig->at(i)._bt; + } + } + return count; +} + +// Create a temporary symbol from the signature array +TempNewSymbol SigEntry::create_symbol(const GrowableArray* sig) { + ResourceMark rm; + int length = sig->length(); + char* sig_str = NEW_RESOURCE_ARRAY(char, 2*length + 3); + int idx = 0; + sig_str[idx++] = '('; + for (int i = 0; i < length; i++) { + BasicType bt = sig->at(i)._bt; + if (bt == T_VALUETYPE || bt == T_VOID) { + // Ignore + } else { + if (bt == T_ARRAY) { + bt = T_OBJECT; // We don't know the element type, treat as Object + } + sig_str[idx++] = type2char(bt); + if (bt == T_OBJECT) { + sig_str[idx++] = ';'; + } + } + } + sig_str[idx++] = ')'; + sig_str[idx++] = '\0'; + return SymbolTable::new_symbol(sig_str, Thread::current()); +} + +// Increment signature iterator (skips value type delimiters and T_VOID) and check if next entry is reserved +bool SigEntry::next_is_reserved(ExtendedSignature& sig, BasicType& bt, bool can_be_void) { + assert(can_be_void || bt != T_VOID, "should never see void"); + if (sig.at_end() || (can_be_void && type2size[bt] == 2 && (*sig)._offset != SigEntry::ReservedOffset)) { + // Don't increment at the end or at a T_LONG/T_DOUBLE which will be followed by a (skipped) T_VOID + return false; + } + assert(bt == T_VOID || type2wfield[bt] 
== type2wfield[(*sig)._bt], "inconsistent signature"); + ++sig; + if (!sig.at_end() && (*sig)._offset == SigEntry::ReservedOffset) { + bt = (*sig)._bt; + return true; + } + return false; +} --- old/src/hotspot/share/runtime/signature.hpp 2019-03-11 14:27:20.218354131 +0100 +++ new/src/hotspot/share/runtime/signature.hpp 2019-03-11 14:27:19.982354134 +0100 @@ -25,6 +25,7 @@ #ifndef SHARE_RUNTIME_SIGNATURE_HPP #define SHARE_RUNTIME_SIGNATURE_HPP +#include "classfile/symbolTable.hpp" #include "memory/allocation.hpp" #include "oops/method.hpp" @@ -113,6 +114,7 @@ // Object types (begin indexes the first character of the entry, end indexes the first character after the entry) virtual void do_object(int begin, int end) = 0; + virtual void do_valuetype(int begin, int end) = 0; virtual void do_array (int begin, int end) = 0; static bool is_static(uint64_t fingerprint) { @@ -142,6 +144,7 @@ void do_long() { type_name("jlong" ); } void do_void() { type_name("void" ); } void do_object(int begin, int end) { type_name("jobject" ); } + void do_valuetype(int begin, int end) { type_name("jobject"); } void do_array (int begin, int end) { type_name("jobject" ); } public: @@ -170,6 +173,7 @@ void do_long () { set(T_LONG_size , T_LONG ); } void do_void () { set(T_VOID_size , T_VOID ); } void do_object(int begin, int end) { set(T_OBJECT_size , T_OBJECT ); } + void do_valuetype(int begin, int end) { set(T_VALUETYPE_size, T_VALUETYPE ); } void do_array (int begin, int end) { set(T_ARRAY_size , T_ARRAY ); } public: @@ -236,6 +240,7 @@ void do_double() { _fingerprint |= (((uint64_t)double_parm) << _shift_count); _shift_count += parameter_feature_size; } void do_object(int begin, int end) { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; } + void do_valuetype(int begin, int end) { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; } void do_array (int begin, int end) { _fingerprint |= 
(((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; } void do_void() { ShouldNotReachHere(); } @@ -301,6 +306,7 @@ void do_void () { ShouldNotReachHere(); } void do_object(int begin, int end) { pass_object(); _jni_offset++; _offset++; } void do_array (int begin, int end) { pass_object(); _jni_offset++; _offset++; } + void do_valuetype(int begin, int end){ pass_valuetype(); _jni_offset++; _offset++; } public: methodHandle method() const { return _method; } @@ -311,6 +317,7 @@ virtual void pass_int() = 0; virtual void pass_long() = 0; virtual void pass_object() = 0; + virtual void pass_valuetype() = 0; virtual void pass_float() = 0; #ifdef _LP64 virtual void pass_double() = 0; @@ -428,4 +435,63 @@ static bool invalid_name_char(char); }; +class SigEntryFilter; +typedef GrowableArrayFilterIterator ExtendedSignature; + +// Used for adapter generation. One SigEntry is used per element of +// the signature of the method. Value type arguments are treated +// specially. See comment for ValueKlass::collect_fields(). 
+class SigEntry { + public: + BasicType _bt; + int _offset; + + enum { ReservedOffset = -2 }; // Special offset to mark the reserved entry + + SigEntry() + : _bt(T_ILLEGAL), _offset(-1) { + } + SigEntry(BasicType bt, int offset) + : _bt(bt), _offset(offset) {} + + SigEntry(BasicType bt) + : _bt(bt), _offset(-1) {} + + static int compare(SigEntry* e1, SigEntry* e2) { + if (e1->_offset != e2->_offset) { + return e1->_offset - e2->_offset; + } + assert((e1->_bt == T_LONG && (e2->_bt == T_LONG || e2->_bt == T_VOID)) || + (e1->_bt == T_DOUBLE && (e2->_bt == T_DOUBLE || e2->_bt == T_VOID)) || + e1->_bt == T_VALUETYPE || e2->_bt == T_VALUETYPE || e1->_bt == T_VOID || e2->_bt == T_VOID, "bad bt"); + if (e1->_bt == e2->_bt) { + assert(e1->_bt == T_VALUETYPE || e1->_bt == T_VOID, "only ones with duplicate offsets"); + return 0; + } + if (e1->_bt == T_VOID || + e2->_bt == T_VALUETYPE) { + return 1; + } + if (e1->_bt == T_VALUETYPE || + e2->_bt == T_VOID) { + return -1; + } + ShouldNotReachHere(); + return 0; + } + static void add_entry(GrowableArray* sig, BasicType bt, int offset = -1); + static void insert_reserved_entry(GrowableArray* sig, int i, BasicType bt); + static bool is_reserved_entry(const GrowableArray* sig, int i); + static bool skip_value_delimiters(const GrowableArray* sig, int i); + static int fill_sig_bt(const GrowableArray* sig, BasicType* sig_bt); + static TempNewSymbol create_symbol(const GrowableArray* sig); + + static bool next_is_reserved(ExtendedSignature& sig, BasicType& bt, bool can_be_void = false); +}; + +class SigEntryFilter { +public: + bool operator()(const SigEntry& entry) { return entry._bt != T_VALUETYPE && entry._bt != T_VOID; } +}; + #endif // SHARE_RUNTIME_SIGNATURE_HPP --- old/src/hotspot/share/runtime/stubRoutines.cpp 2019-03-11 14:27:20.642354125 +0100 +++ new/src/hotspot/share/runtime/stubRoutines.cpp 2019-03-11 14:27:20.434354128 +0100 @@ -169,6 +169,9 @@ address StubRoutines::_safefetchN_fault_pc = NULL; address 
StubRoutines::_safefetchN_continuation_pc = NULL; +address StubRoutines::_load_value_type_fields_in_regs = NULL; +address StubRoutines::_store_value_type_fields_to_buf = NULL; + // Initialization // // Note: to break cycle with universe initialization, stubs are generated in two phases. @@ -481,6 +484,7 @@ case T_DOUBLE: case T_LONG: case T_ARRAY: + case T_VALUETYPE: case T_OBJECT: case T_NARROWOOP: case T_NARROWKLASS: --- old/src/hotspot/share/runtime/stubRoutines.hpp 2019-03-11 14:27:21.070354119 +0100 +++ new/src/hotspot/share/runtime/stubRoutines.hpp 2019-03-11 14:27:20.858354122 +0100 @@ -210,6 +210,9 @@ static address _safefetchN_fault_pc; static address _safefetchN_continuation_pc; + static address _load_value_type_fields_in_regs; + static address _store_value_type_fields_to_buf; + public: // Initialization/Testing static void initialize1(); // must happen before universe::genesis @@ -413,6 +416,9 @@ static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); + + static address load_value_type_fields_in_regs() { return _load_value_type_fields_in_regs; } + static address store_value_type_fields_to_buf() { return _store_value_type_fields_to_buf; } }; // Safefetch allows to load a value from a location that's not known --- old/src/hotspot/share/runtime/synchronizer.cpp 2019-03-11 14:27:21.554354113 +0100 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2019-03-11 14:27:21.326354116 +0100 @@ -129,6 +129,19 @@ static volatile int gMonitorFreeCount = 0; // # on gFreeList static volatile int gMonitorPopulation = 0; // # Extant -- in circulation +#define CHECK_THROW_NOSYNC_IMSE(obj) \ + if ((obj)->mark()->is_always_locked()) { \ + ResourceMark rm(THREAD); \ + THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \ + } + +#define 
CHECK_THROW_NOSYNC_IMSE_0(obj) \ + if ((obj)->mark()->is_always_locked()) { \ + ResourceMark rm(THREAD); \ + THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \ + } + + #define CHAINMARKER (cast_to_oop(-1)) @@ -160,6 +173,7 @@ assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant"); NoSafepointVerifier nsv; if (obj == NULL) return false; // slow-path for invalid obj + assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type"); const markOop mark = obj->mark(); if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) { @@ -210,6 +224,7 @@ assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); NoSafepointVerifier nsv; if (obj == NULL) return false; // Need to throw NPE + assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type"); const markOop mark = obj->mark(); if (mark->has_monitor()) { @@ -265,6 +280,7 @@ void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) { + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { if (!SafepointSynchronize::is_at_safepoint()) { BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); @@ -283,6 +299,10 @@ void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { markOop mark = object->mark(); + if (EnableValhalla && mark->is_always_locked()) { + return; + } + assert(!EnableValhalla || !object->klass()->is_value(), "monitor op on value type"); // We cannot check for Biased Locking if we are racing an inflation. assert(mark == markOopDesc::INFLATING() || !mark->has_bias_pattern(), "should not see bias pattern here"); @@ -336,6 +356,7 @@ // We don't need to use fast path here, because it must have been // failed in the interpreter/compiler code. 
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { + CHECK_THROW_NOSYNC_IMSE(obj); markOop mark = obj->mark(); assert(!mark->has_bias_pattern(), "should not see bias pattern here"); @@ -384,6 +405,7 @@ // 5) lock lock2 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { + assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type"); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -396,6 +418,7 @@ // NOTE: must use heavy weight monitor to handle complete_exit/reenter() void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { + assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type"); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -410,6 +433,7 @@ // NOTE: must use heavy weight monitor to handle jni monitor enter void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // the current locking is from JNI instead of Java code + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -421,6 +445,7 @@ // NOTE: must use heavy weight monitor to handle jni monitor exit void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { Handle h_obj(THREAD, obj); BiasedLocking::revoke_and_rebias(h_obj, false, THREAD); @@ -461,6 +486,7 @@ // Wait/Notify/NotifyAll // NOTE: must use heavy weight monitor to handle wait() int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { + CHECK_THROW_NOSYNC_IMSE_0(obj); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); 
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -481,6 +507,7 @@ } void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -492,6 +519,7 @@ } void ObjectSynchronizer::notify(Handle obj, TRAPS) { + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -506,6 +534,7 @@ // NOTE: see comment of notify() void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { + CHECK_THROW_NOSYNC_IMSE(obj); if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(obj, false, THREAD); assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -679,6 +708,14 @@ } intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) { + if (EnableValhalla && obj->klass()->is_value()) { + // Expected tooling to override hashCode for value type, just don't crash + if (log_is_enabled(Debug, monitorinflation)) { + ResourceMark rm; + log_debug(monitorinflation)("FastHashCode for value type: %s", obj->klass()->external_name()); + } + return obj->klass()->java_mirror()->identity_hash(); + } if (UseBiasedLocking) { // NOTE: many places throughout the JVM do not expect a safepoint // to be taken here, in particular most operations on perm gen @@ -783,15 +820,12 @@ return hash; } -// Deprecated -- use FastHashCode() instead. 
- -intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { - return FastHashCode(Thread::current(), obj()); -} - bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, Handle h_obj) { + if (EnableValhalla && h_obj->mark()->is_always_locked()) { + return false; + } if (UseBiasedLocking) { BiasedLocking::revoke_and_rebias(h_obj, false, thread); assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); @@ -1302,6 +1336,10 @@ assert(Universe::verify_in_progress() || !SafepointSynchronize::is_at_safepoint(), "invariant"); + if (EnableValhalla) { + guarantee(!object->klass()->is_value(), "Attempt to inflate value type"); + } + EventJavaMonitorInflate event; for (;;) { --- old/src/hotspot/share/runtime/synchronizer.hpp 2019-03-11 14:27:21.994354107 +0100 +++ new/src/hotspot/share/runtime/synchronizer.hpp 2019-03-11 14:27:21.782354109 +0100 @@ -120,7 +120,6 @@ // Returns the identity hash value for an oop // NOTE: It may cause monitor inflation - static intptr_t identity_hash_value_for(Handle obj); static intptr_t FastHashCode(Thread * Self, oop obj); // java.lang.Thread support --- old/src/hotspot/share/runtime/thread.cpp 2019-03-11 14:27:22.418354101 +0100 +++ new/src/hotspot/share/runtime/thread.cpp 2019-03-11 14:27:22.206354104 +0100 @@ -56,6 +56,7 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/valueKlass.hpp" #include "oops/verifyOopClosure.hpp" #include "prims/jvm_misc.hpp" #include "prims/jvmtiExport.hpp" @@ -1597,6 +1598,7 @@ set_callee_target(NULL); set_vm_result(NULL); set_vm_result_2(NULL); + set_return_buffered_value(NULL); set_vframe_array_head(NULL); set_vframe_array_last(NULL); set_deferred_locals(NULL); @@ -2777,6 +2779,9 @@ void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) { // ignore is there is no stack if (!has_last_Java_frame()) return; + // Because this method is used to verify oops, it must support + // oops in 
buffered values + // traverse the stack frames. Starts from top frame. for (StackFrameStream fst(this); !fst.is_done(); fst.next()) { frame* fr = fst.current(); --- old/src/hotspot/share/runtime/thread.hpp 2019-03-11 14:27:22.878354094 +0100 +++ new/src/hotspot/share/runtime/thread.hpp 2019-03-11 14:27:22.666354097 +0100 @@ -443,6 +443,7 @@ is_definitely_current_thread = true }; + public: // Constructor Thread(); virtual ~Thread() = 0; // Thread is abstract. @@ -981,6 +982,7 @@ friend class VMStructs; friend class JVMCIVMStructs; friend class WhiteBox; + friend class VTBuffer; private: JavaThread* _next; // The next thread in the Threads list bool _on_thread_list; // Is set when this JavaThread is added to the Threads list @@ -1041,6 +1043,7 @@ // Used to pass back results to the interpreter or generated code running Java code. oop _vm_result; // oop result is GC-preserved Metadata* _vm_result_2; // non-oop result + oop _return_buffered_value; // buffered value being returned // See ReduceInitialCardMarks: this holds the precise space interval of // the most recent slow path allocation for which compiled code has @@ -1529,6 +1532,9 @@ Metadata* vm_result_2() const { return _vm_result_2; } void set_vm_result_2 (Metadata* x) { _vm_result_2 = x; } + oop return_buffered_value() const { return _return_buffered_value; } + void set_return_buffered_value(oop val) { _return_buffered_value = val; } + MemRegion deferred_card_mark() const { return _deferred_card_mark; } void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; } @@ -1771,6 +1777,7 @@ static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); } static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result); } static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); } + static ByteSize return_buffered_value_offset() { return byte_offset_of(JavaThread, _return_buffered_value); } static ByteSize thread_state_offset() { 
return byte_offset_of(JavaThread, _thread_state); } static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); } static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); } --- old/src/hotspot/share/runtime/vmOperations.hpp 2019-03-11 14:27:24.182354076 +0100 +++ new/src/hotspot/share/runtime/vmOperations.hpp 2019-03-11 14:27:23.970354079 +0100 @@ -127,6 +127,7 @@ template(ScavengeMonitors) \ template(PrintMetadata) \ template(GTestExecuteAtSafepoint) \ + template(VTBufferStats) \ class VM_Operation: public CHeapObj { public: --- old/src/hotspot/share/runtime/vmStructs.cpp 2019-03-11 14:27:24.614354070 +0100 +++ new/src/hotspot/share/runtime/vmStructs.cpp 2019-03-11 14:27:24.398354073 +0100 @@ -235,6 +235,7 @@ nonstatic_field(InstanceKlass, _static_oop_field_count, u2) \ nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \ nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \ + nonstatic_field(InstanceKlass, _extra_flags, u1) \ nonstatic_field(InstanceKlass, _misc_flags, u2) \ nonstatic_field(InstanceKlass, _minor_version, u2) \ nonstatic_field(InstanceKlass, _major_version, u2) \ --- old/src/hotspot/share/services/diagnosticCommand.cpp 2019-03-11 14:27:25.062354064 +0100 +++ new/src/hotspot/share/services/diagnosticCommand.cpp 2019-03-11 14:27:24.850354067 +0100 @@ -123,7 +123,6 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(jmx_agent_export_flags, true,false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(jmx_agent_export_flags, true,false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(jmx_agent_export_flags, true,false)); - // Debug on cmd (only makes sense with JVMTI since the agentlib needs it). 
#if INCLUDE_JVMTI DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); --- old/src/hotspot/share/services/nmtCommon.cpp 2019-03-11 14:27:25.918354052 +0100 +++ new/src/hotspot/share/services/nmtCommon.cpp 2019-03-11 14:27:25.702354055 +0100 @@ -30,6 +30,7 @@ const char* NMTUtil::_memory_type_names[] = { MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME) + "Value Types", }; --- old/src/hotspot/share/utilities/accessFlags.hpp 2019-03-11 14:27:26.374354046 +0100 +++ new/src/hotspot/share/utilities/accessFlags.hpp 2019-03-11 14:27:26.138354049 +0100 @@ -83,14 +83,18 @@ JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000, // field modification is watched by JVMTI JVM_ACC_FIELD_INTERNAL = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT JVM_ACC_FIELD_STABLE = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED and JVM_ACC_SUPER - JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE = 0x00000100, // (static) final field updated outside (class) initializer, same as JVM_ACC_NATIVE + JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE = 0x00000200, // (static) final field updated outside (class) initializer, same as JVM_ACC_NATIVE JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature + /* JVM_ACC_FLATTENABLE = 0x00000100, */ // To be enabled when ACC_FLATTENABLE is removed from java.base + JVM_ACC_FIELD_FLATTENED = 0x00008000, // flattened value field JVM_ACC_FIELD_INTERNAL_FLAGS = JVM_ACC_FIELD_ACCESS_WATCHED | JVM_ACC_FIELD_MODIFICATION_WATCHED | JVM_ACC_FIELD_INTERNAL | JVM_ACC_FIELD_STABLE | - JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE, + JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE | + JVM_ACC_FLATTENABLE | + JVM_ACC_FIELD_FLATTENED, // flags accepted by set_field_flags() JVM_ACC_FIELD_FLAGS = JVM_RECOGNIZED_FIELD_MODIFIERS | JVM_ACC_FIELD_INTERNAL_FLAGS @@ -121,6 +125,8 @@ bool is_interface () const { return (_flags & JVM_ACC_INTERFACE ) != 0; } bool is_abstract () const { return (_flags & JVM_ACC_ABSTRACT ) != 0; } bool is_strict () const { return 
(_flags & JVM_ACC_STRICT ) != 0; } + bool is_value_type () const { return (_flags & JVM_ACC_VALUE ) != 0; } + bool is_flattenable () const { return (_flags & JVM_ACC_FLATTENABLE ) != 0; } // Attribute flags bool is_synthetic () const { return (_flags & JVM_ACC_SYNTHETIC ) != 0; } @@ -210,6 +216,7 @@ void set_is_obsolete() { atomic_set_bits(JVM_ACC_IS_OBSOLETE); } void set_is_deleted() { atomic_set_bits(JVM_ACC_IS_DELETED); } void set_is_prefixed_native() { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE); } + void set_is_flattenable() { atomic_set_bits(JVM_ACC_FLATTENABLE); } void clear_not_c1_compilable() { atomic_clear_bits(JVM_ACC_NOT_C1_COMPILABLE); } void clear_not_c2_compilable() { atomic_clear_bits(JVM_ACC_NOT_C2_COMPILABLE); } --- old/src/hotspot/share/utilities/constantTag.cpp 2019-03-11 14:27:26.834354040 +0100 +++ new/src/hotspot/share/utilities/constantTag.cpp 2019-03-11 14:27:26.618354043 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ #endif // PRODUCT BasicType constantTag::basic_type() const { - switch (_tag) { + switch (value()) { case JVM_CONSTANT_Integer : return T_INT; case JVM_CONSTANT_Float : @@ -69,7 +69,7 @@ jbyte constantTag::non_error_value() const { - switch (_tag) { + switch (value()) { case JVM_CONSTANT_UnresolvedClassInError: return JVM_CONSTANT_UnresolvedClass; case JVM_CONSTANT_MethodHandleInError: @@ -79,13 +79,13 @@ case JVM_CONSTANT_DynamicInError: return JVM_CONSTANT_Dynamic; default: - return _tag; + return value(); } } jbyte constantTag::error_value() const { - switch (_tag) { + switch (value()) { case JVM_CONSTANT_UnresolvedClass: return JVM_CONSTANT_UnresolvedClassInError; case JVM_CONSTANT_MethodHandle: @@ -106,6 +106,8 @@ return "Invalid index"; case JVM_CONSTANT_Class : return "Class"; + case (JVM_CONSTANT_Class | (jbyte)JVM_CONSTANT_QDESC_BIT): + return "Q-Descriptor"; case JVM_CONSTANT_Fieldref : return "Field"; case JVM_CONSTANT_Methodref : --- old/src/hotspot/share/utilities/constantTag.hpp 2019-03-11 14:27:27.278354033 +0100 +++ new/src/hotspot/share/utilities/constantTag.hpp 2019-03-11 14:27:27.066354036 +0100 @@ -40,8 +40,8 @@ JVM_CONSTANT_Invalid = 0, // For bad value initialization JVM_CONSTANT_InternalMin = 100, // First implementation tag (aside from bad value of course) JVM_CONSTANT_UnresolvedClass = 100, // Temporary tag until actual use - JVM_CONSTANT_ClassIndex = 101, // Temporary tag while constructing constant pool - JVM_CONSTANT_StringIndex = 102, // Temporary tag while constructing constant pool + JVM_CONSTANT_ClassIndex = 101, // Temporary tag while constructing constant pool, class redefinition + JVM_CONSTANT_StringIndex = 102, // Temporary tag while constructing constant pool, class redefinition JVM_CONSTANT_UnresolvedClassInError = 103, // Error tag due to resolution error JVM_CONSTANT_MethodHandleInError = 104, // Error tag due to resolution error 
JVM_CONSTANT_MethodTypeInError = 105, // Error tag due to resolution error @@ -49,12 +49,13 @@ JVM_CONSTANT_InternalMax = 106 // Last implementation tag }; +#define JVM_CONSTANT_QDESC_BIT (1 << 7) class constantTag { private: jbyte _tag; public: - bool is_klass() const { return _tag == JVM_CONSTANT_Class; } + bool is_klass() const { return value() == JVM_CONSTANT_Class; } bool is_field () const { return _tag == JVM_CONSTANT_Fieldref; } bool is_method() const { return _tag == JVM_CONSTANT_Methodref; } bool is_interface_method() const { return _tag == JVM_CONSTANT_InterfaceMethodref; } @@ -69,11 +70,15 @@ bool is_invalid() const { return _tag == JVM_CONSTANT_Invalid; } bool is_unresolved_klass() const { - return _tag == JVM_CONSTANT_UnresolvedClass || _tag == JVM_CONSTANT_UnresolvedClassInError; + return value() == JVM_CONSTANT_UnresolvedClass || value() == JVM_CONSTANT_UnresolvedClassInError; } bool is_unresolved_klass_in_error() const { - return _tag == JVM_CONSTANT_UnresolvedClassInError; + return value() == JVM_CONSTANT_UnresolvedClassInError; + } + + bool is_Qdescriptor_klass() const { + return (_tag & JVM_CONSTANT_QDESC_BIT) != 0; } bool is_method_handle_in_error() const { @@ -116,9 +121,14 @@ _tag = JVM_CONSTANT_Invalid; } constantTag(jbyte tag) { - assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) || - (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_InvokeDynamic) || - (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag"); + jbyte entry_tag = tag & ~JVM_CONSTANT_QDESC_BIT; + assert((((tag & JVM_CONSTANT_QDESC_BIT) == 0) && (entry_tag >= 0 && entry_tag <= JVM_CONSTANT_NameAndType) || + (entry_tag >= JVM_CONSTANT_MethodHandle && entry_tag <= JVM_CONSTANT_InvokeDynamic) || + (entry_tag >= JVM_CONSTANT_InternalMin && entry_tag <= JVM_CONSTANT_InternalMax)) + || (((tag & JVM_CONSTANT_QDESC_BIT) != 0) && (entry_tag == JVM_CONSTANT_Class || + entry_tag == JVM_CONSTANT_UnresolvedClass || entry_tag == 
JVM_CONSTANT_UnresolvedClassInError + || entry_tag == JVM_CONSTANT_ClassIndex)) + , "Invalid constant tag"); _tag = tag; } @@ -136,7 +146,8 @@ return constantTag(); } - jbyte value() const { return _tag; } + jbyte value() const { return _tag & ~JVM_CONSTANT_QDESC_BIT; } + jbyte tag() const { return _tag; } jbyte error_value() const; jbyte non_error_value() const; --- old/src/hotspot/share/utilities/exceptions.hpp 2019-03-11 14:27:27.722354027 +0100 +++ new/src/hotspot/share/utilities/exceptions.hpp 2019-03-11 14:27:27.494354030 +0100 @@ -242,6 +242,8 @@ #else #define THREAD_AND_LOCATION THREAD, __FILE__, __LINE__ #endif +#define THREAD_AND_LOCATION_DECL TRAPS, const char* file, int line +#define THREAD_AND_LOCATION_ARGS THREAD, file, line #define THROW_OOP(e) \ { Exceptions::_throw_oop(THREAD_AND_LOCATION, e); return; } --- old/src/hotspot/share/utilities/globalDefinitions.cpp 2019-03-11 14:27:28.154354021 +0100 +++ new/src/hotspot/share/utilities/globalDefinitions.cpp 2019-03-11 14:27:27.938354024 +0100 @@ -90,7 +90,7 @@ num_type_chars++; } } - assert(num_type_chars == 11, "must have tested the right number of mappings"); + assert(num_type_chars == 12, "must have tested the right number of mappings"); assert(char2type(0) == T_ILLEGAL, "correct illegality"); { @@ -108,6 +108,7 @@ case T_DOUBLE: case T_LONG: case T_OBJECT: + case T_VALUETYPE: case T_ADDRESS: // random raw pointer case T_METADATA: // metadata pointer case T_NARROWOOP: // compressed pointer @@ -173,11 +174,12 @@ } _type2aelembytes[T_OBJECT] = heapOopSize; _type2aelembytes[T_ARRAY] = heapOopSize; + _type2aelembytes[T_VALUETYPE] = heapOopSize; } // Map BasicType to signature character -char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0, 0, 0}; +char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'Q', 'V', 0, 0, 0, 0, 0}; // Map BasicType to Java type name const char* type2name_tab[T_CONFLICT+1] = { @@ 
-192,6 +194,7 @@ "long", "object", "array", + "valuetype", "void", "*address*", "*narrowoop*", @@ -211,7 +214,7 @@ } // Map BasicType to size in words -int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, -1}; +int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, 1, 1, -1}; BasicType type2field[T_CONFLICT+1] = { (BasicType)0, // 0, @@ -228,12 +231,13 @@ T_LONG, // T_LONG = 11, T_OBJECT, // T_OBJECT = 12, T_OBJECT, // T_ARRAY = 13, - T_VOID, // T_VOID = 14, - T_ADDRESS, // T_ADDRESS = 15, - T_NARROWOOP, // T_NARROWOOP= 16, - T_METADATA, // T_METADATA = 17, - T_NARROWKLASS, // T_NARROWKLASS = 18, - T_CONFLICT // T_CONFLICT = 19, + T_VALUETYPE, // T_VALUETYPE = 14, + T_VOID, // T_VOID = 15, + T_ADDRESS, // T_ADDRESS = 16, + T_NARROWOOP, // T_NARROWOOP= 17, + T_METADATA, // T_METADATA = 18, + T_NARROWKLASS, // T_NARROWKLASS = 19, + T_CONFLICT // T_CONFLICT = 20 }; @@ -252,12 +256,13 @@ T_LONG, // T_LONG = 11, T_OBJECT, // T_OBJECT = 12, T_OBJECT, // T_ARRAY = 13, - T_VOID, // T_VOID = 14, - T_ADDRESS, // T_ADDRESS = 15, - T_NARROWOOP, // T_NARROWOOP = 16, - T_METADATA, // T_METADATA = 17, - T_NARROWKLASS, // T_NARROWKLASS = 18, - T_CONFLICT // T_CONFLICT = 19, + T_OBJECT, // T_VALUETYPE = 14, + T_VOID, // T_VOID = 15, + T_ADDRESS, // T_ADDRESS = 16, + T_NARROWOOP, // T_NARROWOOP = 17, + T_METADATA, // T_METADATA = 18, + T_NARROWKLASS, // T_NARROWKLASS = 19, + T_CONFLICT // T_CONFLICT = 20 }; @@ -276,12 +281,13 @@ T_LONG_aelem_bytes, // T_LONG = 11, T_OBJECT_aelem_bytes, // T_OBJECT = 12, T_ARRAY_aelem_bytes, // T_ARRAY = 13, - 0, // T_VOID = 14, - T_OBJECT_aelem_bytes, // T_ADDRESS = 15, - T_NARROWOOP_aelem_bytes, // T_NARROWOOP= 16, - T_OBJECT_aelem_bytes, // T_METADATA = 17, - T_NARROWKLASS_aelem_bytes, // T_NARROWKLASS= 18, - 0 // T_CONFLICT = 19, + T_VALUETYPE_aelem_bytes, // T_VALUETYPE = 14, + 0, // T_VOID = 15, + T_OBJECT_aelem_bytes, // T_ADDRESS = 16, + T_NARROWOOP_aelem_bytes, // T_NARROWOOP= 
17, + T_OBJECT_aelem_bytes, // T_METADATA = 18, + T_NARROWKLASS_aelem_bytes, // T_NARROWKLASS= 19, + 0 // T_CONFLICT = 20 }; #ifdef ASSERT --- old/src/hotspot/share/utilities/globalDefinitions.hpp 2019-03-11 14:27:28.606354015 +0100 +++ new/src/hotspot/share/utilities/globalDefinitions.hpp 2019-03-11 14:27:28.374354018 +0100 @@ -471,6 +471,15 @@ //---------------------------------------------------------------------------------------------------- +// Prototyping +// "Code Missing Here" macro, un-define when integrating back from prototyping stage and break +// compilation on purpose (i.e. "forget me not") +#define PROTOTYPE +#ifdef PROTOTYPE +#define CMH(m) +#endif + +//---------------------------------------------------------------------------------------------------- // Miscellaneous // 6302670 Eliminate Hotspot __fabsf dependency @@ -547,12 +556,13 @@ T_LONG = 11, T_OBJECT = 12, T_ARRAY = 13, - T_VOID = 14, - T_ADDRESS = 15, - T_NARROWOOP = 16, - T_METADATA = 17, - T_NARROWKLASS = 18, - T_CONFLICT = 19, // for stack value type with conflicting contents + T_VALUETYPE = 14, + T_VOID = 15, + T_ADDRESS = 16, + T_NARROWOOP = 17, + T_METADATA = 18, + T_NARROWKLASS = 19, + T_CONFLICT = 20, // for stack value type with conflicting contents T_ILLEGAL = 99 }; @@ -570,7 +580,7 @@ } inline bool is_reference_type(BasicType t) { - return (t == T_OBJECT || t == T_ARRAY); + return (t == T_OBJECT || t == T_ARRAY || t == T_VALUETYPE); } // Convert a char from a classfile signature to a BasicType @@ -587,6 +597,7 @@ case 'V': return T_VOID; case 'L': return T_OBJECT; case '[': return T_ARRAY; + case 'Q': return T_VALUETYPE; } return T_ILLEGAL; } @@ -617,7 +628,8 @@ T_ARRAY_size = 1, T_NARROWOOP_size = 1, T_NARROWKLASS_size = 1, - T_VOID_size = 0 + T_VOID_size = 0, + T_VALUETYPE_size = 1 }; @@ -640,9 +652,11 @@ #ifdef _LP64 T_OBJECT_aelem_bytes = 8, T_ARRAY_aelem_bytes = 8, + T_VALUETYPE_aelem_bytes = 8, #else T_OBJECT_aelem_bytes = 4, T_ARRAY_aelem_bytes = 4, + 
T_VALUETYPE_aelem_bytes = 4, #endif T_NARROWOOP_aelem_bytes = 4, T_NARROWKLASS_aelem_bytes = 4, @@ -738,7 +752,7 @@ ftos = 6, // float tos cached dtos = 7, // double tos cached atos = 8, // object cached - vtos = 9, // tos not cached + vtos = 9, // tos not cached, number_of_states, ilgl // illegal state: should not occur }; @@ -755,7 +769,8 @@ case T_FLOAT : return ftos; case T_DOUBLE : return dtos; case T_VOID : return vtos; - case T_ARRAY : // fall through + case T_VALUETYPE: // fall through + case T_ARRAY : // fall through case T_OBJECT : return atos; default : return ilgl; } @@ -970,6 +985,16 @@ return log2_intptr(x); } +// the argument doesn't need to be a power of two +inline int upper_log2(intptr_t x) { + int shift = log2_intptr(x); + intptr_t y = 1ULL << shift; + if (y < x) { + shift++; + } + return shift; +} + //* the argument must be exactly a power of 2 inline int exact_log2_long(jlong x) { assert(is_power_of_2_long(x), "x must be a power of 2: " JLONG_FORMAT, x); --- old/src/hotspot/share/utilities/growableArray.hpp 2019-03-11 14:27:29.038354009 +0100 +++ new/src/hotspot/share/utilities/growableArray.hpp 2019-03-11 14:27:28.826354012 +0100 @@ -26,6 +26,7 @@ #define SHARE_UTILITIES_GROWABLEARRAY_HPP #include "memory/allocation.hpp" +#include "oops/array.hpp" #include "oops/oop.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -402,6 +403,12 @@ } } + void appendAll(const Array* l) { + for (int i = 0; i < l->length(); i++) { + raw_at_put_grow(_len, l->at(i), E()); + } + } + void sort(int f(E*,E*)) { qsort(_data, length(), sizeof(E), (_sort_Fn)f); } @@ -540,10 +547,10 @@ UnaryPredicate _predicate; // Unary predicate the elements of the GrowableArray should satisfy public: - GrowableArrayFilterIterator(const GrowableArrayIterator& begin, UnaryPredicate filter_predicate) - : _array(begin._array), _position(begin._position), _predicate(filter_predicate) { + GrowableArrayFilterIterator(const GrowableArray* array, UnaryPredicate 
filter_predicate) + : _array(array), _position(0), _predicate(filter_predicate) { // Advance to first element satisfying the predicate - while(_position != _array->length() && !_predicate(_array->at(_position))) { + while(!at_end() && !_predicate(_array->at(_position))) { ++_position; } } @@ -552,7 +559,7 @@ do { // Advance to next element satisfying the predicate ++_position; - } while(_position != _array->length() && !_predicate(_array->at(_position))); + } while(!at_end() && !_predicate(_array->at(_position))); return *this; } @@ -577,6 +584,10 @@ assert(_array == rhs._array, "iterator belongs to different array"); return _position != rhs._position; } + + bool at_end() const { + return _array == NULL || _position == _array->end()._position; + } }; // Arrays for basic types --- old/test/hotspot/jtreg/ProblemList.txt 2019-03-11 14:27:29.470354003 +0100 +++ new/test/hotspot/jtreg/ProblemList.txt 2019-03-11 14:27:29.262354006 +0100 @@ -84,6 +84,27 @@ runtime/NMT/CheckForProperDetailStackTrace.java 8218458 generic-all runtime/SharedArchiveFile/SASymbolTableTest.java 8193639 solaris-all +# Valhalla TODO: +runtime/RedefineTests/RedefineLeak.java 8205032 generic-all +runtime/SharedArchiveFile/BootAppendTests.java 8210258 generic-all +runtime/SharedArchiveFile/CdsDifferentCompactStrings.java 8210258 generic-all +runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java 8210258 generic-all +runtime/SharedArchiveFile/NonBootLoaderClasses.java 8210258 generic-all +runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java 8210258 generic-all +runtime/SharedArchiveFile/SharedArchiveFile.java 8210258 generic-all +runtime/SharedArchiveFile/SharedStringsDedup.java 8210258 generic-all +runtime/SharedArchiveFile/SharedStringsRunAuto.java 8210258 generic-all +runtime/SharedArchiveFile/SharedSymbolTableBucketSize.java 8210258 generic-all +runtime/SharedArchiveFile/SpaceUtilizationCheck.java 8210258 generic-all +runtime/SharedArchiveFile/TestInterpreterMethodEntries.java 8210258 
generic-all +runtime/SharedArchiveFile/serviceability/transformRelatedClasses/TransformInterfaceAndImplementor.java 8210258 generic-all +runtime/SharedArchiveFile/serviceability/transformRelatedClasses/TransformSuperAndSubClasses.java 8210258 generic-all +runtime/SharedArchiveFile/serviceability/transformRelatedClasses/TransformSuperSubTwoPckgs.java 8210258 generic-all +runtime/appcds/ClassLoaderTest.java 8210258 generic-all +runtime/appcds/HelloTest.java 8210258 generic-all +runtime/appcds/sharedStrings/SharedStringsBasic.java 8210258 generic-all + + ############################################################################# # :hotspot_serviceability @@ -137,6 +158,31 @@ serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatIntervalTest.java 8214032 generic-all +# Valhalla TODO: +serviceability/sa/ClhsdbCDSCore.java 8190936 generic-all +serviceability/sa/ClhsdbCDSJstackPrintAll.java 8190936 generic-all +serviceability/sa/ClhsdbFindPC.java 8190936 generic-all +serviceability/sa/ClhsdbInspect.java 8190936 generic-all +serviceability/sa/ClhsdbJdis.java 8190936 generic-all +serviceability/sa/ClhsdbJstack.java 8190936 generic-all +serviceability/sa/ClhsdbPrintAll.java 8190936 generic-all +serviceability/sa/ClhsdbPrintAs.java 8190936 generic-all +serviceability/sa/ClhsdbPrintStatics.java 8190936 generic-all +serviceability/sa/ClhsdbSource.java 8190936 generic-all +serviceability/sa/ClhsdbSymbol.java 8190936 generic-all +serviceability/sa/ClhsdbWhere.java 8190936 generic-all +serviceability/sa/JhsdbThreadInfoTest.java 8190936 generic-all +serviceability/sa/TestClassDump.java 8190936 generic-all +serviceability/sa/TestClhsdbJstackLock.java 8190936 generic-all +serviceability/sa/TestCpoolForInvokeDynamic.java 8190936 generic-all +serviceability/sa/TestHeapDumpForInvokeDynamic.java 8190936 generic-all +serviceability/sa/TestHeapDumpForLargeArray.java 8190936 generic-all +serviceability/sa/TestIntConstant.java 8190936 generic-all 
+serviceability/sa/TestJhsdbJstackLock.java 8190936 generic-all +serviceability/sa/TestJmapCore.java 8190936 generic-all +serviceability/sa/TestJmapCoreMetaspace.java 8190936 generic-all +serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java 8190936 generic-all + ############################################################################# # :hotspot_misc --- old/test/hotspot/jtreg/TEST.groups 2019-03-11 14:27:29.898353997 +0100 +++ new/test/hotspot/jtreg/TEST.groups 2019-03-11 14:27:29.686354000 +0100 @@ -44,7 +44,7 @@ -gc/nvdimm hotspot_runtime = \ - runtime + runtime \ hotspot_handshake = \ runtime/handshake @@ -52,6 +52,13 @@ hotspot_serviceability = \ serviceability +hotspot_valhalla = \ + runtime/valhalla \ + compiler/valhalla + +hotspot_valhalla_runtime = \ + runtime/valhalla + hotspot_misc = \ / \ -applications \ @@ -131,6 +138,7 @@ compiler/types/ \ compiler/uncommontrap/ \ compiler/unsafe/ \ + compiler/valhalla/ \ compiler/vectorization/ \ -compiler/intrinsics/bmi \ -compiler/intrinsics/mathexact \ --- old/test/hotspot/jtreg/compiler/tiered/ConstantGettersTransitionsTest.java 2019-03-11 14:27:30.326353991 +0100 +++ new/test/hotspot/jtreg/compiler/tiered/ConstantGettersTransitionsTest.java 2019-03-11 14:27:30.114353994 +0100 @@ -24,6 +24,7 @@ /** * @test ConstantGettersTransitionsTest * @summary Test the correctness of compilation level transitions for constant getters methods + * @requires vm.opt.final.TieredCompilation * @library /test/lib / * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java 2019-03-11 14:27:30.754353985 +0100 +++ new/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java 2019-03-11 14:27:30.542353988 +0100 @@ -25,6 +25,7 @@ * @test * @bug 8141551 * @summary C2 can not handle returns with inccompatible interface arrays + * @requires vm.opt.final.TieredCompilation * @requires vm.compMode == "Xmixed" & vm.flavor == "server" * 
@modules java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.misc --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/MyInterface.java 2019-03-11 14:27:30.970353982 +0100 @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +public interface MyInterface { + public long hash(); +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/MyValue1.java 2019-03-11 14:27:31.426353976 +0100 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +value public final class MyValue1 implements MyInterface { + static int s; + static final long sf = ValueTypeTest.rL; + final int x; + final long y; + final short z; + final Integer o; + final int[] oa; + final MyValue2.val v1; + final MyValue2.val v2; + static final MyValue2.val v3 = MyValue2.createWithFieldsInline(ValueTypeTest.rI, true); + final int c; + + private MyValue1() { + s = 0; + this.x = 0; + this.y = 0; + this.z = 0; + this.o = null; + this.oa = null; + this.v1 = MyValue2.createDefaultInline(); + this.v2 = MyValue2.createDefaultInline(); + this.c = 0; + } + + @DontInline + static MyValue1 createDefaultDontInline() { + return createDefaultInline(); + } + + @ForceInline + static MyValue1 createDefaultInline() { + return MyValue1.default; + } + + @DontInline + static MyValue1 createWithFieldsDontInline(int x, long y) { + return createWithFieldsInline(x, y); + } + + @ForceInline + static MyValue1 createWithFieldsInline(int x, long y) { + MyValue1 v = createDefaultInline(); + v = setX(v, x); + v = setY(v, y); + v = setZ(v, (short)x); + v = setO(v, new Integer(x)); + int[] oa = {x}; + v = setOA(v, oa); + v = setV1(v, MyValue2.createWithFieldsInline(x, y, true)); + v = setV2(v, 
MyValue2.createWithFieldsInline(x, y, false)); + v = setC(v, (int)(x+y)); + return v; + } + + // Hash only primitive and value type fields to avoid NullPointerException + @ForceInline + public long hashPrimitive() { + return s + sf + x + y + z + c + v1.hash() + v2.hash() + v3.hash(); + } + + @ForceInline + public long hash() { + long res = hashPrimitive(); + try { + res += o; + } catch(NullPointerException npe) {} + try { + res += oa[0]; + } catch(NullPointerException npe) {} + return res; + } + + @DontCompile + public long hashInterpreted() { + return s + sf + x + y + z + o + oa[0] + c + v1.hashInterpreted() + v2.hashInterpreted() + v3.hashInterpreted(); + } + + @ForceInline + public void print() { + System.out.print("s=" + s + ", sf=" + sf + ", x=" + x + ", y=" + y + ", z=" + z + ", o=" + (o != null ? (Integer)o : "NULL") + ", oa=" + (oa != null ? oa[0] : "NULL") + ", v1["); + v1.print(); + System.out.print("], v2["); + v2.print(); + System.out.print("], v3["); + v3.print(); + System.out.print("], c=" + c); + } + + @ForceInline + static MyValue1 setX(MyValue1 v, int x) { + v = __WithField(v.x, x); + return v; + } + + @ForceInline + static MyValue1 setY(MyValue1 v, long y) { + v = __WithField(v.y, y); + return v; + } + + @ForceInline + static MyValue1 setZ(MyValue1 v, short z) { + v = __WithField(v.z, z); + return v; + } + + @ForceInline + static MyValue1 setO(MyValue1 v, Integer o) { + v = __WithField(v.o, o); + return v; + } + + @ForceInline + static MyValue1 setOA(MyValue1 v, int[] oa) { + v = __WithField(v.oa, oa); + return v; + } + + @ForceInline + static MyValue1 setC(MyValue1 v, int c) { + v = __WithField(v.c, c); + return v; + } + + @ForceInline + static MyValue1 setV1(MyValue1 v, MyValue2 v1) { + v = __WithField(v.v1, v1); + return v; + } + + @ForceInline + static MyValue1 setV2(MyValue1 v, MyValue2 v2) { + v = __WithField(v.v2, v2); + return v; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ 
new/test/hotspot/jtreg/compiler/valhalla/valuetypes/MyValue2.java 2019-03-11 14:27:31.882353970 +0100 @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package compiler.valhalla.valuetypes; + +value final class MyValue2Inline { + final boolean b; + final long c; + + private MyValue2Inline() { + this.b = false; + this.c = 0; + } + + @ForceInline + static MyValue2Inline setB(MyValue2Inline v, boolean b) { + v = __WithField(v.b, b); + return v; + } + + @ForceInline + static MyValue2Inline setC(MyValue2Inline v, long c) { + v = __WithField(v.c, c); + return v; + } + + @ForceInline + public static MyValue2Inline createDefault() { + return MyValue2Inline.default; + } + + @ForceInline + public static MyValue2Inline createWithFieldsInline(boolean b, long c) { + MyValue2Inline v = MyValue2Inline.createDefault(); + v = MyValue2Inline.setB(v, b); + v = MyValue2Inline.setC(v, c); + return v; + } +} + +value public final class MyValue2 implements MyInterface { + final int x; + final byte y; + final MyValue2Inline.val v1; + + private MyValue2() { + this.x = 0; + this.y = 0; + this.v1 = MyValue2Inline.createDefault(); + } + + @ForceInline + public static MyValue2 createDefaultInline() { + return MyValue2.default; + } + + @ForceInline + public static MyValue2 createWithFieldsInline(int x, long y, boolean b) { + MyValue2 v = createDefaultInline(); + v = setX(v, x); + v = setY(v, (byte)x); + v = setV1(v, MyValue2Inline.createWithFieldsInline(b, y)); + return v; + } + + @ForceInline + public static MyValue2 createWithFieldsInline(int x, boolean b) { + MyValue2 v = createDefaultInline(); + v = setX(v, x); + v = setY(v, (byte)x); + v = setV1(v, MyValue2Inline.createWithFieldsInline(b, ValueTypeTest.rL)); + return v; + } + + @DontInline + public static MyValue2 createWithFieldsDontInline(int x, boolean b) { + MyValue2 v = createDefaultInline(); + v = setX(v, x); + v = setY(v, (byte)x); + v = setV1(v, MyValue2Inline.createWithFieldsInline(b, ValueTypeTest.rL)); + return v; + } + + @ForceInline + public long hash() { + return x + y + (v1.b ? 
0 : 1) + v1.c; + } + + @DontInline + public long hashInterpreted() { + return x + y + (v1.b ? 0 : 1) + v1.c; + } + + @ForceInline + public void print() { + System.out.print("x=" + x + ", y=" + y + ", b=" + v1.b + ", c=" + v1.c); + } + + @ForceInline + static MyValue2 setX(MyValue2 v, int x) { + v = __WithField(v.x, x); + return v; + } + + @ForceInline + static MyValue2 setY(MyValue2 v, byte y) { + v = __WithField(v.y, y); + return v; + } + + @ForceInline + static MyValue2 setV1(MyValue2 v, MyValue2Inline v1) { + v = __WithField(v.v1, v1); + return v; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/MyValue3.java 2019-03-11 14:27:32.342353963 +0100 @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; +import jdk.test.lib.Utils; + +value final class MyValue3Inline { + final float f7; + final double f8; + + private MyValue3Inline() { + this.f7 = 0; + this.f8 = 0; + } + + @ForceInline + static MyValue3Inline setF7(MyValue3Inline v, float f7) { + v = __WithField(v.f7, f7); + return v; + } + + @ForceInline + static MyValue3Inline setF8(MyValue3Inline v, double f8) { + v = __WithField(v.f8, f8); + return v; + } + + @ForceInline + public static MyValue3Inline createDefault() { + return MyValue3Inline.default; + } + + @ForceInline + public static MyValue3Inline createWithFieldsInline(float f7, double f8) { + MyValue3Inline v = createDefault(); + v = setF7(v, f7); + v = setF8(v, f8); + return v; + } +} + +// Value type definition to stress test return of a value in registers +// (uses all registers of calling convention on x86_64) +value public final class MyValue3 implements MyInterface { + final char c; + final byte bb; + final short s; + final int i; + final long l; + final Object o; + final float f1; + final double f2; + final float f3; + final double f4; + final float f5; + final double f6; + final MyValue3Inline.val v1; + + private MyValue3() { + this.c = 0; + this.bb = 0; + this.s = 0; + this.i = 0; + this.l = 0; + this.o = null; + this.f1 = 0; + this.f2 = 0; + this.f3 = 0; + this.f4 = 0; + this.f5 = 0; + this.f6 = 0; + this.v1 = MyValue3Inline.createDefault(); + } + + @ForceInline + static MyValue3 setC(MyValue3 v, char c) { + v = __WithField(v.c, c); + return v; + } + + @ForceInline + static MyValue3 setBB(MyValue3 v, byte bb) { + v = __WithField(v.bb, bb); + return v; + } + + @ForceInline + static MyValue3 setS(MyValue3 v, short s) { + v = __WithField(v.s, s); + return v; + } + + @ForceInline + static MyValue3 setI(MyValue3 v, int i) { + v = __WithField(v.i, i); + return v; + } + + @ForceInline + static MyValue3 setL(MyValue3 v, long l) { + v = __WithField(v.l, l); + return v; + } + 
+ @ForceInline + static MyValue3 setO(MyValue3 v, Object o) { + v = __WithField(v.o, o); + return v; + } + + @ForceInline + static MyValue3 setF1(MyValue3 v, float f1) { + v = __WithField(v.f1, f1); + return v; + } + + @ForceInline + static MyValue3 setF2(MyValue3 v, double f2) { + v = __WithField(v.f2, f2); + return v; + } + + @ForceInline + static MyValue3 setF3(MyValue3 v, float f3) { + v = __WithField(v.f3, f3); + return v; + } + + @ForceInline + static MyValue3 setF4(MyValue3 v, double f4) { + v = __WithField(v.f4, f4); + return v; + } + + @ForceInline + static MyValue3 setF5(MyValue3 v, float f5) { + v = __WithField(v.f5, f5); + return v; + } + + @ForceInline + static MyValue3 setF6(MyValue3 v, double f6) { + v = __WithField(v.f6, f6); + return v; + } + + @ForceInline + static MyValue3 setV1(MyValue3 v, MyValue3Inline v1) { + v = __WithField(v.v1, v1); + return v; + } + + @ForceInline + public static MyValue3 createDefault() { + return MyValue3.default; + } + + @ForceInline + public static MyValue3 create() { + java.util.Random r = Utils.getRandomInstance(); + MyValue3 v = createDefault(); + v = setC(v, (char)r.nextInt()); + v = setBB(v, (byte)r.nextInt()); + v = setS(v, (short)r.nextInt()); + v = setI(v, r.nextInt()); + v = setL(v, r.nextLong()); + v = setO(v, new Object()); + v = setF1(v, r.nextFloat()); + v = setF2(v, r.nextDouble()); + v = setF3(v, r.nextFloat()); + v = setF4(v, r.nextDouble()); + v = setF5(v, r.nextFloat()); + v = setF6(v, r.nextDouble()); + v = setV1(v, MyValue3Inline.createWithFieldsInline(r.nextFloat(), r.nextDouble())); + return v; + } + + @DontInline + public static MyValue3 createDontInline() { + return create(); + } + + @ForceInline + public static MyValue3 copy(MyValue3 other) { + MyValue3 v = createDefault(); + v = setC(v, other.c); + v = setBB(v, other.bb); + v = setS(v, other.s); + v = setI(v, other.i); + v = setL(v, other.l); + v = setO(v, other.o); + v = setF1(v, other.f1); + v = setF2(v, other.f2); + v = setF3(v, other.f3); 
+ v = setF4(v, other.f4); + v = setF5(v, other.f5); + v = setF6(v, other.f6); + v = setV1(v, other.v1); + return v; + } + + @DontInline + public void verify(MyValue3 other) { + Asserts.assertEQ(c, other.c); + Asserts.assertEQ(bb, other.bb); + Asserts.assertEQ(s, other.s); + Asserts.assertEQ(i, other.i); + Asserts.assertEQ(l, other.l); + Asserts.assertEQ(o, other.o); + Asserts.assertEQ(f1, other.f1); + Asserts.assertEQ(f2, other.f2); + Asserts.assertEQ(f3, other.f3); + Asserts.assertEQ(f4, other.f4); + Asserts.assertEQ(f5, other.f5); + Asserts.assertEQ(f6, other.f6); + Asserts.assertEQ(v1.f7, other.v1.f7); + Asserts.assertEQ(v1.f8, other.v1.f8); + } + + @ForceInline + public long hash() { + return c + + bb + + s + + i + + l + + o.hashCode() + + Float.hashCode(f1) + + Double.hashCode(f2) + + Float.hashCode(f3) + + Double.hashCode(f4) + + Float.hashCode(f5) + + Double.hashCode(f6) + + Float.hashCode(v1.f7) + + Double.hashCode(v1.f8); + } +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/MyValue4.java 2019-03-11 14:27:32.794353957 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +// Value type definition with too many fields to return in registers +value final class MyValue4 implements MyInterface { + final MyValue3.val v1; + final MyValue3.val v2; + + private MyValue4() { + this.v1 = MyValue3.createDefault(); + this.v2 = MyValue3.createDefault(); + } + + @ForceInline + static MyValue4 setV1(MyValue4 v, MyValue3 v1) { + v = __WithField(v.v1, v1); + return v; + } + + @ForceInline + static MyValue4 setV2(MyValue4 v, MyValue3 v2) { + v = __WithField(v.v2, v2); + return v; + } + + @ForceInline + public static MyValue4 createDefault() { + return MyValue4.default; + } + + @ForceInline + public static MyValue4 create() { + MyValue4 v = createDefault(); + MyValue3 v1 = MyValue3.create(); + v = setV1(v, v1); + MyValue3 v2 = MyValue3.create(); + v = setV2(v, v2); + return v; + } + + public void verify(MyValue4 other) { + v1.verify(other.v1); + v2.verify(other.v2); + } + + @ForceInline + public long hash() { + return v1.hash() + v2.hash(); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/SimpleValueType.java 2019-03-11 14:27:33.254353951 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +value final class SimpleValueType { + final int x; + + private SimpleValueType() { + x = 0; + } + + static SimpleValueType create() { + return SimpleValueType.default; + } +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestArrays.java 2019-03-11 14:27:33.710353945 +0100 @@ -0,0 +1,1675 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; +import java.lang.reflect.Method; +import java.util.Arrays; + +/* + * @test + * @summary Test value type arrays + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestArrays.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestArrays + */ +public class TestArrays extends ValueTypeTest { + // Unlike C2, C1 intrinsics never deoptimize System.arraycopy. Instead, we fall back to + // a normal method invocation when encountering flattened arrays. + private static void assertDeoptimizedByC2(Method m) { + int CompLevel_none = 0, // Interpreter + CompLevel_simple = 1, // C1 + CompLevel_limited_profile = 2, // C1, invocation & backedge counters + CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo + CompLevel_full_optimization = 4; // C2 or JVMCI + + if (!XCOMP && WHITE_BOX.isMethodCompiled(m, false) && + WHITE_BOX.getMethodCompilationLevel(m, false) >= CompLevel_full_optimization) { + throw new RuntimeException("Type check should have caused it to deoptimize"); + } + } + + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 3: return new String[] {"-XX:-MonomorphicArrayCheck", "-XX:+ValueArrayFlatten"}; + case 4: return new String[] {"-XX:-MonomorphicArrayCheck"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestArrays test = new TestArrays(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class); + } + + // Helper methods + + protected long hash() { + return hash(rI, rL); + } + + protected long hash(int x, long y) { + return MyValue1.createWithFieldsInline(x, y).hash(); + } + + // Test value type array creation and initialization + @Test(valid = ValueTypeArrayFlattenOff, failOn = LOAD) + @Test(valid = ValueTypeArrayFlattenOn) + public MyValue1[] test1(int len) { + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsDontInline(rI, rL); + } + return va; + } + + @DontCompile + public void test1_verifier(boolean warmup) { + int len = Math.abs(rI % 10); + MyValue1[] va = test1(len); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(va[i].hash(), hash()); + } + } + + // Test creation of a value type array and element access + @Test(failOn = ALLOC + ALLOCA + LOOP + LOAD + STORE + TRAP) + public long test2() { + MyValue1[] va = new MyValue1[1]; + va[0] = MyValue1.createWithFieldsInline(rI, rL); + return va[0].hash(); + } + + @DontCompile + public void test2_verifier(boolean warmup) { + long result = test2(); + Asserts.assertEQ(result, hash()); + } + + // Test receiving a value type array from the interpreter, + // updating its elements in a loop and computing a hash. 
+ @Test(failOn = ALLOCA) + public long test3(MyValue1[] va) { + long result = 0; + for (int i = 0; i < 10; ++i) { + result += va[i].hash(); + va[i] = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + } + return result; + } + + @DontCompile + public void test3_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[10]; + long expected = 0; + for (int i = 0; i < 10; ++i) { + va[i] = MyValue1.createWithFieldsDontInline(rI + i, rL + i); + expected += va[i].hash(); + } + long result = test3(va); + Asserts.assertEQ(expected, result); + for (int i = 0; i < 10; ++i) { + if (va[i].hash() != hash(rI + 1, rL + 1)) { + Asserts.assertEQ(va[i].hash(), hash(rI + 1, rL + 1)); + } + } + } + + // Test returning a value type array received from the interpreter + @Test(failOn = ALLOC + ALLOCA + LOAD + STORE + LOOP + TRAP) + public MyValue1[] test4(MyValue1[] va) { + return va; + } + + @DontCompile + public void test4_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[10]; + for (int i = 0; i < 10; ++i) { + va[i] = MyValue1.createWithFieldsDontInline(rI + i, rL + i); + } + va = test4(va); + for (int i = 0; i < 10; ++i) { + Asserts.assertEQ(va[i].hash(), hash(rI + i, rL + i)); + } + } + + // Merge value type arrays created from two branches + @Test + public MyValue1[] test5(boolean b) { + MyValue1[] va; + if (b) { + va = new MyValue1[5]; + for (int i = 0; i < 5; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + } else { + va = new MyValue1[10]; + for (int i = 0; i < 10; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI + i, rL + i); + } + } + long sum = va[0].hashInterpreted(); + if (b) { + va[0] = MyValue1.createWithFieldsDontInline(rI, sum); + } else { + va[0] = MyValue1.createWithFieldsDontInline(rI + 1, sum + 1); + } + return va; + } + + @DontCompile + public void test5_verifier(boolean warmup) { + MyValue1[] va = test5(true); + Asserts.assertEQ(va.length, 5); + Asserts.assertEQ(va[0].hash(), hash(rI, hash())); + for (int i = 1; i < 5; ++i) { + 
Asserts.assertEQ(va[i].hash(), hash()); + } + va = test5(false); + Asserts.assertEQ(va.length, 10); + Asserts.assertEQ(va[0].hash(), hash(rI + 1, hash(rI, rL) + 1)); + for (int i = 1; i < 10; ++i) { + Asserts.assertEQ(va[i].hash(), hash(rI + i, rL + i)); + } + } + + // Test creation of value type array with single element + @Test(failOn = ALLOCA + LOOP + LOAD + TRAP) + public MyValue1 test6() { + MyValue1[] va = new MyValue1[1]; + return va[0]; + } + + @DontCompile + public void test6_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[1]; + MyValue1 v = test6(); + Asserts.assertEQ(v.hashPrimitive(), va[0].hashPrimitive()); + } + + // Test default initialization of value type arrays + @Test(failOn = LOAD) + public MyValue1[] test7(int len) { + return new MyValue1[len]; + } + + @DontCompile + public void test7_verifier(boolean warmup) { + int len = Math.abs(rI % 10); + MyValue1[] va = new MyValue1[len]; + MyValue1[] var = test7(len); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // Test creation of value type array with zero length + @Test(failOn = ALLOC + LOAD + STORE + LOOP + TRAP) + public MyValue1[] test8() { + return new MyValue1[0]; + } + + @DontCompile + public void test8_verifier(boolean warmup) { + MyValue1[] va = test8(); + Asserts.assertEQ(va.length, 0); + } + + static MyValue1[] test9_va; + + // Test that value type array loaded from field has correct type + @Test(failOn = LOOP) + public long test9() { + return test9_va[0].hash(); + } + + @DontCompile + public void test9_verifier(boolean warmup) { + test9_va = new MyValue1[1]; + test9_va[0] = MyValue1.createWithFieldsInline(rI, rL); + long result = test9(); + Asserts.assertEQ(result, hash()); + } + + // Multi-dimensional arrays + @Test + public MyValue1[][][] test10(int len1, int len2, int len3) { + MyValue1[][][] arr = new MyValue1[len1][len2][len3]; + for (int i = 0; i < len1; i++) { + for (int j = 0; j < len2; j++) { + for (int k 
= 0; k < len3; k++) { + arr[i][j][k] = MyValue1.createWithFieldsDontInline(rI + i , rL + j + k); + } + } + } + return arr; + } + + @DontCompile + public void test10_verifier(boolean warmup) { + MyValue1[][][] arr = test10(2, 3, 4); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 4; k++) { + Asserts.assertEQ(arr[i][j][k].hash(), MyValue1.createWithFieldsDontInline(rI + i , rL + j + k).hash()); + } + } + } + } + + @Test + public void test11(MyValue1[][][] arr, long[] res) { + int l = 0; + for (int i = 0; i < arr.length; i++) { + for (int j = 0; j < arr[i].length; j++) { + for (int k = 0; k < arr[i][j].length; k++) { + res[l] = arr[i][j][k].hash(); + l++; + } + } + } + } + + @DontCompile + public void test11_verifier(boolean warmup) { + MyValue1[][][] arr = new MyValue1[2][3][4]; + long[] res = new long[2*3*4]; + long[] verif = new long[2*3*4]; + int l = 0; + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 4; k++) { + arr[i][j][k] = MyValue1.createWithFieldsDontInline(rI + i, rL + j + k); + verif[l] = arr[i][j][k].hash(); + l++; + } + } + } + test11(arr, res); + for (int i = 0; i < verif.length; i++) { + Asserts.assertEQ(res[i], verif[i]); + } + } + + // Array load out of bounds (upper bound) at compile time + @Test + public int test12() { + int arraySize = Math.abs(rI) % 10;; + MyValue1[] va = new MyValue1[arraySize]; + + for (int i = 0; i < arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI + 1, rL); + } + + try { + return va[arraySize + 1].x; + } catch (ArrayIndexOutOfBoundsException e) { + return rI; + } + } + + public void test12_verifier(boolean warmup) { + Asserts.assertEQ(test12(), rI); + } + + // Array load out of bounds (lower bound) at compile time + @Test + public int test13() { + int arraySize = Math.abs(rI) % 10;; + MyValue1[] va = new MyValue1[arraySize]; + + for (int i = 0; i < arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI + i, rL); + } + + 
try { + return va[-arraySize].x; + } catch (ArrayIndexOutOfBoundsException e) { + return rI; + } + } + + public void test13_verifier(boolean warmup) { + Asserts.assertEQ(test13(), rI); + } + + // Array load out of bound not known to compiler (both lower and upper bound) + @Test + public int test14(MyValue1[] va, int index) { + return va[index].x; + } + + public void test14_verifier(boolean warmup) { + int arraySize = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[arraySize]; + + for (int i = 0; i < arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI, rL); + } + + int result; + for (int i = -20; i < 20; i++) { + try { + result = test14(va, i); + } catch (ArrayIndexOutOfBoundsException e) { + result = rI; + } + Asserts.assertEQ(result, rI); + } + } + + // Array store out of bounds (upper bound) at compile time + @Test + public int test15() { + int arraySize = Math.abs(rI) % 10;; + MyValue1[] va = new MyValue1[arraySize]; + + try { + for (int i = 0; i <= arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI + 1, rL); + } + return rI - 1; + } catch (ArrayIndexOutOfBoundsException e) { + return rI; + } + } + + public void test15_verifier(boolean warmup) { + Asserts.assertEQ(test15(), rI); + } + + // Array store out of bounds (lower bound) at compile time + @Test + public int test16() { + int arraySize = Math.abs(rI) % 10;; + MyValue1[] va = new MyValue1[arraySize]; + + try { + for (int i = -1; i <= arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI + 1, rL); + } + return rI - 1; + } catch (ArrayIndexOutOfBoundsException e) { + return rI; + } + } + + public void test16_verifier(boolean warmup) { + Asserts.assertEQ(test16(), rI); + } + + // Array store out of bound not known to compiler (both lower and upper bound) + @Test + public int test17(MyValue1[] va, int index, MyValue1 vt) { + va[index] = vt; + return va[index].x; + } + + @DontCompile + public void test17_verifier(boolean warmup) { + int arraySize = Math.abs(rI) % 10; 
+ MyValue1[] va = new MyValue1[arraySize]; + + for (int i = 0; i < arraySize; i++) { + va[i] = MyValue1.createWithFieldsDontInline(rI, rL); + } + + MyValue1 vt = MyValue1.createWithFieldsDontInline(rI + 1, rL); + int result; + for (int i = -20; i < 20; i++) { + try { + result = test17(va, i, vt); + } catch (ArrayIndexOutOfBoundsException e) { + result = rI + 1; + } + Asserts.assertEQ(result, rI + 1); + } + + for (int i = 0; i < arraySize; i++) { + Asserts.assertEQ(va[i].x, rI + 1); + } + } + + // clone() as stub call + @Test + public MyValue1[] test18(MyValue1[] va) { + return va.clone(); + } + + @DontCompile + public void test18_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] result = test18(va); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(result[i].hash(), va[i].hash()); + } + } + + // clone() as series of loads/stores + static MyValue1[] test19_orig = null; + + @Test + public MyValue1[] test19() { + MyValue1[] va = new MyValue1[8]; + for (int i = 0; i < va.length; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test19_orig = va; + + return va.clone(); + } + + @DontCompile + public void test19_verifier(boolean warmup) { + MyValue1[] result = test19(); + for (int i = 0; i < test19_orig.length; ++i) { + Asserts.assertEQ(result[i].hash(), test19_orig[i].hash()); + } + } + + // arraycopy() of value type array with oop fields + @Test + public void test20(MyValue1[] src, MyValue1[] dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test20_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] src = new MyValue1[len]; + MyValue1[] dst = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test20(src, dst); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); 
+ } + } + + // arraycopy() of value type array with no oop field + @Test + public void test21(MyValue2[] src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test21_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue2[] src = new MyValue2[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test21(src, dst); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // arraycopy() of value type array with oop field and tightly + // coupled allocation as dest + @Test + public MyValue1[] test22(MyValue1[] src) { + MyValue1[] dst = new MyValue1[src.length]; + System.arraycopy(src, 0, dst, 0, src.length); + return dst; + } + + @DontCompile + public void test22_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] src = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] dst = test22(src); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // arraycopy() of value type array with oop fields and tightly + // coupled allocation as dest + @Test + public MyValue1[] test23(MyValue1[] src) { + MyValue1[] dst = new MyValue1[src.length + 10]; + System.arraycopy(src, 0, dst, 5, src.length); + return dst; + } + + @DontCompile + public void test23_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] src = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] dst = test23(src); + for (int i = 5; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // arraycopy() of value type array passed as Object + @Test + public void test24(MyValue1[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void 
test24_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] src = new MyValue1[len]; + MyValue1[] dst = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test24(src, dst); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // short arraycopy() with no oop field + @Test + public void test25(MyValue2[] src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test25_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[8]; + MyValue2[] dst = new MyValue2[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test25(src, dst); + for (int i = 0; i < 8; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // short arraycopy() with oop fields + @Test + public void test26(MyValue1[] src, MyValue1[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test26_verifier(boolean warmup) { + MyValue1[] src = new MyValue1[8]; + MyValue1[] dst = new MyValue1[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test26(src, dst); + for (int i = 0; i < 8; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + // short arraycopy() with oop fields and offsets + @Test + public void test27(MyValue1[] src, MyValue1[] dst) { + System.arraycopy(src, 1, dst, 2, 6); + } + + @DontCompile + public void test27_verifier(boolean warmup) { + MyValue1[] src = new MyValue1[8]; + MyValue1[] dst = new MyValue1[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test27(src, dst); + for (int i = 2; i < 8; ++i) { + Asserts.assertEQ(src[i-1].hash(), dst[i].hash()); + } + } + + // non escaping allocations + @Test(failOn = ALLOCA + LOOP + LOAD + TRAP) + public MyValue2 test28() { + MyValue2[] src = new MyValue2[10]; + src[0] = 
MyValue2.createWithFieldsInline(rI, false); + MyValue2[] dst = (MyValue2[])src.clone(); + return dst[0]; + } + + @DontCompile + public void test28_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, false); + MyValue2 result = test28(); + Asserts.assertEQ(result.hash(), v.hash()); + } + + // non escaping allocations + @Test(failOn = ALLOCA + LOOP + LOAD + TRAP) + public MyValue2 test29(MyValue2[] src) { + MyValue2[] dst = new MyValue2[10]; + System.arraycopy(src, 0, dst, 0, 10); + return dst[0]; + } + + @DontCompile + public void test29_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[10]; + for (int i = 0; i < 10; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + MyValue2 v = test29(src); + Asserts.assertEQ(src[0].hash(), v.hash()); + } + + // non escaping allocation with uncommon trap that needs + // eliminated value type array element as debug info + @Test + @Warmup(10000) + public MyValue2 test30(MyValue2[] src, boolean flag) { + MyValue2[] dst = new MyValue2[10]; + System.arraycopy(src, 0, dst, 0, 10); + if (flag) { } + return dst[0]; + } + + @DontCompile + public void test30_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[10]; + for (int i = 0; i < 10; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + MyValue2 v = test30(src, false); + Asserts.assertEQ(src[0].hash(), v.hash()); + } + + // non escaping allocation with memory phi + @Test(failOn = ALLOC + ALLOCA + LOOP + LOAD + TRAP) + public long test31(boolean b, boolean deopt) { + MyValue2[] src = new MyValue2[1]; + if (b) { + src[0] = MyValue2.createWithFieldsInline(rI, true); + } else { + src[0] = MyValue2.createWithFieldsInline(rI, false); + } + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test31")); + } + return src[0].hash(); + } + + @DontCompile + public void test31_verifier(boolean warmup) { + MyValue2 v1 = MyValue2.createWithFieldsInline(rI, true); 
+ long result1 = test31(true, !warmup); + Asserts.assertEQ(result1, v1.hash()); + MyValue2 v2 = MyValue2.createWithFieldsInline(rI, false); + long result2 = test31(false, !warmup); + Asserts.assertEQ(result2, v2.hash()); + } + + // Tests with Object arrays and clone/arraycopy + // clone() as stub call + @Test + public Object[] test32(Object[] va) { + return va.clone(); + } + + @DontCompile + public void test32_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] result = (MyValue1[])test32(va); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(((MyValue1)result[i]).hash(), ((MyValue1)va[i]).hash()); + } + } + + @Test + public Object[] test33(Object[] va) { + return va.clone(); + } + + @DontCompile + public void test33_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] va = new Object[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + Object[] result = test33(va); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(((MyValue1)result[i]).hash(), ((MyValue1)va[i]).hash()); + } + } + + // clone() as series of loads/stores + static Object[] test34_orig = null; + + @ForceInline + public Object[] test34_helper(boolean flag) { + Object[] va = null; + if (flag) { + va = new MyValue1[8]; + for (int i = 0; i < va.length; ++i) { + va[i] = MyValue1.createWithFieldsDontInline(rI, rL); + } + } else { + va = new Object[8]; + } + return va; + } + + @Test + public Object[] test34(boolean flag) { + Object[] va = test34_helper(flag); + test34_orig = va; + return va.clone(); + } + + @DontCompile + public void test34_verifier(boolean warmup) { + test34(false); + for (int i = 0; i < 10; i++) { // make sure we do deopt + Object[] result = test34(true); + verify(test34_orig, result); + } + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test34")) { + Object[] result = 
test34(true); + verify(test34_orig, result); + } + } + + static void verify(Object[] src, Object[] dst) { + for (int i = 0; i < src.length; ++i) { + Asserts.assertEQ(((MyInterface)src[i]).hash(), ((MyInterface)dst[i]).hash()); + } + } + + static void verify(MyValue1[] src, MyValue1[] dst) { + for (int i = 0; i < src.length; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + static void verify(MyValue1[] src, Object[] dst) { + for (int i = 0; i < src.length; ++i) { + Asserts.assertEQ(src[i].hash(), ((MyInterface)dst[i]).hash()); + } + } + + static void verify(MyValue2[] src, MyValue2[] dst) { + for (int i = 0; i < src.length; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + static void verify(MyValue2[] src, Object[] dst) { + for (int i = 0; i < src.length; ++i) { + Asserts.assertEQ(src[i].hash(), ((MyInterface)dst[i]).hash()); + } + } + + static boolean compile_and_run_again_if_deoptimized(boolean warmup, String test) { + if (!warmup) { + Method m = tests.get(test); + if (!WHITE_BOX.isMethodCompiled(m, false)) { + if (!ValueTypeArrayFlatten && !XCOMP) { + throw new RuntimeException("Unexpected deoptimization"); + } + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + return true; + } + } + return false; + } + + // arraycopy() of value type array of unknown size + @Test + public void test35(Object src, Object dst, int len) { + System.arraycopy(src, 0, dst, 0, len); + } + + @DontCompile + public void test35_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] src = new MyValue1[len]; + MyValue1[] dst = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test35(src, dst, src.length); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test35")) { + test35(src, dst, src.length); + verify(src, dst); + } + } + + @Test + public void test36(Object src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, 
dst.length); + } + + @DontCompile + public void test36_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue2[] src = new MyValue2[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test36(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test36")) { + test36(src, dst); + verify(src, dst); + } + } + + @Test + public void test37(MyValue2[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test37_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue2[] src = new MyValue2[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test37(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test37")) { + test37(src, dst); + verify(src, dst); + } + } + + @Test + @Warmup(1) // Avoid early compilation + public void test38(Object src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, dst.length); + } + + @DontCompile + public void test38_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] src = new Object[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test38(src, dst); + verify(dst, src); + if (!warmup) { + Method m = tests.get("TestArrays::test38"); + assertDeoptimizedByC2(m); + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + test38(src, dst); + verify(dst, src); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + @Test + public void test39(MyValue2[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test39_verifier(boolean warmup) { + int len = Math.abs(rI) 
% 10; + MyValue2[] src = new MyValue2[len]; + Object[] dst = new Object[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test39(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test39")) { + test39(src, dst); + verify(src, dst); + } + } + + @Test + @Warmup(1) // Avoid early compilation + public void test40(Object[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test40_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] src = new Object[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test40(src, dst); + verify(dst, src); + if (!warmup) { + Method m = tests.get("TestArrays::test40"); + assertDeoptimizedByC2(m); + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + test40(src, dst); + verify(dst, src); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + @Test + public void test41(Object src, Object[] dst) { + System.arraycopy(src, 0, dst, 0, dst.length); + } + + @DontCompile + public void test41_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue2[] src = new MyValue2[len]; + Object[] dst = new Object[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test41(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test41")) { + test41(src, dst); + verify(src, dst); + } + } + + @Test + public void test42(Object[] src, Object[] dst) { + System.arraycopy(src, 0, dst, 0, src.length); + } + + @DontCompile + public void test42_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] src = new Object[len]; + Object[] dst = new Object[len]; + for (int i = 0; i < len; ++i) { + src[i] = 
MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test42(src, dst); + verify(src, dst); + if (!warmup) { + Method m = tests.get("TestArrays::test42"); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + // short arraycopy()'s + @Test + public void test43(Object src, Object dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test43_verifier(boolean warmup) { + MyValue1[] src = new MyValue1[8]; + MyValue1[] dst = new MyValue1[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue1.createWithFieldsInline(rI, rL); + } + test43(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test43")) { + test43(src, dst); + verify(src, dst); + } + } + + @Test + public void test44(Object src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test44_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[8]; + MyValue2[] dst = new MyValue2[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test44(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test44")) { + test44(src, dst); + verify(src, dst); + } + } + + @Test + public void test45(MyValue2[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test45_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[8]; + MyValue2[] dst = new MyValue2[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test45(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test45")) { + test45(src, dst); + verify(src, dst); + } + } + + @Test + @Warmup(1) // Avoid early compilation + public void test46(Object[] src, MyValue2[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void 
test46_verifier(boolean warmup) { + Object[] src = new Object[8]; + MyValue2[] dst = new MyValue2[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test46(src, dst); + verify(dst, src); + if (!warmup) { + Method m = tests.get("TestArrays::test46"); + assertDeoptimizedByC2(m); + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + test46(src, dst); + verify(dst, src); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + @Test + public void test47(MyValue2[] src, Object[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test47_verifier(boolean warmup) { + MyValue2[] src = new MyValue2[8]; + Object[] dst = new Object[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test47(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test47")) { + test47(src, dst); + verify(src, dst); + } + } + + @Test + @Warmup(1) // Avoid early compilation + public void test48(Object[] src, Object dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test48_verifier(boolean warmup) { + Object[] src = new Object[8]; + MyValue2[] dst = new MyValue2[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test48(src, dst); + verify(dst, src); + if (!warmup) { + Method m = tests.get("TestArrays::test48"); + assertDeoptimizedByC2(m); + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + test48(src, dst); + verify(dst, src); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + @Test + public void test49(Object src, Object[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test49_verifier(boolean warmup) { + MyValue2[] 
src = new MyValue2[8]; + Object[] dst = new Object[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test49(src, dst); + verify(src, dst); + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test49")) { + test49(src, dst); + verify(src, dst); + } + } + + @Test + public void test50(Object[] src, Object[] dst) { + System.arraycopy(src, 0, dst, 0, 8); + } + + @DontCompile + public void test50_verifier(boolean warmup) { + Object[] src = new Object[8]; + Object[] dst = new Object[8]; + for (int i = 0; i < 8; ++i) { + src[i] = MyValue2.createWithFieldsInline(rI, (i % 2) == 0); + } + test50(src, dst); + verify(src, dst); + if (!warmup) { + Method m = tests.get("TestArrays::test50"); + if (!WHITE_BOX.isMethodCompiled(m, false) && !XCOMP) { + throw new RuntimeException("unexpected deoptimization"); + } + } + } + + @Test + public MyValue1[] test51(MyValue1[] va) { + return Arrays.copyOf(va, va.length, MyValue1[].class); + } + + @DontCompile + public void test51_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] result = test51(va); + verify(va, result); + } + + static final MyValue1[] test52_va = new MyValue1[8]; + + @Test + public MyValue1[] test52() { + return Arrays.copyOf(test52_va, 8, MyValue1[].class); + } + + @DontCompile + public void test52_verifier(boolean warmup) { + for (int i = 0; i < 8; ++i) { + test52_va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] result = test52(); + verify(test52_va, result); + } + + @Test + public MyValue1[] test53(Object[] va) { + return Arrays.copyOf(va, va.length, MyValue1[].class); + } + + @DontCompile + public void test53_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + 
MyValue1[] result = test53(va); + verify(result, va); + } + + @Test + public Object[] test54(MyValue1[] va) { + return Arrays.copyOf(va, va.length, Object[].class); + } + + @DontCompile + public void test54_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + Object[] result = test54(va); + verify(va, result); + } + + @Test + public Object[] test55(Object[] va) { + return Arrays.copyOf(va, va.length, Object[].class); + } + + @DontCompile + public void test55_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + Object[] result = test55(va); + verify(va, result); + } + + @Test + public MyValue1[] test56(Object[] va) { + return Arrays.copyOf(va, va.length, MyValue1[].class); + } + + @DontCompile + public void test56_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] va = new Object[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + MyValue1[] result = test56(va); + verify(result, va); + } + + @Test + public Object[] test57(Object[] va, Class klass) { + // Arrays.copyOf returns a MyValue1[], which cannot be + // type-casted to Object[] without array co-variance. 
+ return Arrays.copyOf(va, va.length, klass); + } + + @DontCompile + public void test57_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + Object[] result = test57(va, MyValue1[].class); + verify(va, result); + } + + @Test + public Object[] test58(MyValue1[] va, Class klass) { + return Arrays.copyOf(va, va.length, klass); + } + + @DontCompile + public void test58_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + for (int i = 0; i < 10; i++) { + Object[] result = test58(va, MyValue1[].class); + verify(va, result); + } + if (compile_and_run_again_if_deoptimized(warmup, "TestArrays::test58")) { + Object[] result = test58(va, MyValue1[].class); + verify(va, result); + } + } + + @Test + public Object[] test59(MyValue1[] va) { + return Arrays.copyOf(va, va.length+1, MyValue1[].class); + } + + @DontCompile + public void test59_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + MyValue1[] verif = new MyValue1[len+1]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + verif[i] = va[i]; + } + Object[] result = test59(va); + verify(verif, result); + } + + @Test + public Object[] test60(Object[] va, Class klass) { + // Arrays.copyOf returns a MyValue1[], which cannot be + // type-casted to Object[] without array co-variance. 
+ return Arrays.copyOf(va, va.length+1, klass); + } + + @DontCompile + public void test60_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + MyValue1[] verif = new MyValue1[len+1]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + verif[i] = (MyValue1)va[i]; + } + Object[] result = test60(va, MyValue1[].class); + verify(verif, result); + } + + @Test + public Object[] test61(Object[] va, Class klass) { + return Arrays.copyOf(va, va.length+1, klass); + } + + @DontCompile + public void test61_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + Object[] va = new Integer[len]; + for (int i = 0; i < len; ++i) { + va[i] = new Integer(rI); + } + Object[] result = test61(va, Integer[].class); + for (int i = 0; i < va.length; ++i) { + Asserts.assertEQ(va[i], result[i]); + } + } + + @ForceInline + public Object[] test62_helper(int i, MyValue1[] va, Integer[] oa) { + Object[] arr = null; + if (i == 10) { + arr = oa; + } else { + arr = va; + } + return arr; + } + + @Test + public Object[] test62(MyValue1[] va, Integer[] oa) { + int i = 0; + for (; i < 10; i++); + + Object[] arr = test62_helper(i, va, oa); + + return Arrays.copyOf(arr, arr.length+1, arr.getClass()); + } + + @DontCompile + public void test62_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + Integer[] oa = new Integer[len]; + for (int i = 0; i < len; ++i) { + oa[i] = new Integer(rI); + } + test62_helper(42, va, oa); + Object[] result = test62(va, oa); + for (int i = 0; i < va.length; ++i) { + Asserts.assertEQ(oa[i], result[i]); + } + } + + @ForceInline + public Object[] test63_helper(int i, MyValue1[] va, Integer[] oa) { + Object[] arr = null; + if (i == 10) { + arr = va; + } else { + arr = oa; + } + return arr; + } + + @Test + public Object[] test63(MyValue1[] va, Integer[] oa) { + int i = 0; + for (; i < 10; i++); + + Object[] arr = test63_helper(i, va, oa); + + return 
Arrays.copyOf(arr, arr.length+1, arr.getClass()); + } + + @DontCompile + public void test63_verifier(boolean warmup) { + int len = Math.abs(rI) % 10; + MyValue1[] va = new MyValue1[len]; + MyValue1[] verif = new MyValue1[len+1]; + for (int i = 0; i < len; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + verif[i] = va[i]; + } + Integer[] oa = new Integer[len]; + test63_helper(42, va, oa); + Object[] result = test63(va, oa); + verify(verif, result); + } + + // Test default initialization of value type arrays: small array + @Test + public MyValue1[] test64() { + return new MyValue1[8]; + } + + @DontCompile + public void test64_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[8]; + MyValue1[] var = test64(); + for (int i = 0; i < 8; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // Test default initialization of value type arrays: large array + @Test + public MyValue1[] test65() { + return new MyValue1[32]; + } + + @DontCompile + public void test65_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[32]; + MyValue1[] var = test65(); + for (int i = 0; i < 32; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // Check init store elimination + @Test + public MyValue1[] test66(MyValue1 vt) { + MyValue1[] va = new MyValue1[1]; + va[0] = vt; + return va; + } + + @DontCompile + public void test66_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue1[] va = test66(vt); + Asserts.assertEQ(va[0].hashPrimitive(), vt.hashPrimitive()); + } + + // Zeroing elimination and arraycopy + @Test + public MyValue1[] test67(MyValue1[] src) { + MyValue1[] dst = new MyValue1[16]; + System.arraycopy(src, 0, dst, 0, 13); + return dst; + } + + @DontCompile + public void test67_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[16]; + MyValue1[] var = test67(va); + for (int i = 0; i < 16; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), 
var[i].hashPrimitive()); + } + } + + // A store with a default value can be eliminated + @Test + public MyValue1[] test68() { + MyValue1[] va = new MyValue1[2]; + va[0] = va[1]; + return va; + } + + @DontCompile + public void test68_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[2]; + MyValue1[] var = test68(); + for (int i = 0; i < 2; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // Requires individual stores to init array + @Test + public MyValue1[] test69(MyValue1 vt) { + MyValue1[] va = new MyValue1[4]; + va[0] = vt; + va[3] = vt; + return va; + } + + @DontCompile + public void test69_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue1[] va = new MyValue1[4]; + va[0] = vt; + va[3] = vt; + MyValue1[] var = test69(vt); + for (int i = 0; i < va.length; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // A store with a default value can be eliminated: same as test68 + // but store is farther away from allocation + @Test + public MyValue1[] test70(MyValue1[] other) { + other[1] = other[0]; + MyValue1[] va = new MyValue1[2]; + other[0] = va[1]; + va[0] = va[1]; + return va; + } + + @DontCompile + public void test70_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[2]; + MyValue1[] var = test70(va); + for (int i = 0; i < 2; ++i) { + Asserts.assertEQ(va[i].hashPrimitive(), var[i].hashPrimitive()); + } + } + + // EA needs to consider oop fields in flattened arrays + @Test + public void test71() { + int len = 10; + MyValue2[] src = new MyValue2[len]; + MyValue2[] dst = new MyValue2[len]; + for (int i = 0; i < len; ++i) { + src[i] = MyValue2.createWithFieldsDontInline(rI, (i % 2) == 0); + } + System.arraycopy(src, 0, dst, 0, src.length); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(src[i].hash(), dst[i].hash()); + } + } + + @DontCompile + public void test71_verifier(boolean warmup) { + test71(); + } + + // Test EA with leaf 
call to 'store_unknown_value' + @Test + public void test72(Object[] o, boolean b, Object element) { + Object[] arr1 = new Object[10]; + Object[] arr2 = new Object[10]; + if (b) { + arr1 = o; + } + arr1[0] = element; + arr2[0] = element; + } + + @DontCompile + public void test72_verifier(boolean warmup) { + Object[] arr = new Object[1]; + Object elem = new Object(); + test72(arr, true, elem); + test72(arr, false, elem); + } + + @Test + public void test73(Object[] oa, MyValue1 v, Object o) { + // TestLWorld.test38 use a C1 Phi node for the array. This test + // adds the case where the stored value is a C1 Phi node. + Object o2 = (o == null) ? v : o; + oa[0] = v; // The stored value is known to be flattenable + oa[1] = o; // The stored value may be flattenable + oa[2] = o2; // The stored value may be flattenable (a C1 Phi node) + oa[0] = oa; // The stored value is known to be not flattenable (an Object[]) + } + + @DontCompile + public void test73_verifier(boolean warmup) { + MyValue1 v0 = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue1 v1 = MyValue1.createWithFieldsDontInline(rI+1, rL+1); + MyValue1[] arr = new MyValue1[3]; + try { + test73(arr, v0, v1); + throw new RuntimeException("ArrayStoreException expected"); + } catch (ArrayStoreException t) { + // expected + } + Asserts.assertEQ(arr[0].hash(), v0.hash()); + Asserts.assertEQ(arr[1].hash(), v1.hash()); + Asserts.assertEQ(arr[2].hash(), v1.hash()); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestBasicFunctionality.java 2019-03-11 14:27:34.190353938 +0100 @@ -0,0 +1,785 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test the basic value type implementation in C2 + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestBasicFunctionality.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestBasicFunctionality + */ +public class TestBasicFunctionality extends ValueTypeTest { + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 3: return new String[] {"-XX:-ValueArrayFlatten"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestBasicFunctionality test = new TestBasicFunctionality(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class, MyValue3.class, MyValue3Inline.class); + } + + // Helper methods + + protected long hash() { + return hash(rI, rL); + } + + protected long hash(int x, long y) { + return MyValue1.createWithFieldsInline(x, y).hash(); + } + + // Receive value type through call to interpreter + @Test(failOn = ALLOC + STORE + TRAP) + public long test1() { + MyValue1 v = MyValue1.createWithFieldsDontInline(rI, rL); + return v.hash(); + } + + @DontCompile + public void test1_verifier(boolean warmup) { + long result = test1(); + Asserts.assertEQ(result, hash()); + } + + // Receive value type from interpreter via parameter + @Test(failOn = ALLOC + STORE + TRAP) + public long test2(MyValue1 v) { + return v.hash(); + } + + @DontCompile + public void test2_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsDontInline(rI, rL); + long result = test2(v); + Asserts.assertEQ(result, hash()); + } + + // Return incoming value type without accessing fields + @Test(valid = ValueTypePassFieldsAsArgsOn, match = {ALLOC, STORE}, matchCount = {1, 14}, failOn = LOAD + TRAP) + @Test(valid = ValueTypePassFieldsAsArgsOff, failOn = ALLOC + LOAD + STORE + TRAP) + public MyValue1 test3(MyValue1 v) { + return v; + } + + @DontCompile + public void test3_verifier(boolean warmup) { + MyValue1 v1 = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue1 v2 = test3(v1); + Asserts.assertEQ(v1.x, v2.x); + Asserts.assertEQ(v1.y, v2.y); + } + + // Create a value type in compiled code and only use fields. + // Allocation should go away because value type does not escape. 
+ @Test(failOn = ALLOC + LOAD + STORE + TRAP) + public long test4() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + return v.hash(); + } + + @DontCompile + public void test4_verifier(boolean warmup) { + long result = test4(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in compiled code and pass it to + // an inlined compiled method via a call. + @Test(failOn = ALLOC + LOAD + STORE + TRAP) + public long test5() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + return test5Inline(v); + } + + @ForceInline + public long test5Inline(MyValue1 v) { + return v.hash(); + } + + @DontCompile + public void test5_verifier(boolean warmup) { + long result = test5(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in compiled code and pass it to + // the interpreter via a call. + @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = LOAD + TRAP + ALLOC) + @Test(valid = ValueTypePassFieldsAsArgsOff, match = {ALLOC}, matchCount = {1}, failOn = LOAD + TRAP) + public long test6() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + // Pass to interpreter + return v.hashInterpreted(); + } + + @DontCompile + public void test6_verifier(boolean warmup) { + long result = test6(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in compiled code and pass it to + // the interpreter by returning. 
+ @Test(match = {ALLOC}, matchCount = {1}, failOn = LOAD + TRAP) + public MyValue1 test7(int x, long y) { + return MyValue1.createWithFieldsInline(x, y); + } + + @DontCompile + public void test7_verifier(boolean warmup) { + MyValue1 v = test7(rI, rL); + Asserts.assertEQ(v.hash(), hash()); + } + + // Merge value types created from two branches + @Test(failOn = ALLOC + STORE + TRAP) + public long test8(boolean b) { + MyValue1 v; + if (b) { + v = MyValue1.createWithFieldsInline(rI, rL); + } else { + v = MyValue1.createWithFieldsDontInline(rI + 1, rL + 1); + } + return v.hash(); + } + + @DontCompile + public void test8_verifier(boolean warmup) { + Asserts.assertEQ(test8(true), hash()); + Asserts.assertEQ(test8(false), hash(rI + 1, rL + 1)); + } + + // Merge value types created from two branches + @Test(valid = ValueTypePassFieldsAsArgsOn, match = {LOAD}, matchCount = {12}, failOn = TRAP + ALLOC + STORE) + @Test(valid = ValueTypePassFieldsAsArgsOff, match = {ALLOC, STORE}, matchCount = {1, 12}, failOn = LOAD + TRAP) + public MyValue1 test9(boolean b, int localrI, long localrL) { + MyValue1 v; + if (b) { + // Value type is not allocated + // Do not use rI/rL directly here as null values may cause + // some redundant null initializations to be optimized out + // and matching to fail. 
+ v = MyValue1.createWithFieldsInline(localrI, localrL); + } else { + // Value type is allocated by the callee + v = MyValue1.createWithFieldsDontInline(rI + 1, rL + 1); + } + // Need to allocate value type if 'b' is true + long sum = v.hashInterpreted(); + if (b) { + v = MyValue1.createWithFieldsDontInline(rI, sum); + } else { + v = MyValue1.createWithFieldsDontInline(rI, sum + 1); + } + // Don't need to allocate value type because both branches allocate + return v; + } + + @DontCompile + public void test9_verifier(boolean warmup) { + MyValue1 v = test9(true, rI, rL); + Asserts.assertEQ(v.x, rI); + Asserts.assertEQ(v.y, hash()); + v = test9(false, rI, rL); + Asserts.assertEQ(v.x, rI); + Asserts.assertEQ(v.y, hash(rI + 1, rL + 1) + 1); + } + + // Merge value types created in a loop (not inlined) + @Test(failOn = ALLOC + STORE + TRAP) + public long test10(int x, long y) { + MyValue1 v = MyValue1.createWithFieldsDontInline(x, y); + for (int i = 0; i < 10; ++i) { + v = MyValue1.createWithFieldsDontInline(v.x + 1, v.y + 1); + } + return v.hash(); + } + + @DontCompile + public void test10_verifier(boolean warmup) { + long result = test10(rI, rL); + Asserts.assertEQ(result, hash(rI + 10, rL + 10)); + } + + // Merge value types created in a loop (inlined) + @Test(failOn = ALLOC + LOAD + STORE + TRAP) + public long test11(int x, long y) { + MyValue1 v = MyValue1.createWithFieldsInline(x, y); + for (int i = 0; i < 10; ++i) { + v = MyValue1.createWithFieldsInline(v.x + 1, v.y + 1); + } + return v.hash(); + } + + @DontCompile + public void test11_verifier(boolean warmup) { + long result = test11(rI, rL); + Asserts.assertEQ(result, hash(rI + 10, rL + 10)); + } + + // Test loop with uncommon trap referencing a value type + @Test(match = {SCOBJ}, matchCount = {-1 /* at least 1 */}, failOn = LOAD) + public long test12(boolean b) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + MyValue1[] va = new MyValue1[Math.abs(rI) % 10]; + for (int i = 0; i < va.length; ++i) { + 
va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + long result = rL; + for (int i = 0; i < 1000; ++i) { + if (b) { + result += v.x; + } else { + // Uncommon trap referencing v. We delegate allocation to the + // interpreter by adding a SafePointScalarObjectNode. + result = v.hashInterpreted(); + for (int j = 0; j < va.length; ++j) { + result += va[j].hash(); + } + } + } + return result; + } + + @DontCompile + public void test12_verifier(boolean warmup) { + long result = test12(warmup); + Asserts.assertEQ(result, warmup ? rL + (1000 * rI) : ((Math.abs(rI) % 10) + 1) * hash()); + } + + // Test loop with uncommon trap referencing a value type + @Test + public long test13(boolean b) { + MyValue1 v = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue1[] va = new MyValue1[Math.abs(rI) % 10]; + for (int i = 0; i < va.length; ++i) { + va[i] = MyValue1.createWithFieldsDontInline(rI, rL); + } + long result = rL; + for (int i = 0; i < 1000; ++i) { + if (b) { + result += v.x; + } else { + // Uncommon trap referencing v. Should not allocate + // but just pass the existing oop to the uncommon trap. + result = v.hashInterpreted(); + for (int j = 0; j < va.length; ++j) { + result += va[j].hashInterpreted(); + } + } + } + return result; + } + + @DontCompile + public void test13_verifier(boolean warmup) { + long result = test13(warmup); + Asserts.assertEQ(result, warmup ? rL + (1000 * rI) : ((Math.abs(rI) % 10) + 1) * hash()); + } + + // Create a value type in a non-inlined method and then call a + // non-inlined method on that value type. 
+ @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = (ALLOC + STORE + TRAP), match = {LOAD}, matchCount = {12}) + @Test(valid = ValueTypePassFieldsAsArgsOff, failOn = (ALLOC + LOAD + STORE + TRAP)) + public long test14() { + MyValue1 v = MyValue1.createWithFieldsDontInline(rI, rL); + return v.hashInterpreted(); + } + + @DontCompile + public void test14_verifier(boolean b) { + long result = test14(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in an inlined method and then call a + // non-inlined method on that value type. + @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = (LOAD + TRAP + ALLOC)) + @Test(valid = ValueTypePassFieldsAsArgsOff, failOn = (LOAD + TRAP), match = {ALLOC}, matchCount = {1}) + public long test15() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + return v.hashInterpreted(); + } + + @DontCompile + public void test15_verifier(boolean b) { + long result = test15(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in a non-inlined method and then call an + // inlined method on that value type. + @Test(failOn = (ALLOC + STORE + TRAP)) + public long test16() { + MyValue1 v = MyValue1.createWithFieldsDontInline(rI, rL); + return v.hash(); + } + + @DontCompile + public void test16_verifier(boolean b) { + long result = test16(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in an inlined method and then call an + // inlined method on that value type. + @Test(failOn = (ALLOC + LOAD + STORE + TRAP)) + public long test17() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + return v.hash(); + } + + @DontCompile + public void test17_verifier(boolean b) { + long result = test17(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in compiled code and pass it to the + // interpreter via a call. The value is live at the first call so + // debug info should include a reference to all its fields. 
+ @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = ALLOC + LOAD + TRAP) + @Test(valid = ValueTypePassFieldsAsArgsOff, match = {ALLOC}, matchCount = {1}, failOn = LOAD + TRAP) + public long test18() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + v.hashInterpreted(); + return v.hashInterpreted(); + } + + @DontCompile + public void test18_verifier(boolean warmup) { + long result = test18(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type in compiled code and pass it to the + // interpreter via a call. The value type is passed twice but + // should only be allocated once. + @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = ALLOC + LOAD + TRAP) + @Test(valid = ValueTypePassFieldsAsArgsOff, match = {ALLOC}, matchCount = {1}, failOn = LOAD + TRAP) + public long test19() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + return sumValue(v, v); + } + + @DontCompile + public long sumValue(MyValue1 v, MyValue1 dummy) { + return v.hash(); + } + + @DontCompile + public void test19_verifier(boolean warmup) { + long result = test19(); + Asserts.assertEQ(result, hash()); + } + + // Create a value type (array) in compiled code and pass it to the + // interpreter via a call. The value type is live at the uncommon + // trap: verify that deoptimization causes the value type to be + // correctly allocated. 
+ @Test(valid = ValueTypePassFieldsAsArgsOn, failOn = LOAD + ALLOC + STORE) + @Test(valid = ValueTypePassFieldsAsArgsOff, match = {ALLOC}, matchCount = {1}, failOn = LOAD) + public long test20(boolean deopt) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + MyValue2[] va = new MyValue2[3]; + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test20")); + } + return v.hashInterpreted() + va[0].hashInterpreted() + + va[1].hashInterpreted() + va[2].hashInterpreted(); + } + + @DontCompile + public void test20_verifier(boolean warmup) { + MyValue2[] va = new MyValue2[42]; + long result = test20(!warmup); + Asserts.assertEQ(result, hash() + va[0].hash() + va[1].hash() + va[2].hash()); + } + + // Value type fields in regular object + MyValue1.val val1; + MyValue2.val val2; + final MyValue1.val val3 = MyValue1.createWithFieldsInline(rI, rL); + static MyValue1.val val4; + static final MyValue1.val val5 = MyValue1.createWithFieldsInline(rI, rL); + + // Test value type fields in objects + @Test(match = {ALLOC}, matchCount = {1}, failOn = (TRAP)) + public long test21(int x, long y) { + // Compute hash of value type fields + long result = val1.hash() + val2.hash() + val3.hash() + val4.hash() + val5.hash(); + // Update fields + val1 = MyValue1.createWithFieldsInline(x, y); + val2 = MyValue2.createWithFieldsInline(x, true); + val4 = MyValue1.createWithFieldsInline(x, y); + return result; + } + + @DontCompile + public void test21_verifier(boolean warmup) { + // Check if hash computed by test18 is correct + val1 = MyValue1.createWithFieldsInline(rI, rL); + val2 = val1.v2; + // val3 is initialized in the constructor + val4 = val1; + // val5 is initialized in the static initializer + long hash = val1.hash() + val2.hash() + val3.hash() + val4.hash() + val5.hash(); + long result = test21(rI + 1, rL + 1); + Asserts.assertEQ(result, hash); + // Check if value type fields were updated + Asserts.assertEQ(val1.hash(), hash(rI + 
1, rL + 1)); + Asserts.assertEQ(val2.hash(), MyValue2.createWithFieldsInline(rI + 1, true).hash()); + Asserts.assertEQ(val4.hash(), hash(rI + 1, rL + 1)); + } + + // Test folding of constant value type fields + @Test(failOn = ALLOC + LOAD + STORE + LOOP + TRAP) + public long test22() { + // This should be constant folded + return val5.hash() + val5.v3.hash(); + } + + @DontCompile + public void test22_verifier(boolean warmup) { + long result = test22(); + Asserts.assertEQ(result, val5.hash() + val5.v3.hash()); + } + + // Test defaultvalue + @Test(failOn = ALLOC + LOAD + STORE + LOOP + TRAP) + public long test23() { + MyValue2 v = MyValue2.createDefaultInline(); + return v.hash(); + } + + @DontCompile + public void test23_verifier(boolean warmup) { + long result = test23(); + Asserts.assertEQ(result, MyValue2.createDefaultInline().hash()); + } + + // Test defaultvalue + @Test(failOn = ALLOC + STORE + LOOP + TRAP) + public long test24() { + MyValue1 v1 = MyValue1.createDefaultInline(); + MyValue1 v2 = MyValue1.createDefaultDontInline(); + return v1.hashPrimitive() + v2.hashPrimitive(); + } + + @DontCompile + public void test24_verifier(boolean warmup) { + long result = test24(); + Asserts.assertEQ(result, 2 * MyValue1.createDefaultInline().hashPrimitive()); + } + + // Test withfield + @Test(failOn = ALLOC + LOAD + STORE + LOOP + TRAP) + public long test25() { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + return v.hash(); + } + + @DontCompile + public void test25_verifier(boolean warmup) { + long result = test25(); + Asserts.assertEQ(result, MyValue2.createWithFieldsInline(rI, true).hash()); + } + + // Test withfield + @Test(failOn = ALLOC + STORE + LOOP + TRAP) + public long test26() { + MyValue1 v1 = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 v2 = MyValue1.createWithFieldsDontInline(rI, rL); + return v1.hash() + v2.hash(); + } + + @DontCompile + public void test26_verifier(boolean warmup) { + long result = test26(); + Asserts.assertEQ(result, 2 
* hash()); + } + + class TestClass27 { + public MyValue1.val v; + } + + // Test allocation elimination of unused object with initialized value type field + @Test(failOn = ALLOC + LOAD + STORE + LOOP) + public void test27(boolean deopt) { + TestClass27 unused = new TestClass27(); + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + unused.v = v; + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test27")); + } + } + + @DontCompile + public void test27_verifier(boolean warmup) { + test27(!warmup); + } + + static MyValue3.val staticVal3; + static MyValue3.val staticVal3_copy; + + // Check elimination of redundant value type allocations + @Test(match = {ALLOC}, matchCount = {1}) + public MyValue3 test28(MyValue3[] va) { + // Create value type and force allocation + MyValue3 vt = MyValue3.create(); + va[0] = vt; + staticVal3 = vt; + vt.verify(staticVal3); + + // Value type is now allocated, make a copy and force allocation. + // Because copy is equal to vt, C2 should remove this redundant allocation. 
+ MyValue3 copy = MyValue3.setC(vt, vt.c); + va[0] = copy; + staticVal3_copy = copy; + copy.verify(staticVal3_copy); + return copy; + } + + @DontCompile + public void test28_verifier(boolean warmup) { + MyValue3[] va = new MyValue3[1]; + MyValue3 vt = test28(va); + staticVal3.verify(vt); + staticVal3.verify(va[0]); + staticVal3_copy.verify(vt); + staticVal3_copy.verify(va[0]); + } + + // Verify that only dominating allocations are re-used + @Test() + public MyValue3 test29(boolean warmup) { + MyValue3 vt = MyValue3.create(); + if (warmup) { + staticVal3 = vt; // Force allocation + } + // Force allocation to verify that above + // non-dominating allocation is not re-used + MyValue3 copy = MyValue3.setC(vt, vt.c); + staticVal3_copy = copy; + copy.verify(vt); + return copy; + } + + @DontCompile + public void test29_verifier(boolean warmup) { + MyValue3 vt = test29(warmup); + if (warmup) { + staticVal3.verify(vt); + } + } + + // Verify that C2 recognizes value type loads and re-uses the oop to avoid allocations + @Test(failOn = ALLOC + ALLOCA + STORE) + public MyValue3 test30(MyValue3[] va) { + // C2 can re-use the oop of staticVal3 because staticVal3 is equal to copy + MyValue3 copy = MyValue3.copy(staticVal3); + va[0] = copy; + staticVal3 = copy; + copy.verify(staticVal3); + return copy; + } + + @DontCompile + public void test30_verifier(boolean warmup) { + staticVal3 = MyValue3.create(); + MyValue3[] va = new MyValue3[1]; + MyValue3 vt = test30(va); + staticVal3.verify(vt); + staticVal3.verify(va[0]); + } + + // Verify that C2 recognizes value type loads and re-uses the oop to avoid allocations + @Test(valid = ValueTypeReturnedAsFieldsOn) + @Test(valid = ValueTypeReturnedAsFieldsOff, failOn = ALLOC + ALLOCA + STORE) + public MyValue3 test31(MyValue3[] va) { + // C2 can re-use the oop returned by createDontInline() + // because the corresponding value type is equal to 'copy'. 
+ MyValue3 copy = MyValue3.copy(MyValue3.createDontInline()); + va[0] = copy; + staticVal3 = copy; + copy.verify(staticVal3); + return copy; + } + + @DontCompile + public void test31_verifier(boolean warmup) { + MyValue3[] va = new MyValue3[1]; + MyValue3 vt = test31(va); + staticVal3.verify(vt); + staticVal3.verify(va[0]); + } + + // Verify that C2 recognizes value type loads and re-uses the oop to avoid allocations + @Test(valid = ValueTypePassFieldsAsArgsOn) + @Test(valid = ValueTypePassFieldsAsArgsOff, failOn = ALLOC + ALLOCA + STORE) + public MyValue3 test32(MyValue3 vt, MyValue3[] va) { + // C2 can re-use the oop of vt because vt is equal to 'copy'. + MyValue3 copy = MyValue3.copy(vt); + va[0] = copy; + staticVal3 = copy; + copy.verify(staticVal3); + return copy; + } + + @DontCompile + public void test32_verifier(boolean warmup) { + MyValue3 vt = MyValue3.create(); + MyValue3[] va = new MyValue3[1]; + MyValue3 result = test32(vt, va); + staticVal3.verify(vt); + va[0].verify(vt); + result.verify(vt); + } + + // Test correct identification of value type copies + @Test() + public MyValue3 test33(MyValue3[] va) { + MyValue3 vt = MyValue3.copy(staticVal3); + vt = MyValue3.setI(vt, vt.c); + // vt is not equal to staticVal3, so C2 should not re-use the oop + va[0] = vt; + staticVal3 = vt; + vt.verify(staticVal3); + return vt; + } + + @DontCompile + public void test33_verifier(boolean warmup) { + staticVal3 = MyValue3.create(); + MyValue3[] va = new MyValue3[1]; + MyValue3 vt = test33(va); + Asserts.assertEQ(staticVal3.i, (int)staticVal3.c); + Asserts.assertEQ(va[0].i, (int)staticVal3.c); + Asserts.assertEQ(vt.i, (int)staticVal3.c); + } + + // Verify that the default value type is never allocated. + // C2 code should load and use the default oop from the java mirror. 
+ @Test(failOn = ALLOC + ALLOCA + LOAD + STORE + LOOP + TRAP) + public MyValue3 test34(MyValue3[] va) { + // Explicitly create default value + MyValue3 vt = MyValue3.createDefault(); + va[0] = vt; + staticVal3 = vt; + vt.verify(vt); + + // Load default value from uninitialized value array + MyValue3[] dva = new MyValue3[1]; + staticVal3_copy = dva[0]; + va[1] = dva[0]; + dva[0].verify(dva[0]); + return vt; + } + + @DontCompile + public void test34_verifier(boolean warmup) { + MyValue3 vt = MyValue3.createDefault(); + MyValue3[] va = new MyValue3[2]; + va[0] = MyValue3.create(); + va[1] = MyValue3.create(); + MyValue3 res = test34(va); + res.verify(vt); + staticVal3.verify(vt); + staticVal3_copy.verify(vt); + va[0].verify(vt); + va[1].verify(vt); + } + + // Same as above but manually initialize value type fields to default. + @Test(failOn = ALLOC + ALLOCA + LOAD + STORE + LOOP + TRAP) + public MyValue3 test35(MyValue3 vt, MyValue3[] va) { + vt = MyValue3.setC(vt, (char)0); + vt = MyValue3.setBB(vt, (byte)0); + vt = MyValue3.setS(vt, (short)0); + vt = MyValue3.setI(vt, 0); + vt = MyValue3.setL(vt, 0); + vt = MyValue3.setO(vt, null); + vt = MyValue3.setF1(vt, 0); + vt = MyValue3.setF2(vt, 0); + vt = MyValue3.setF3(vt, 0); + vt = MyValue3.setF4(vt, 0); + vt = MyValue3.setF5(vt, 0); + vt = MyValue3.setF6(vt, 0); + vt = MyValue3.setV1(vt, MyValue3Inline.createDefault()); + va[0] = vt; + staticVal3 = vt; + vt.verify(vt); + return vt; + } + + @DontCompile + public void test35_verifier(boolean warmup) { + MyValue3 vt = MyValue3.createDefault(); + MyValue3[] va = new MyValue3[1]; + va[0] = MyValue3.create(); + MyValue3 res = test35(va[0], va); + res.verify(vt); + staticVal3.verify(vt); + va[0].verify(vt); + } + + // Merge value types created from two branches + + private Object test36_helper(Object v) { + return v; + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test36(boolean b) { + Object o; + if (b) { + o = test36_helper(MyValue1.createWithFieldsInline(rI, 
rL)); + } else { + o = test36_helper(MyValue1.createWithFieldsDontInline(rI + 1, rL + 1)); + } + MyValue1 v = (MyValue1)o; + return v.hash(); + } + + @DontCompile + public void test36_verifier(boolean warmup) { + Asserts.assertEQ(test36(true), hash()); + Asserts.assertEQ(test36(false), hash(rI + 1, rL + 1)); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestBimorphicInlining.java 2019-03-11 14:27:34.646353932 +0100 @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import java.util.Random; +import jdk.test.lib.Asserts; + +/** + * @test + * @bug 8209009 + * @summary Test bimorphic inlining with value receivers. 
+ * @library /testlibrary /test/lib + * @run main/othervm -XX:+EnableValhalla -Xbatch -XX:TypeProfileLevel=222 + * -XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.TestBimorphicInlining::test* + * -XX:CompileCommand=quiet -XX:CompileCommand=print,compiler.valhalla.valuetypes.TestBimorphicInlining::test* + * compiler.valhalla.valuetypes.TestBimorphicInlining + * @run main/othervm -XX:+EnableValhalla -Xbatch -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:PerMethodTrapLimit=0 -XX:PerMethodSpecTrapLimit=0 + * -XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.TestBimorphicInlining::test* + * -XX:CompileCommand=quiet -XX:CompileCommand=print,compiler.valhalla.valuetypes.TestBimorphicInlining::test* + * compiler.valhalla.valuetypes.TestBimorphicInlining + */ + +interface MyInterface { + public MyInterface hash(MyInterface arg); +} + +value final class TestValue1 implements MyInterface { + final int x; + + public TestValue1(int x) { + this.x = x; + } + + public TestValue1 hash(MyInterface arg) { + return new TestValue1(x + ((TestValue1)arg).x); + } +} + +value final class TestValue2 implements MyInterface { + final int x; + + public TestValue2(int x) { + this.x = x; + } + + public TestValue2 hash(MyInterface arg) { + return new TestValue2(x + ((TestValue2)arg).x); + } +} + +class TestClass implements MyInterface { + int x; + + public TestClass(int x) { + this.x = x; + } + + public MyInterface hash(MyInterface arg) { + return new TestClass(x + ((TestClass)arg).x); + } +} + +public class TestBimorphicInlining { + + public static MyInterface test1(MyInterface i1, MyInterface i2) { + MyInterface result = i1.hash(i2); + i1.hash(i2); + return result; + } + + public static MyInterface test2(MyInterface i1, MyInterface i2) { + MyInterface result = i1.hash(i2); + i1.hash(i2); + return result; + } + + public static MyInterface test3(MyInterface i1, MyInterface i2) { + MyInterface result = i1.hash(i2); + i1.hash(i2); + return result; + } + + 
public static MyInterface test4(MyInterface i1, MyInterface i2) { + MyInterface result = i1.hash(i2); + i1.hash(i2); + return result; + } + + static public void main(String[] args) { + Random rand = new Random(); + TestClass testObject = new TestClass(rand.nextInt()); + TestValue1 testValue1 = new TestValue1(rand.nextInt()); + TestValue2 testValue2 = new TestValue2(rand.nextInt()); + + for (int i = 0; i < 10_000; ++i) { + // Trigger bimorphic inlining by calling test methods with different arguments + MyInterface arg, res; + boolean rare = (i % 10 == 0); + + arg = rare ? testValue1 : testObject; + res = test1(arg, arg); + Asserts.assertEQ(rare ? ((TestValue1)res).x : ((TestClass)res).x, 2 * (rare ? testValue1.x : testObject.x), "test1 failed"); + + arg = rare ? testObject : testValue1; + res = test2(arg, arg); + Asserts.assertEQ(rare ? ((TestClass)res).x : ((TestValue1)res).x, 2 * (rare ? testObject.x : testValue1.x), "test2 failed"); + + arg = rare ? testValue1 : testValue2; + res = test3(arg, arg); + Asserts.assertEQ(rare ? ((TestValue1)res).x : ((TestValue2)res).x, 2 * (rare ? testValue1.x : testValue2.x), "test3 failed"); + + arg = rare ? testValue2 : testValue1; + res = test4(arg, arg); + Asserts.assertEQ(rare ? ((TestValue2)res).x : ((TestValue1)res).x, 2 * (rare ? testValue2.x : testValue1.x), "test4 failed"); + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestC2CCalls.java 2019-03-11 14:27:35.102353925 +0100 @@ -0,0 +1,598 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @library /test/lib + * @summary Test value type calling convention with compiled to compiled calls. + * @run main/othervm -XX:+EnableValhalla + * TestC2CCalls + * @run main/othervm -XX:+EnableValhalla -XX:-UseBimorphicInlining -Xbatch + * -XX:CompileCommand=compileonly,TestC2CCalls*::test* + * -XX:CompileCommand=dontinline,TestC2CCalls*::test* + * TestC2CCalls + * @run main/othervm -XX:+EnableValhalla -XX:-UseBimorphicInlining -Xbatch -XX:-ProfileInterpreter + * -XX:CompileCommand=compileonly,TestC2CCalls*::test* + * -XX:CompileCommand=dontinline,TestC2CCalls*::test* + * TestC2CCalls + * @run main/othervm -XX:+EnableValhalla -XX:-UseBimorphicInlining -Xbatch + * -XX:CompileCommand=compileonly,TestC2CCalls::test* + * -XX:CompileCommand=dontinline,TestC2CCalls*::test* + * TestC2CCalls + * @run main/othervm -XX:+EnableValhalla -XX:-UseBimorphicInlining -Xbatch -XX:-ProfileInterpreter + * -XX:CompileCommand=compileonly,TestC2CCalls::test* + * -XX:CompileCommand=dontinline,TestC2CCalls*::test* + * TestC2CCalls + */ + +import jdk.test.lib.Asserts; +import jdk.test.lib.Utils; + +public class TestC2CCalls { + + public static final int rI = Utils.getRandomInstance().nextInt() % 1000; + + static value class OtherVal { + public final int 
x; + + private OtherVal(int x) { + this.x = x; + } + } + + static interface MyInterface1 { + public MyInterface1 test1(OtherVal other, int y); + public MyInterface1 test2(OtherVal.val other1, OtherVal.box other2, int y); + public MyInterface1 test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt); + public MyInterface1 test4(OtherVal.val other1, OtherVal.box other2, int y); + public MyInterface1 test5(OtherVal.val other1, OtherVal.box other2, int y); + public MyInterface1 test6(); + public MyInterface1 test7(int i1, int i2, int i3, int i4, int i5, int i6); + public MyInterface1 test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7); + public MyInterface1 test9(MyValue3 other, int i1, int i2, int i3, int i4, int i5, int i6); + public MyInterface1 test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6); + + public int getValue(); + } + + static value class MyValue1 implements MyInterface1 { + public final int x; + + private MyValue1(int x) { + this.x = x; + } + + @Override + public int getValue() { + return x; + } + + @Override + public MyValue1 test1(OtherVal other, int y) { + return new MyValue1(x + other.x + y); + } + + @Override + public MyValue1 test2(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue1(x + other1.x + other2.x + y); + } + + @Override + public MyValue1 test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt) { + if (!deopt) { + return new MyValue1(x + other1.x + other2.x + y); + } else { + // Uncommon trap + return test1(other1, y); + } + } + + @Override + public MyValue1 test4(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue1(x + other1.x + other2.x + y); + } + + @Override + public MyValue1 test5(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue1(x + other1.x + other2.x + y); + } + + @Override + public MyValue1 test6() { + return this; + } + + @Override + public MyValue1 test7(int i1, int i2, int i3, int i4, int i5, int i6) { + 
return new MyValue1(x + i1 + i2 + i3 + i4 + i5 + i6); + } + + @Override + public MyValue1 test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7) { + return new MyValue1(x + i1 + i2 + i3 + i4 + i5 + i6 + i7); + } + + public MyValue1 test9(MyValue3 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue1(x + (int)(other.d1 + other.d2 + other.d3 + other.d4) + i1 + i2 + i3 + i4 + i5 + i6); + } + + public MyValue1 test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue1(x + other.x1 + other.x2 + other.x3 + other.x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + } + + static value class MyValue2 implements MyInterface1 { + public final int x; + + private MyValue2(int x) { + this.x = x; + } + + @Override + public int getValue() { + return x; + } + + @Override + public MyValue2 test1(OtherVal other, int y) { + return new MyValue2(x + other.x + y); + } + + @Override + public MyValue2 test2(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue2(x + other1.x + other2.x + y); + } + + @Override + public MyValue2 test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt) { + if (!deopt) { + return new MyValue2(x + other1.x + other2.x + y); + } else { + // Uncommon trap + return test1(other1, y); + } + } + + @Override + public MyValue2 test4(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue2(x + other1.x + other2.x + y); + } + + @Override + public MyValue2 test5(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyValue2(x + other1.x + other2.x + y); + } + + @Override + public MyValue2 test6() { + return this; + } + + @Override + public MyValue2 test7(int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue2(x + i1 + i2 + i3 + i4 + i5 + i6); + } + + @Override + public MyValue2 test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7) { + return new MyValue2(x + i1 + i2 + i3 + i4 + i5 + i6 + i7); + } + + public MyValue2 test9(MyValue3 other, 
int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue2(x + (int)(other.d1 + other.d2 + other.d3 + other.d4) + i1 + i2 + i3 + i4 + i5 + i6); + } + + public MyValue2 test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue2(x + other.x1 + other.x2 + other.x3 + other.x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + } + + static value class MyValue3 implements MyInterface1 { + public final double d1; + public final double d2; + public final double d3; + public final double d4; + + private MyValue3(double d) { + this.d1 = d; + this.d2 = d; + this.d3 = d; + this.d4 = d; + } + + @Override + public int getValue() { + return (int)d4; + } + + @Override + public MyValue3 test1(OtherVal other, int y) { return MyValue3.default; } + @Override + public MyValue3 test2(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue3.default; } + @Override + public MyValue3 test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt) { return MyValue3.default; } + @Override + public MyValue3 test4(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue3.default; } + @Override + public MyValue3 test5(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue3.default; } + @Override + public MyValue3 test6() { return MyValue3.default; } + + @Override + public MyValue3 test7(int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue3(d1 + d2 + d3 + d4 + i1 + i2 + i3 + i4 + i5 + i6); + } + + @Override + public MyValue3 test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7) { + return new MyValue3(d1 + d2 + d3 + d4 + i1 + i2 + i3 + i4 + i5 + i6 + i7); + } + + public MyValue3 test9(MyValue3 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue3(d1 + d2 + d3 + d4 + other.d1 + other.d2 + other.d3 + other.d4 + i1 + i2 + i3 + i4 + i5 + i6); + } + + public MyValue3 test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue3(d1 + d2 + d3 + d4 + 
other.x1 + other.x2 + other.x3 + other.x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + } + + static value class MyValue4 implements MyInterface1 { + public final int x1; + public final int x2; + public final int x3; + public final int x4; + + private MyValue4(int i) { + this.x1 = i; + this.x2 = i; + this.x3 = i; + this.x4 = i; + } + + @Override + public int getValue() { + return x4; + } + + @Override + public MyValue4 test1(OtherVal other, int y) { return MyValue4.default; } + @Override + public MyValue4 test2(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue4.default; } + @Override + public MyValue4 test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt) { return MyValue4.default; } + @Override + public MyValue4 test4(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue4.default; } + @Override + public MyValue4 test5(OtherVal.val other1, OtherVal.box other2, int y) { return MyValue4.default; } + @Override + public MyValue4 test6() { return MyValue4.default; } + + @Override + public MyValue4 test7(int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue4(x1 + x2 + x3 + x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + + @Override + public MyValue4 test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7) { + return new MyValue4(x1 + x2 + x3 + x4 + i1 + i2 + i3 + i4 + i5 + i6 + i7); + } + + public MyValue4 test9(MyValue3 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue4(x1 + x2 + x3 + x4 + (int)(other.d1 + other.d2 + other.d3 + other.d4) + i1 + i2 + i3 + i4 + i5 + i6); + } + + public MyValue4 test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyValue4(x1 + x2 + x3 + x4 + other.x1 + other.x2 + other.x3 + other.x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + } + + static class MyObject implements MyInterface1 { + private final int x; + + private MyObject(int x) { + this.x = x; + } + + @Override + public int getValue() { + return x; + } + + @Override + public MyObject 
test1(OtherVal other, int y) { + return new MyObject(x + other.x + y); + } + + @Override + public MyObject test2(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyObject(x + other1.x + other2.x + y); + } + + @Override + public MyObject test3(OtherVal.val other1, OtherVal.box other2, int y, boolean deopt) { + if (!deopt) { + return new MyObject(x + other1.x + other2.x + y); + } else { + // Uncommon trap + return test1(other1, y); + } + } + + @Override + public MyObject test4(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyObject(x + other1.x + other2.x + y); + } + + @Override + public MyObject test5(OtherVal.val other1, OtherVal.box other2, int y) { + return new MyObject(x + other1.x + other2.x + y); + } + + @Override + public MyObject test6() { + return this; + } + + @Override + public MyObject test7(int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyObject(x + i1 + i2 + i3 + i4 + i5 + i6); + } + + @Override + public MyObject test8(int i1, int i2, int i3, int i4, int i5, int i6, int i7) { + return new MyObject(x + i1 + i2 + i3 + i4 + i5 + i6 + i7); + } + + public MyObject test9(MyValue3 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyObject(x + (int)(other.d1 + other.d2 + other.d3 + other.d4) + i1 + i2 + i3 + i4 + i5 + i6); + } + + public MyObject test10(MyValue4 other, int i1, int i2, int i3, int i4, int i5, int i6) { + return new MyObject(x + other.x1 + other.x2 + other.x3 + other.x4 + i1 + i2 + i3 + i4 + i5 + i6); + } + } + + // Test calling methods with value type arguments through an interface + public static int test1(MyInterface1 intf, OtherVal other, int y) { + return intf.test1(other, y).getValue(); + } + + public static int test2(MyInterface1 intf, OtherVal other, int y) { + return intf.test2(other, other, y).getValue(); + } + + // Test mixing null-tolerant and null-free value type arguments + public static int test3(MyValue1 vt, OtherVal other, int y) { + return vt.test2(other, 
other, y).getValue(); + } + + public static int test4(MyObject obj, OtherVal other, int y) { + return obj.test2(other, other, y).getValue(); + } + + // Optimized interface call with value receiver + public static int test5(MyInterface1 intf, OtherVal other, int y) { + return intf.test1(other, y).getValue(); + } + + public static int test6(MyInterface1 intf, OtherVal other, int y) { + return intf.test2(other, other, y).getValue(); + } + + // Optimized interface call with object receiver + public static int test7(MyInterface1 intf, OtherVal other, int y) { + return intf.test1(other, y).getValue(); + } + + public static int test8(MyInterface1 intf, OtherVal other, int y) { + return intf.test2(other, other, y).getValue(); + } + + // Interface calls with deoptimized callee + public static int test9(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + public static int test10(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + // Optimized interface calls with deoptimized callee + public static int test11(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + public static int test12(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + public static int test13(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + public static int test14(MyInterface1 intf, OtherVal other, int y, boolean deopt) { + return intf.test3(other, other, y, deopt).getValue(); + } + + // Interface calls without warmed up / compiled callees + public static int test15(MyInterface1 intf, OtherVal other, int y) { + return intf.test4(other, other, y).getValue(); + } + + public static int test16(MyInterface1 intf, OtherVal other, int y) { + return 
intf.test5(other, other, y).getValue(); + } + + // Interface call with no arguments + public static int test17(MyInterface1 intf) { + return intf.test6().getValue(); + } + + // Calls that require stack extension + public static int test18(MyInterface1 intf, int y) { + return intf.test7(y, y, y, y, y, y).getValue(); + } + + public static int test19(MyInterface1 intf, int y) { + return intf.test8(y, y, y, y, y, y, y).getValue(); + } + + public static int test20(MyInterface1 intf, MyValue3 v, int y) { + return intf.test9(v, y, y, y, y, y, y).getValue(); + } + + public static int test21(MyInterface1 intf, MyValue4 v, int y) { + return intf.test10(v, y, y, y, y, y, y).getValue(); + } + + public static void main(String[] args) { + MyValue1 val1 = new MyValue1(rI); + MyValue2 val2 = new MyValue2(rI+1); + MyValue3 val3 = new MyValue3(rI+2); + MyValue4 val4 = new MyValue4(rI+3); + OtherVal other = new OtherVal(rI+4); + MyObject obj = new MyObject(rI+5); + + // Make sure callee methods are compiled + for (int i = 0; i < 10_000; ++i) { + Asserts.assertEQ(val1.test1(other, rI).getValue(), val1.x + other.x + rI); + Asserts.assertEQ(val2.test1(other, rI).getValue(), val2.x + other.x + rI); + Asserts.assertEQ(obj.test1(other, rI).getValue(), obj.x + other.x + rI); + Asserts.assertEQ(val1.test2(other, other, rI).getValue(), val1.x + 2*other.x + rI); + Asserts.assertEQ(val2.test2(other, other, rI).getValue(), val2.x + 2*other.x + rI); + Asserts.assertEQ(obj.test2(other, other, rI).getValue(), obj.x + 2*other.x + rI); + Asserts.assertEQ(val1.test3(other, other, rI, false).getValue(), val1.x + 2*other.x + rI); + Asserts.assertEQ(val2.test3(other, other, rI, false).getValue(), val2.x + 2*other.x + rI); + Asserts.assertEQ(obj.test3(other, other, rI, false).getValue(), obj.x + 2*other.x + rI); + Asserts.assertEQ(val1.test7(rI, rI, rI, rI, rI, rI).getValue(), val1.x + 6*rI); + Asserts.assertEQ(val2.test7(rI, rI, rI, rI, rI, rI).getValue(), val2.x + 6*rI); + 
Asserts.assertEQ(val3.test7(rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val3.d1 + 6*rI)); + Asserts.assertEQ(val4.test7(rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val4.x1 + 6*rI)); + Asserts.assertEQ(obj.test7(rI, rI, rI, rI, rI, rI).getValue(), obj.x + 6*rI); + Asserts.assertEQ(val1.test8(rI, rI, rI, rI, rI, rI, rI).getValue(), val1.x + 7*rI); + Asserts.assertEQ(val2.test8(rI, rI, rI, rI, rI, rI, rI).getValue(), val2.x + 7*rI); + Asserts.assertEQ(val3.test8(rI, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val3.d1 + 7*rI)); + Asserts.assertEQ(val4.test8(rI, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val4.x1 + 7*rI)); + Asserts.assertEQ(obj.test8(rI, rI, rI, rI, rI, rI, rI).getValue(), obj.x + 7*rI); + Asserts.assertEQ(val1.test9(val3, rI, rI, rI, rI, rI, rI).getValue(), (int)(val1.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(val2.test9(val3, rI, rI, rI, rI, rI, rI).getValue(), (int)(val2.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(val3.test9(val3, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val3.d1 + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(val4.test9(val3, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val4.x1 + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(obj.test9(val3, rI, rI, rI, rI, rI, rI).getValue(), (int)(obj.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(val1.test10(val4, rI, rI, rI, rI, rI, rI).getValue(), (int)(val1.x + 4*val4.x1 + 6*rI)); + Asserts.assertEQ(val2.test10(val4, rI, rI, rI, rI, rI, rI).getValue(), (int)(val2.x + 4*val4.x1 + 6*rI)); + Asserts.assertEQ(val3.test10(val4, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val3.d1 + 4*val4.x1 + 6*rI)); + Asserts.assertEQ(val4.test10(val4, rI, rI, rI, rI, rI, rI).getValue(), (int)(4*val4.x1 + 4*val4.x1 + 6*rI)); + Asserts.assertEQ(obj.test10(val4, rI, rI, rI, rI, rI, rI).getValue(), (int)(obj.x + 4*val4.x1 + 6*rI)); + } + + // Pollute call profile + for (int i = 0; i < 100; ++i) { + Asserts.assertEQ(test15(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test16(obj, other, rI), obj.x + 2*other.x + rI); + 
Asserts.assertEQ(test17(obj), obj.x); + } + + // Trigger compilation of caller methods + for (int i = 0; i < 100_000; ++i) { + val1 = new MyValue1(rI+i); + val2 = new MyValue2(rI+i+1); + val3 = new MyValue3(rI+i+2); + val4 = new MyValue4(rI+i+3); + other = new OtherVal(rI+i+4); + obj = new MyObject(rI+i+5); + + Asserts.assertEQ(test1(val1, other, rI), val1.x + other.x + rI); + Asserts.assertEQ(test1(obj, other, rI), obj.x + other.x + rI); + Asserts.assertEQ(test2(obj, other, rI), obj.x + 2*other.x + rI); + Asserts.assertEQ(test2(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test3(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test4(obj, other, rI), obj.x + 2*other.x + rI); + Asserts.assertEQ(test5(val1, other, rI), val1.x + other.x + rI); + Asserts.assertEQ(test6(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test7(obj, other, rI), obj.x + other.x + rI); + Asserts.assertEQ(test8(obj, other, rI), obj.x + 2*other.x + rI); + Asserts.assertEQ(test9(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test9(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test10(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test10(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test11(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test12(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test13(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test14(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test15(obj, other, rI), obj.x + 2*other.x + rI); + Asserts.assertEQ(test16(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test17(val1), val1.x); + Asserts.assertEQ(test18(val1, rI), val1.x + 6*rI); + Asserts.assertEQ(test18(val2, rI), val2.x + 6*rI); + Asserts.assertEQ(test18(val3, rI), (int)(4*val3.d1 + 6*rI)); + Asserts.assertEQ(test18(val4, rI), 4*val4.x1 + 6*rI); + 
Asserts.assertEQ(test18(obj, rI), obj.x + 6*rI); + Asserts.assertEQ(test19(val1, rI), val1.x + 7*rI); + Asserts.assertEQ(test19(val2, rI), val2.x + 7*rI); + Asserts.assertEQ(test19(val3, rI), (int)(4*val3.d1 + 7*rI)); + Asserts.assertEQ(test19(val4, rI), 4*val4.x1 + 7*rI); + Asserts.assertEQ(test19(obj, rI), obj.x + 7*rI); + Asserts.assertEQ(test20(val1, val3, rI), (int)(val1.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(test20(val2, val3, rI), (int)(val2.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(test20(val3, val3, rI), (int)(4*val3.d1 + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(test20(val4, val3, rI), (int)(4*val4.x1 + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(test20(obj, val3, rI), (int)(obj.x + 4*val3.d1 + 6*rI)); + Asserts.assertEQ(test21(val1, val4, rI), val1.x + 4*val4.x1 + 6*rI); + Asserts.assertEQ(test21(val2, val4, rI), val2.x + 4*val4.x1 + 6*rI); + Asserts.assertEQ(test21(val3, val4, rI), (int)(4*val3.d1 + 4*val4.x1 + 6*rI)); + Asserts.assertEQ(test21(val4, val4, rI), 4*val4.x1 + 4*val4.x1 + 6*rI); + Asserts.assertEQ(test21(obj, val4, rI), obj.x + 4*val4.x1 + 6*rI); + } + + // Trigger deoptimization + Asserts.assertEQ(val1.test3(other, other, rI, true).getValue(), val1.x + other.x + rI); + Asserts.assertEQ(obj.test3(other, other, rI, true).getValue(), obj.x + other.x + rI); + + // Check results of methods still calling the deoptimized methods + Asserts.assertEQ(test9(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test9(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test10(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test10(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test11(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test11(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test12(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test12(val1, other, rI, false), val1.x + 2*other.x + rI); + 
Asserts.assertEQ(test13(val1, other, rI, false), val1.x + 2*other.x + rI); + Asserts.assertEQ(test13(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test14(obj, other, rI, false), obj.x + 2*other.x + rI); + Asserts.assertEQ(test14(val1, other, rI, false), val1.x + 2*other.x + rI); + + // Check with unexpected arguments + Asserts.assertEQ(test1(val2, other, rI), val2.x + other.x + rI); + Asserts.assertEQ(test2(val2, other, rI), val2.x + 2*other.x + rI); + Asserts.assertEQ(test5(val2, other, rI), val2.x + other.x + rI); + Asserts.assertEQ(test6(val2, other, rI), val2.x + 2*other.x + rI); + Asserts.assertEQ(test7(val1, other, rI), val1.x + other.x + rI); + Asserts.assertEQ(test8(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test15(val1, other, rI), val1.x + 2*other.x + rI); + Asserts.assertEQ(test16(obj, other, rI), obj.x + 2*other.x + rI); + Asserts.assertEQ(test17(obj), obj.x); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestCallingConvention.java 2019-03-11 14:27:35.566353919 +0100 @@ -0,0 +1,572 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +import java.lang.reflect.Method; + +/* + * @test + * @summary Test value type calling convention optimizations + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestCallingConvention.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestCallingConvention + */ +public class TestCallingConvention extends ValueTypeTest { + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 3: return new String[] {"-XX:-ValueArrayFlatten"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestCallingConvention test = new TestCallingConvention(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class, MyValue4.class, Test27Value1.class, Test27Value2.class, Test27Value3.class); + } + + // Test interpreter to compiled code with various signatures + @Test(failOn = ALLOC + STORE + TRAP) + public long test1(MyValue2 v) { + return v.hash(); + } + + @DontCompile + public void test1_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test1(v); + Asserts.assertEQ(result, v.hashInterpreted()); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test2(int i1, MyValue2 v, int i2) { + return v.hash() + i1 - i2; + } + + @DontCompile + public void test2_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test2(rI, v, 2*rI); + Asserts.assertEQ(result, v.hashInterpreted() - rI); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test3(long l1, MyValue2 v, long l2) { + return v.hash() + l1 - l2; + } + + @DontCompile + public void test3_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test3(rL, v, 2*rL); + Asserts.assertEQ(result, v.hashInterpreted() - rL); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test4(int i, MyValue2 v, long l) { + return v.hash() + i + l; + } + + @DontCompile + public void test4_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test4(rI, v, rL); + Asserts.assertEQ(result, v.hashInterpreted() + rL + rI); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test5(long l, MyValue2 v, int i) { + return v.hash() + i + l; + } + + 
@DontCompile + public void test5_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test5(rL, v, rI); + Asserts.assertEQ(result, v.hashInterpreted() + rL + rI); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test6(long l, MyValue1 v1, int i, MyValue2 v2) { + return v1.hash() + i + l + v2.hash(); + } + + @DontCompile + public void test6_verifier(boolean warmup) { + MyValue1 v1 = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue2 v2 = MyValue2.createWithFieldsInline(rI, true); + long result = test6(rL, v1, rI, v2); + Asserts.assertEQ(result, v1.hashInterpreted() + rL + rI + v2.hashInterpreted()); + } + + // Test compiled code to interpreter with various signatures + @DontCompile + public long test7_interp(MyValue2 v) { + return v.hash(); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test7(MyValue2 v) { + return test7_interp(v); + } + + @DontCompile + public void test7_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test7(v); + Asserts.assertEQ(result, v.hashInterpreted()); + } + + @DontCompile + public long test8_interp(int i1, MyValue2 v, int i2) { + return v.hash() + i1 - i2; + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test8(int i1, MyValue2 v, int i2) { + return test8_interp(i1, v, i2); + } + + @DontCompile + public void test8_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test8(rI, v, 2*rI); + Asserts.assertEQ(result, v.hashInterpreted() - rI); + } + + @DontCompile + public long test9_interp(long l1, MyValue2 v, long l2) { + return v.hash() + l1 - l2; + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test9(long l1, MyValue2 v, long l2) { + return test9_interp(l1, v, l2); + } + + @DontCompile + public void test9_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test9(rL, v, 2*rL); + Asserts.assertEQ(result, 
v.hashInterpreted() - rL); + } + + @DontCompile + public long test10_interp(int i, MyValue2 v, long l) { + return v.hash() + i + l; + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test10(int i, MyValue2 v, long l) { + return test10_interp(i, v, l); + } + + @DontCompile + public void test10_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test10(rI, v, rL); + Asserts.assertEQ(result, v.hashInterpreted() + rL + rI); + } + + @DontCompile + public long test11_interp(long l, MyValue2 v, int i) { + return v.hash() + i + l; + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test11(long l, MyValue2 v, int i) { + return test11_interp(l, v, i); + } + + @DontCompile + public void test11_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + long result = test11(rL, v, rI); + Asserts.assertEQ(result, v.hashInterpreted() + rL + rI); + } + + @DontCompile + public long test12_interp(long l, MyValue1 v1, int i, MyValue2 v2) { + return v1.hash() + i + l + v2.hash(); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test12(long l, MyValue1 v1, int i, MyValue2 v2) { + return test12_interp(l, v1, i, v2); + } + + @DontCompile + public void test12_verifier(boolean warmup) { + MyValue1 v1 = MyValue1.createWithFieldsDontInline(rI, rL); + MyValue2 v2 = MyValue2.createWithFieldsInline(rI, true); + long result = test12(rL, v1, rI, v2); + Asserts.assertEQ(result, v1.hashInterpreted() + rL + rI + v2.hashInterpreted()); + } + + // Test that debug info at a call is correct + @DontCompile + public long test13_interp(MyValue2 v, MyValue1[] va, boolean deopt) { + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test13")); + } + return v.hash() + va[0].hash() + va[1].hash(); + } + + @Test(failOn = ALLOC + STORE + TRAP) + public long test13(MyValue2 v, MyValue1[] va, boolean flag, long l) { + return test13_interp(v, va, flag) + l; + } + 
+ @DontCompile + public void test13_verifier(boolean warmup) { + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + MyValue1[] va = new MyValue1[2]; + va[0] = MyValue1.createWithFieldsDontInline(rI, rL); + va[1] = MyValue1.createWithFieldsDontInline(rI, rL); + long result = test13(v, va, !warmup, rL); + Asserts.assertEQ(result, v.hashInterpreted() + va[0].hash() + va[1].hash() + rL); + } + + // Test deoptimization at call return with return value in registers + @DontCompile + public MyValue2 test14_interp(boolean deopt) { + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test14")); + } + return MyValue2.createWithFieldsInline(rI, true); + } + + @Test() + public MyValue2 test14(boolean flag) { + return test14_interp(flag); + } + + @DontCompile + public void test14_verifier(boolean warmup) { + MyValue2 result = test14(!warmup); + MyValue2 v = MyValue2.createWithFieldsInline(rI, true); + Asserts.assertEQ(result.hash(), v.hash()); + } + + // Return value types in registers from interpreter -> compiled + final MyValue3 test15_vt = MyValue3.create(); + @DontCompile + public MyValue3 test15_interp() { + return test15_vt; + } + + MyValue3 test15_vt2; + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + LOAD + TRAP) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public void test15() { + test15_vt2 = test15_interp(); + } + + @DontCompile + public void test15_verifier(boolean warmup) { + test15(); + test15_vt.verify(test15_vt2); + } + + // Return value types in registers from compiled -> interpreter + final MyValue3 test16_vt = MyValue3.create(); + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + STORE + TRAP) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public MyValue3 test16() { + return test16_vt; + } + + @DontCompile + public void test16_verifier(boolean warmup) { + MyValue3 vt = test16(); + test16_vt.verify(vt); + } + + // Return value types in registers from compiled -> compiled + 
final MyValue3 test17_vt = MyValue3.create(); + @DontInline + public MyValue3 test17_comp() { + return test17_vt; + } + + MyValue3 test17_vt2; + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + LOAD + TRAP) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public void test17() { + test17_vt2 = test17_comp(); + } + + @DontCompile + public void test17_verifier(boolean warmup) throws Exception { + Method helper_m = getClass().getDeclaredMethod("test17_comp"); + if (!warmup && USE_COMPILER && !WHITE_BOX.isMethodCompiled(helper_m, false)) { + WHITE_BOX.enqueueMethodForCompilation(helper_m, COMP_LEVEL_FULL_OPTIMIZATION); + Asserts.assertTrue(WHITE_BOX.isMethodCompiled(helper_m, false), "test17_comp not compiled"); + } + test17(); + test17_vt.verify(test17_vt2); + } + + // Same tests as above but with a value type that cannot be returned in registers + + // Return value types in registers from interpreter -> compiled + final MyValue4 test18_vt = MyValue4.create(); + @DontCompile + public MyValue4 test18_interp() { + return test18_vt; + } + + MyValue4 test18_vt2; + @Test + public void test18() { + test18_vt2 = test18_interp(); + } + + @DontCompile + public void test18_verifier(boolean warmup) { + test18(); + test18_vt.verify(test18_vt2); + } + + // Return value types in registers from compiled -> interpreter + final MyValue4 test19_vt = MyValue4.create(); + @Test + public MyValue4 test19() { + return test19_vt; + } + + @DontCompile + public void test19_verifier(boolean warmup) { + MyValue4 vt = test19(); + test19_vt.verify(vt); + } + + // Return value types in registers from compiled -> compiled + final MyValue4 test20_vt = MyValue4.create(); + @DontInline + public MyValue4 test20_comp() { + return test20_vt; + } + + MyValue4 test20_vt2; + @Test + public void test20() { + test20_vt2 = test20_comp(); + } + + @DontCompile + public void test20_verifier(boolean warmup) throws Exception { + Method helper_m = getClass().getDeclaredMethod("test20_comp"); + if (!warmup && 
USE_COMPILER && !WHITE_BOX.isMethodCompiled(helper_m, false)) { + WHITE_BOX.enqueueMethodForCompilation(helper_m, COMP_LEVEL_FULL_OPTIMIZATION); + Asserts.assertTrue(WHITE_BOX.isMethodCompiled(helper_m, false), "test20_comp not compiled"); + } + test20(); + test20_vt.verify(test20_vt2); + } + + // Test no result from inlined method for incremental inlining + final MyValue3 test21_vt = MyValue3.create(); + public MyValue3 test21_inlined() { + throw new RuntimeException(); + } + + @Test + public MyValue3 test21() { + try { + return test21_inlined(); + } catch (RuntimeException ex) { + return test21_vt; + } + } + + @DontCompile + public void test21_verifier(boolean warmup) { + MyValue3 vt = test21(); + test21_vt.verify(vt); + } + + // Test returning a non-flattened value type as fields + MyValue3.box test22_vt = MyValue3.create(); + + @Test + public MyValue3 test22() { + return test22_vt; + } + + @DontCompile + public void test22_verifier(boolean warmup) { + MyValue3 vt = test22(); + test22_vt.verify(vt); + } + + // Test calling a method that has circular register/stack dependencies when unpacking value type arguments + value class TestValue23 { + final double f1; + TestValue23(double val) { + f1 = val; + } + } + + static double test23Callee(int i1, int i2, int i3, int i4, int i5, int i6, + TestValue23 v1, TestValue23 v2, TestValue23 v3, TestValue23 v4, TestValue23 v5, TestValue23 v6, TestValue23 v7, TestValue23 v8, + double d1, double d2, double d3, double d4, double d5, double d6, double d7, double d8) { + return i1 + i2 + i3 + i4 + i5 + i6 + v1.f1 + v2.f1 + v3.f1 + v4.f1 + v5.f1 + v6.f1 + v7.f1 + v8.f1 + d1 + d2 + d3 + d4 + d5 + d6 + d7 + d8; + } + + @Test + public double test23(int i1, int i2, int i3, int i4, int i5, int i6, + TestValue23 v1, TestValue23 v2, TestValue23 v3, TestValue23 v4, TestValue23 v5, TestValue23 v6, TestValue23 v7, TestValue23 v8, + double d1, double d2, double d3, double d4, double d5, double d6, double d7, double d8) { + return 
test23Callee(i1, i2, i3, i4, i5, i6, + v1, v2, v3, v4, v5, v6, v7, v8, + d1, d2, d3, d4, d5, d6, d7, d8); + } + + @DontCompile + public void test23_verifier(boolean warmup) { + TestValue23 vt = new TestValue23(rI); + double res1 = test23(rI, rI, rI, rI, rI, rI, + vt, vt, vt, vt, vt, vt, vt, vt, + rI, rI, rI, rI, rI, rI, rI, rI); + double res2 = test23Callee(rI, rI, rI, rI, rI, rI, + vt, vt, vt, vt, vt, vt, vt, vt, + rI, rI, rI, rI, rI, rI, rI, rI); + double res3 = 6*rI + 8*rI + 8*rI; + Asserts.assertEQ(res1, res2); + Asserts.assertEQ(res2, res3); + } + + // Should not return a nullable value type as fields + @Test + public MyValue2.box test24() { + return null; + } + + @DontCompile + public void test24_verifier(boolean warmup) { + MyValue2.box vt = test24(); + Asserts.assertEQ(vt, null); + } + + // Same as test24 but with control flow and inlining + @ForceInline + public MyValue2.box test26_callee(boolean b) { + if (b) { + return null; + } else { + return MyValue2.createWithFieldsInline(rI, true); + } + } + + @Test + public MyValue2.box test26(boolean b) { + return test26_callee(b); + } + + @DontCompile + public void test26_verifier(boolean warmup) { + MyValue2.box vt = test26(true); + Asserts.assertEQ(vt, null); + vt = test26(false); + Asserts.assertEQ(vt.hash(), MyValue2.createWithFieldsInline(rI, true).hash()); + } + + // Test calling convention with deep hierarchy of flattened fields + value final class Test27Value1 { + final Test27Value2.val valueField; + + private Test27Value1(Test27Value2 val2) { + valueField = val2; + } + + @DontInline + public int test(Test27Value1 val1) { + return valueField.test(valueField) + val1.valueField.test(valueField); + } + } + + value final class Test27Value2 { + final Test27Value3.val valueField; + + private Test27Value2(Test27Value3 val3) { + valueField = val3; + } + + @DontInline + public int test(Test27Value2 val2) { + return valueField.test(valueField) + val2.valueField.test(valueField); + } + } + + value final class 
Test27Value3 { + final int x; + + private Test27Value3(int x) { + this.x = x; + } + + @DontInline + public int test(Test27Value3 val3) { + return x + val3.x; + } + } + + @Test + public int test27(Test27Value1 val) { + return val.test(val); + } + + @DontCompile + public void test27_verifier(boolean warmup) { + Test27Value3 val3 = new Test27Value3(rI); + Test27Value2 val2 = new Test27Value2(val3); + Test27Value1 val1 = new Test27Value1(val2); + int result = test27(val1); + Asserts.assertEQ(result, 8*rI); + } + + static final MyValue1.box test28Val = MyValue1.createWithFieldsDontInline(rI, rL); + + @Test + @Warmup(0) + public String test28() { + return test28Val.toString(); + } + + @DontCompile + public void test28_verifier(boolean warmup) { + String result = test28(); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestIntrinsics.java 2019-03-11 14:27:36.022353913 +0100 @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import java.lang.reflect.Array; +import java.lang.reflect.Field; +import java.util.Arrays; + +import jdk.test.lib.Asserts; +import jdk.internal.misc.Unsafe; + +/* + * @test + * @summary Test intrinsic support for value types + * @library /testlibrary /test/lib /compiler/whitebox / + * @modules java.base/jdk.internal.misc + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestIntrinsics.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestIntrinsics + */ +public class TestIntrinsics extends ValueTypeTest { + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 3: return new String[] {"-XX:-MonomorphicArrayCheck", "-XX:+ValueArrayFlatten"}; + case 4: return new String[] {"-XX:-MonomorphicArrayCheck"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestIntrinsics test = new TestIntrinsics(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class); + } + + // Test correctness of the Class::isAssignableFrom intrinsic + @Test() + public boolean test1(Class supercls, Class subcls) { + return supercls.isAssignableFrom(subcls); + } + + public void test1_verifier(boolean warmup) { + Asserts.assertTrue(test1(Object.class, MyValue1.class), "test1_1 failed"); + Asserts.assertTrue(test1(MyValue1.class, MyValue1.class), "test1_2 failed"); + Asserts.assertTrue(test1(Object.class, java.util.ArrayList.class), "test1_3 failed"); + Asserts.assertTrue(test1(java.util.ArrayList.class, java.util.ArrayList.class), "test1_4 failed"); + } + + // Verify that Class::isAssignableFrom checks with statically known classes are folded + @Test(failOn = LOADK) + public boolean test2() { + boolean check1 = java.util.AbstractList.class.isAssignableFrom(java.util.ArrayList.class); + boolean check2 = MyValue1.class.isAssignableFrom(MyValue1.class); + boolean check3 = Object.class.isAssignableFrom(java.util.ArrayList.class); + boolean check4 = Object.class.isAssignableFrom(MyValue1.class); + boolean check5 = !MyValue1.class.isAssignableFrom(Object.class); + return check1 && check2 && check3 && check4 && check5; + } + + public void test2_verifier(boolean warmup) { + Asserts.assertTrue(test2(), "test2 failed"); + } + + // Test correctness of the Class::getSuperclass intrinsic + @Test() + public Class test3(Class cls) { + return cls.getSuperclass(); + } + + public void test3_verifier(boolean warmup) { + Asserts.assertTrue(test3(Object.class) == null, "test3_1 failed"); + 
Asserts.assertTrue(test3(MyValue1.class) == Object.class, "test3_2 failed"); + Asserts.assertTrue(test3(Class.class) == Object.class, "test3_3 failed"); + } + + // Verify that Class::getSuperclass checks with statically known classes are folded + @Test(failOn = LOADK) + public boolean test4() { + boolean check1 = Object.class.getSuperclass() == null; + boolean check2 = MyValue1.class.getSuperclass() == Object.class; + boolean check3 = Class.class.getSuperclass() == Object.class; + return check1 && check2 && check3; + } + + public void test4_verifier(boolean warmup) { + Asserts.assertTrue(test4(), "test4 failed"); + } + + // Test toString() method + @Test() + public String test5(MyValue1 v) { + return v.toString(); + } + + @DontCompile + public void test5_verifier(boolean warmup) { + MyValue1 v = MyValue1.createDefaultInline(); + test5(v); + } + + // Test hashCode() method + @Test() + public int test6(MyValue1 v) { + return v.hashCode(); + } + + @DontCompile + public void test6_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + int res = test6(v); + Asserts.assertEQ(res, v.hashCode()); + } + + // Test default value type array creation via reflection + @Test() + public Object[] test7(Class componentType, int len) { + Object[] va = (Object[])Array.newInstance(componentType, len); + return va; + } + + @DontCompile + public void test7_verifier(boolean warmup) { + int len = Math.abs(rI) % 42; + long hash = MyValue1.createDefaultDontInline().hashPrimitive(); + Object[] va = test7(MyValue1.class, len); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(((MyValue1)va[i]).hashPrimitive(), hash); + } + } + + // Class.isInstance + @Test() + public boolean test8(Class c, MyValue1 vt) { + return c.isInstance(vt); + } + + @DontCompile + public void test8_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + boolean result = test8(MyValue1.class, vt); + Asserts.assertTrue(result); + } + + @Test() + public boolean 
test9(Class c, MyValue1 vt) { + return c.isInstance(vt); + } + + @DontCompile + public void test9_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + boolean result = test9(MyValue2.class, vt); + Asserts.assertFalse(result); + } + + // Class.cast + @Test() + public Object test10(Class c, MyValue1 vt) { + return c.cast(vt); + } + + @DontCompile + public void test10_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + Object result = test10(MyValue1.class, vt); + Asserts.assertEQ(((MyValue1)result).hash(), vt.hash()); + } + + @Test() + public Object test11(Class c, MyValue1 vt) { + return c.cast(vt); + } + + @DontCompile + public void test11_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + try { + test11(MyValue2.class, vt); + throw new RuntimeException("should have thrown"); + } catch(ClassCastException cce) { + } + } + + @Test() + public Object test12(MyValue1 vt) { + return MyValue1.class.cast(vt); + } + + @DontCompile + public void test12_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + Object result = test12(vt); + Asserts.assertEQ(((MyValue1)result).hash(), vt.hash()); + } + + @Test() + public Object test13(MyValue1 vt) { + return MyValue2.class.cast(vt); + } + + @DontCompile + public void test13_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + try { + test13(vt); + throw new RuntimeException("should have thrown"); + } catch(ClassCastException cce) { + } + } + + // value type array creation via reflection + @Test() + public void test14(int len, long hash) { + Object[] va = (Object[])Array.newInstance(MyValue1.class, len); + for (int i = 0; i < len; ++i) { + Asserts.assertEQ(((MyValue1)va[i]).hashPrimitive(), hash); + } + } + + @DontCompile + public void test14_verifier(boolean warmup) { + int len = Math.abs(rI) % 42; + long hash = MyValue1.createDefaultDontInline().hashPrimitive(); + 
test14(len, hash); + } + + // Test hashCode() method + @Test() + public int test15(Object v) { + return v.hashCode(); + } + + @DontCompile + public void test15_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + int res = test15(v); + Asserts.assertEQ(res, v.hashCode()); + } + + @Test() + public int test16(Object v) { + return System.identityHashCode(v); + } + + @DontCompile + public void test16_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + int res = test16(v); + Asserts.assertEQ(res, System.identityHashCode((Object)v)); + } + + @Test() + public int test17(Object v) { + return System.identityHashCode(v); + } + + @DontCompile + public void test17_verifier(boolean warmup) { + Integer v = new Integer(rI); + int res = test17(v); + Asserts.assertEQ(res, System.identityHashCode(v)); + } + + @Test() + public int test18(Object v) { + return System.identityHashCode(v); + } + + @DontCompile + public void test18_verifier(boolean warmup) { + Object v = null; + int res = test18(v); + Asserts.assertEQ(res, System.identityHashCode(v)); + } + + // hashCode() and toString() with different value types + @Test() + public int test19(MyValue1 vt1, MyValue1 vt2, boolean b) { + MyValue1 res = b ? vt1 : vt2; + return res.hashCode(); + } + + @DontCompile + public void test19_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + int res = test19(vt, vt, true); + Asserts.assertEQ(res, vt.hashCode()); + res = test19(vt, vt, false); + Asserts.assertEQ(res, vt.hashCode()); + } + + @Test() + public String test20(MyValue1 vt1, MyValue1 vt2, boolean b) { + MyValue1 res = b ? 
vt1 : vt2; + return res.toString(); + } + + @DontCompile + public void test20_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + String res = test20(vt, vt, true); + Asserts.assertEQ(res, vt.toString()); + res = test20(vt, vt, false); + Asserts.assertEQ(res, vt.toString()); + } + + private static final Unsafe U = Unsafe.getUnsafe(); + private static final long X_OFFSET; + private static final long Y_OFFSET; + private static final long V1_OFFSET; + private static final boolean V1_FLATTENED; + static { + try { + Field xField = MyValue1.class.getDeclaredField("x"); + X_OFFSET = U.objectFieldOffset(xField); + Field yField = MyValue1.class.getDeclaredField("y"); + Y_OFFSET = U.objectFieldOffset(yField); + Field v1Field = MyValue1.class.getDeclaredField("v1"); + V1_OFFSET = U.objectFieldOffset(v1Field); + V1_FLATTENED = U.isFlattened(v1Field); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + protected static final String CALL_Unsafe = START + "CallStaticJava" + MID + "# Static jdk.internal.misc.Unsafe::" + END; + + @Test(failOn=CALL_Unsafe) + public int test21(MyValue1 v) { + return U.getInt(v, X_OFFSET); + } + + @DontCompile + public void test21_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + int res = test21(v); + Asserts.assertEQ(res, v.x); + } + + MyValue1.val test22_vt; + @Test(failOn=CALL_Unsafe + ALLOC) + public void test22(MyValue1 v) { + v = U.makePrivateBuffer(v); + U.putInt(v, X_OFFSET, rI); + v = U.finishPrivateBuffer(v); + test22_vt = v; + } + + @DontCompile + public void test22_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + test22(v.setX(v, 0)); + Asserts.assertEQ(test22_vt.hash(), v.hash()); + } + + @Test(failOn=CALL_Unsafe) + public int test23(MyValue1 v, long offset) { + return U.getInt(v, offset); + } + + @DontCompile + public void test23_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + 
int res = test23(v, X_OFFSET); + Asserts.assertEQ(res, v.x); + } + + MyValue1.val test24_vt = MyValue1.createWithFieldsInline(rI, rL); + + @Test(failOn=CALL_Unsafe) + public int test24(long offset) { + return U.getInt(test24_vt, offset); + } + + @DontCompile + public void test24_verifier(boolean warmup) { + int res = test24(X_OFFSET); + Asserts.assertEQ(res, test24_vt.x); + } + + // Test copyOf intrinsic with allocated value type in it's debug information + value final class Test25Value { + final int x; + public Test25Value() { + this.x = 42; + } + } + + final Test25Value[] test25Array = new Test25Value[10]; + + @Test + public Test25Value[] test25(Test25Value element) { + Test25Value[] newArray = Arrays.copyOf(test25Array, test25Array.length + 1); + newArray[test25Array.length] = element; + return newArray; + } + + @DontCompile + public void test25_verifier(boolean warmup) { + Test25Value vt = new Test25Value(); + test25(vt); + } + + @Test + public Object test26() { + Class[] ca = new Class[1]; + for (int i = 0; i < 1; ++i) { + // Folds during loop opts + ca[i] = MyValue1.class; + } + return Array.newInstance(ca[0], 1); + } + + @DontCompile + public void test26_verifier(boolean warmup) { + Object[] res = (Object[])test26(); + Asserts.assertEQ(((MyValue1)res[0]).hashPrimitive(), MyValue1.createDefaultInline().hashPrimitive()); + } + + // Load non-flattenable value type field with unsafe + MyValue1.box test27_vt = MyValue1.createWithFieldsInline(rI, rL); + private static final long TEST27_OFFSET; + static { + try { + Field field = TestIntrinsics.class.getDeclaredField("test27_vt"); + TEST27_OFFSET = U.objectFieldOffset(field); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Test(failOn=CALL_Unsafe) + public MyValue1 test27() { + return (MyValue1)U.getReference(this, TEST27_OFFSET); + } + + @DontCompile + public void test27_verifier(boolean warmup) { + MyValue1 res = test27(); + Asserts.assertEQ(res.hash(), test24_vt.hash()); + } + + // 
Mismatched type + @Test(failOn=CALL_Unsafe) + public int test28(MyValue1 v) { + return U.getByte(v, X_OFFSET); + } + + @DontCompile + public void test28_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + int res = test28(v); + if (java.nio.ByteOrder.nativeOrder() == java.nio.ByteOrder.LITTLE_ENDIAN) { + Asserts.assertEQ(res, (int)((byte)v.x)); + } else { + Asserts.assertEQ(res, (int)((byte)Integer.reverseBytes(v.x))); + } + } + + // Wrong alignment + @Test(failOn=CALL_Unsafe) + public long test29(MyValue1 v) { + // Read the field that's guaranteed to not be last in the + // value so we don't read out of the value + if (X_OFFSET < Y_OFFSET) { + return U.getInt(v, X_OFFSET+1); + } + return U.getLong(v, Y_OFFSET+1); + } + + @DontCompile + public void test29_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + long res = test29(v); + if (java.nio.ByteOrder.nativeOrder() == java.nio.ByteOrder.LITTLE_ENDIAN) { + if (X_OFFSET < Y_OFFSET) { + Asserts.assertEQ(((int)res) << 8, (v.x >> 8) << 8); + } else { + Asserts.assertEQ(res << 8, (v.y >> 8) << 8); + } + } else { + if (X_OFFSET < Y_OFFSET) { + Asserts.assertEQ(((int)res), v.x >>> 8); + } else { + Asserts.assertEQ(res, v.y >>> 8); + } + } + } + + // getValue to retrieve flattened field from value + @Test(failOn=CALL_Unsafe) + public MyValue2 test30(MyValue1 v) { + if (V1_FLATTENED) { + return U.getValue(v, V1_OFFSET, MyValue2.class); + } + return (MyValue2)U.getReference(v, V1_OFFSET); + } + + @DontCompile + public void test30_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + MyValue2 res = test30(v); + Asserts.assertEQ(res.hash(), v.v1.hash()); + } + + MyValue1.val test31_vt; + private static final long TEST31_VT_OFFSET; + private static final boolean TEST31_VT_FLATTENED; + static { + try { + Field test31_vt_Field = TestIntrinsics.class.getDeclaredField("test31_vt"); + TEST31_VT_OFFSET = 
U.objectFieldOffset(test31_vt_Field); + TEST31_VT_FLATTENED = U.isFlattened(test31_vt_Field); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + // getValue to retrieve flattened field from object + @Test(failOn=CALL_Unsafe) + public MyValue1 test31() { + if (TEST31_VT_FLATTENED) { + return U.getValue(this, TEST31_VT_OFFSET, MyValue1.class); + } + return (MyValue1)U.getReference(this, TEST31_VT_OFFSET); + } + + @DontCompile + public void test31_verifier(boolean warmup) { + test31_vt = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 res = test31(); + Asserts.assertEQ(res.hash(), test31_vt.hash()); + } + + // putValue to set flattened field in object + @Test(failOn=CALL_Unsafe) + public void test32(MyValue1 vt) { + if (TEST31_VT_FLATTENED) { + U.putValue(this, TEST31_VT_OFFSET, MyValue1.class, vt); + } else { + U.putReference(this, TEST31_VT_OFFSET, vt); + } + } + + @DontCompile + public void test32_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + test31_vt = MyValue1.createDefaultInline(); + test32(vt); + Asserts.assertEQ(vt.hash(), test31_vt.hash()); + } + + private static final int TEST33_BASE_OFFSET; + private static final int TEST33_INDEX_SCALE; + private static final boolean TEST33_FLATTENED_ARRAY; + static { + try { + TEST33_BASE_OFFSET = U.arrayBaseOffset(MyValue1[].class); + TEST33_INDEX_SCALE = U.arrayIndexScale(MyValue1[].class); + TEST33_FLATTENED_ARRAY = U.isFlattenedArray(MyValue1[].class); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + // getValue to retrieve flattened field from array + @Test(failOn=CALL_Unsafe) + public MyValue1 test33(MyValue1[] arr) { + if (TEST33_FLATTENED_ARRAY) { + return U.getValue(arr, TEST33_BASE_OFFSET + TEST33_INDEX_SCALE, MyValue1.class); + } + return (MyValue1)U.getReference(arr, TEST33_BASE_OFFSET + TEST33_INDEX_SCALE); + } + + @DontCompile + public void test33_verifier(boolean warmup) { + MyValue1[] arr = new MyValue1[2]; + MyValue1 vt = 
MyValue1.createWithFieldsInline(rI, rL); + arr[1] = vt; + MyValue1 res = test33(arr); + Asserts.assertEQ(res.hash(), vt.hash()); + } + + // putValue to set flattened field in array + @Test(failOn=CALL_Unsafe) + public void test34(MyValue1[] arr, MyValue1 vt) { + if (TEST33_FLATTENED_ARRAY) { + U.putValue(arr, TEST33_BASE_OFFSET + TEST33_INDEX_SCALE, MyValue1.class, vt); + } else { + U.putReference(arr, TEST33_BASE_OFFSET + TEST33_INDEX_SCALE, vt); + } + } + + @DontCompile + public void test34_verifier(boolean warmup) { + MyValue1[] arr = new MyValue1[2]; + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + test34(arr, vt); + Asserts.assertEQ(arr[1].hash(), vt.hash()); + } + + // getValue to retrieve flattened field from object with unknown + // container type + @Test(failOn=CALL_Unsafe) + public MyValue1 test35(Object o) { + if (TEST31_VT_FLATTENED) { + return U.getValue(o, TEST31_VT_OFFSET, MyValue1.class); + } + return (MyValue1)U.getReference(o, TEST31_VT_OFFSET); + } + + @DontCompile + public void test35_verifier(boolean warmup) { + test31_vt = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 res = test35(this); + Asserts.assertEQ(res.hash(), test31_vt.hash()); + } + + // getValue to retrieve flattened field from object at unknown + // offset + @Test(failOn=CALL_Unsafe) + public MyValue1 test36(long offset) { + if (TEST31_VT_FLATTENED) { + return U.getValue(this, offset, MyValue1.class); + } + return (MyValue1)U.getReference(this, offset); + } + + @DontCompile + public void test36_verifier(boolean warmup) { + test31_vt = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 res = test36(TEST31_VT_OFFSET); + Asserts.assertEQ(res.hash(), test31_vt.hash()); + } + + // putValue to set flattened field in object with unknown + // container + @Test(failOn=CALL_Unsafe) + public void test37(Object o, MyValue1 vt) { + if (TEST31_VT_FLATTENED) { + U.putValue(o, TEST31_VT_OFFSET, MyValue1.class, vt); + } else { + U.putReference(o, TEST31_VT_OFFSET, vt); + } + } + + 
@DontCompile + public void test37_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + test31_vt = MyValue1.createDefaultInline(); + test37(this, vt); + Asserts.assertEQ(vt.hash(), test31_vt.hash()); + } + + // putValue to set flattened field in object, non value argument + // to store + @Test(match = { CALL_Unsafe }, matchCount = { 1 }) + public void test38(Object o) { + if (TEST31_VT_FLATTENED) { + U.putValue(this, TEST31_VT_OFFSET, MyValue1.class, o); + } else { + U.putReference(this, TEST31_VT_OFFSET, o); + } + } + + @DontCompile + public void test38_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + test31_vt = MyValue1.createDefaultInline(); + test38(vt); + Asserts.assertEQ(vt.hash(), test31_vt.hash()); + } + + @Test(failOn=CALL_Unsafe) + public MyValue1 test39(MyValue1 v) { + v = U.makePrivateBuffer(v); + U.putInt(v, X_OFFSET, rI); + v = U.finishPrivateBuffer(v); + return v; + } + + @DontCompile + public void test39_verifier(boolean warmup) { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 res = test39(v.setX(v, 0)); + Asserts.assertEQ(res.hash(), v.hash()); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestJNICalls.java 2019-03-11 14:27:36.486353906 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +import java.lang.reflect.Method; + +/* + * @test + * @summary Test calling native methods with value type arguments from compiled code. + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestJNICalls.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestJNICalls + */ +public class TestJNICalls extends ValueTypeTest { + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + return null; + } + + public static void main(String[] args) throws Throwable { + TestJNICalls test = new TestJNICalls(); + test.run(args, MyValue1.class); + } + + static { + System.loadLibrary("TestJNICalls"); + } + + public native Object testMethod1(MyValue1 o); + public native long testMethod2(MyValue1 o); + + // Pass a value type to a native method that calls back into Java code and returns a value + @Test + @Warmup(10000) // Make sure native method is compiled + public MyValue1 test1(MyValue1 vt, boolean callback) { + if (!callback) { + return (MyValue1)testMethod1(vt); + } else { + return vt; + } + } + + @DontCompile + public void test1_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + MyValue1 result = test1(vt, false); + Asserts.assertEQ(result.hash(), vt.hash()); + result = test1(vt, true); + Asserts.assertEQ(result.hash(), vt.hash()); + } + + // Pass a value type to a native method that calls the hash method and returns the result + @Test + @Warmup(10000) // Make sure native method is compiled + public long test2(MyValue1 vt) { + return testMethod2(vt); + } + + @DontCompile + public void test2_verifier(boolean warmup) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + long result = test2(vt); + Asserts.assertEQ(result, vt.hash()); + } + + static value class MyValueWithNative { + public final int x; + + private MyValueWithNative(int x) { + this.x = x; + } + + public native int testMethod3(); + } + + // Call a native method with a value type receiver + @Test + @Warmup(10000) // Make sure native method is compiled + public int test3(MyValueWithNative vt) { + return vt.testMethod3(); + } + + @DontCompile + public void test3_verifier(boolean warmup) { + MyValueWithNative vt = new MyValueWithNative(rI); + int result = test3(vt); + Asserts.assertEQ(result, rI); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 
+0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestLWorld.java 2019-03-11 14:27:36.938353900 +0100 @@ -0,0 +1,1981 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import java.lang.invoke.*; +import java.lang.reflect.Method; + +import jdk.experimental.value.MethodHandleBuilder; +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test value types in LWorld. + * @modules java.base/jdk.experimental.value + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestLWorld.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. 
-XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestLWorld + */ +public class TestLWorld extends ValueTypeTest { + public int getNumScenarios() { + if (TEST_C1) { + return 2; + } else { + return super.getNumScenarios(); + } + } + + // Extra VM parameters for some test scenarios. See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + if (TEST_C1) { + switch (scenario) { + case 1: return new String[] {"-XX:-UseBiasedLocking"}; + } + return null; + } + + switch (scenario) { + case 1: return new String[] {"-XX:-UseOptoBiasInlining"}; + case 2: return new String[] {"-XX:-UseBiasedLocking"}; + case 3: return new String[] {"-XX:-MonomorphicArrayCheck", "-XX:-UseBiasedLocking", "-XX:+ValueArrayFlatten"}; + case 4: return new String[] {"-XX:-MonomorphicArrayCheck"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestLWorld test = new TestLWorld(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class, MyValue3.class, + MyValue3Inline.class, Test51Value.class); + } + + // Helper methods + + private static final MyValue1 testValue1 = MyValue1.createWithFieldsInline(rI, rL); + private static final MyValue2 testValue2 = MyValue2.createWithFieldsInline(rI, true); + + protected long hash() { + return testValue1.hash(); + } + + // Test passing a value type as an Object + @DontInline + public Object test1_dontinline1(Object o) { + return o; + } + + @DontInline + public MyValue1 test1_dontinline2(Object o) { + return (MyValue1)o; + } + + @ForceInline + public Object test1_inline1(Object o) { + return o; + } + + @ForceInline + public MyValue1 test1_inline2(Object o) { + return (MyValue1)o; + } + + @Test() + public MyValue1 test1() { + MyValue1 vt = testValue1; + vt = (MyValue1)test1_dontinline1(vt); + vt = 
test1_dontinline2(vt); + vt = (MyValue1)test1_inline1(vt); + vt = test1_inline2(vt); + return vt; + } + + @DontCompile + public void test1_verifier(boolean warmup) { + Asserts.assertEQ(test1().hash(), hash()); + } + + // Test storing/loading value types to/from Object and value type fields + Object objectField1 = null; + Object objectField2 = null; + Object objectField3 = null; + Object objectField4 = null; + Object objectField5 = null; + Object objectField6 = null; + + MyValue1.val valueField1 = testValue1; + MyValue1.val valueField2 = testValue1; + MyValue1.box valueField3 = testValue1; + MyValue1.val valueField4; + MyValue1.box valueField5; + + static MyValue1.box staticValueField1 = testValue1; + static MyValue1.val staticValueField2 = testValue1; + static MyValue1.val staticValueField3; + static MyValue1.box staticValueField4; + + @DontInline + public Object readValueField5() { + return (Object)valueField5; + } + + @DontInline + public Object readStaticValueField4() { + return (Object)staticValueField4; + } + + @Test() + public long test2(MyValue1 vt1, Object vt2) { + objectField1 = vt1; + objectField2 = (MyValue1)vt2; + objectField3 = testValue1; + objectField4 = MyValue1.createWithFieldsDontInline(rI, rL); + objectField5 = valueField1; + objectField6 = valueField3; + valueField1 = (MyValue1)objectField1; + valueField2 = (MyValue1)vt2; + valueField3 = (MyValue1)vt2; + staticValueField1 = (MyValue1)objectField1; + staticValueField2 = (MyValue1)vt1; + // Don't inline these methods because reading NULL will trigger a deoptimization + if (readValueField5() != null || readStaticValueField4() != null) { + throw new RuntimeException("Should be null"); + } + return ((MyValue1)objectField1).hash() + ((MyValue1)objectField2).hash() + + ((MyValue1)objectField3).hash() + ((MyValue1)objectField4).hash() + + ((MyValue1)objectField5).hash() + ((MyValue1)objectField6).hash() + + valueField1.hash() + valueField2.hash() + valueField3.hash() + valueField4.hashPrimitive() + + 
staticValueField1.hash() + staticValueField2.hash() + staticValueField3.hashPrimitive(); + } + + @DontCompile + public void test2_verifier(boolean warmup) { + MyValue1 vt = testValue1; + MyValue1 def = MyValue1.createDefaultDontInline(); + long result = test2(vt, vt); + Asserts.assertEQ(result, 11*vt.hash() + 2*def.hashPrimitive()); + } + + // Test merging value types and objects + @Test() + public Object test3(int state) { + Object res = null; + if (state == 0) { + res = new Integer(rI); + } else if (state == 1) { + res = MyValue1.createWithFieldsInline(rI, rL); + } else if (state == 2) { + res = MyValue1.createWithFieldsDontInline(rI, rL); + } else if (state == 3) { + res = (MyValue1)objectField1; + } else if (state == 4) { + res = valueField1; + } else if (state == 5) { + res = null; + } else if (state == 6) { + res = MyValue2.createWithFieldsInline(rI, true); + } else if (state == 7) { + res = testValue2; + } + return res; + } + + @DontCompile + public void test3_verifier(boolean warmup) { + objectField1 = valueField1; + Object result = null; + result = test3(0); + Asserts.assertEQ((Integer)result, rI); + result = test3(1); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test3(2); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test3(3); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test3(4); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test3(5); + Asserts.assertEQ(result, null); + result = test3(6); + Asserts.assertEQ(((MyValue2)result).hash(), testValue2.hash()); + result = test3(7); + Asserts.assertEQ(((MyValue2)result).hash(), testValue2.hash()); + } + + // Test merging value types and objects in loops + @Test() + public Object test4(int iters) { + Object res = new Integer(rI); + for (int i = 0; i < iters; ++i) { + if (res instanceof Integer) { + res = MyValue1.createWithFieldsInline(rI, rL); + } else { + res = MyValue1.createWithFieldsInline(((MyValue1)res).x + 1, rL); + } + } + 
return res; + } + + @DontCompile + public void test4_verifier(boolean warmup) { + Integer result1 = (Integer)test4(0); + Asserts.assertEQ(result1, rI); + int iters = (Math.abs(rI) % 10) + 1; + MyValue1 result2 = (MyValue1)test4(iters); + MyValue1 vt = MyValue1.createWithFieldsInline(rI + iters - 1, rL); + Asserts.assertEQ(result2.hash(), vt.hash()); + } + + // Test value types in object variables that are live at safepoint + @Test(failOn = ALLOC + STORE + LOOP) + public long test5(MyValue1 arg, boolean deopt) { + Object vt1 = MyValue1.createWithFieldsInline(rI, rL); + Object vt2 = MyValue1.createWithFieldsDontInline(rI, rL); + Object vt3 = arg; + Object vt4 = valueField1; + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test5")); + } + return ((MyValue1)vt1).hash() + ((MyValue1)vt2).hash() + + ((MyValue1)vt3).hash() + ((MyValue1)vt4).hash(); + } + + @DontCompile + public void test5_verifier(boolean warmup) { + long result = test5(valueField1, !warmup); + Asserts.assertEQ(result, 4*hash()); + } + + // Test comparing value types with objects + @Test(failOn = ALLOC + LOAD + STORE + LOOP) + public boolean test6(Object arg) { + Object vt = MyValue1.createWithFieldsInline(rI, rL); + if (vt == arg || vt == (Object)valueField1 || vt == objectField1 || vt == null || + arg == vt || (Object)valueField1 == vt || objectField1 == vt || null == vt) { + return true; + } + return false; + } + + @DontCompile + public void test6_verifier(boolean warmup) { + boolean result = test6(null); + Asserts.assertFalse(result); + } + + // merge of value and non value + @Test + public Object test7(boolean flag) { + Object res = null; + if (flag) { + res = valueField1; + } else { + res = objectField1; + } + return res; + } + + @DontCompile + public void test7_verifier(boolean warmup) { + test7(true); + test7(false); + } + + @Test + public Object test8(boolean flag) { + Object res = null; + if (flag) { + res = objectField1; + } else { + res = 
valueField1; + } + return res; + } + + @DontCompile + public void test8_verifier(boolean warmup) { + test8(true); + test8(false); + } + + // merge of values in a loop, stored in an object local + @Test + public Object test9() { + Object o = valueField1; + for (int i = 1; i < 100; i *= 2) { + MyValue1 v = (MyValue1)o; + o = MyValue1.setX(v, v.x + 1); + } + return o; + } + + @DontCompile + public void test9_verifier(boolean warmup) { + test9(); + } + + // merge of values in an object local + public Object test10_helper() { + return valueField1; + } + + @Test(failOn = ALLOC + LOAD + STORE) + public void test10(boolean flag) { + Object o = null; + if (flag) { + o = valueField1; + } else { + o = test10_helper(); + } + valueField1 = (MyValue1)o; + } + + @DontCompile + public void test10_verifier(boolean warmup) { + test10(true); + test10(false); + } + + // Interface tests + + @DontInline + public MyInterface test11_dontinline1(MyInterface o) { + return o; + } + + @DontInline + public MyValue1 test11_dontinline2(MyInterface o) { + return (MyValue1)o; + } + + @ForceInline + public MyInterface test11_inline1(MyInterface o) { + return o; + } + + @ForceInline + public MyValue1 test11_inline2(MyInterface o) { + return (MyValue1)o; + } + + @Test() + public MyValue1 test11() { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + vt = (MyValue1)test11_dontinline1(vt); + vt = test11_dontinline2(vt); + vt = (MyValue1)test11_inline1(vt); + vt = test11_inline2(vt); + return vt; + } + + @DontCompile + public void test11_verifier(boolean warmup) { + Asserts.assertEQ(test11().hash(), hash()); + } + + // Test storing/loading value types to/from interface and value type fields + MyInterface interfaceField1 = null; + MyInterface interfaceField2 = null; + MyInterface interfaceField3 = null; + MyInterface interfaceField4 = null; + MyInterface interfaceField5 = null; + MyInterface interfaceField6 = null; + + @DontInline + public MyInterface readValueField5AsInterface() { + return 
(MyInterface)valueField5; + } + + @DontInline + public MyInterface readStaticValueField4AsInterface() { + return (MyInterface)staticValueField4; + } + + @Test() + public long test12(MyValue1 vt1, MyInterface vt2) { + interfaceField1 = vt1; + interfaceField2 = (MyValue1)vt2; + interfaceField3 = MyValue1.createWithFieldsInline(rI, rL); + interfaceField4 = MyValue1.createWithFieldsDontInline(rI, rL); + interfaceField5 = valueField1; + interfaceField6 = valueField3; + valueField1 = (MyValue1)interfaceField1; + valueField2 = (MyValue1)vt2; + valueField3 = (MyValue1)vt2; + staticValueField1 = (MyValue1)interfaceField1; + staticValueField2 = (MyValue1)vt1; + // Don't inline these methods because reading NULL will trigger a deoptimization + if (readValueField5AsInterface() != null || readStaticValueField4AsInterface() != null) { + throw new RuntimeException("Should be null"); + } + return ((MyValue1)interfaceField1).hash() + ((MyValue1)interfaceField2).hash() + + ((MyValue1)interfaceField3).hash() + ((MyValue1)interfaceField4).hash() + + ((MyValue1)interfaceField5).hash() + ((MyValue1)interfaceField6).hash() + + valueField1.hash() + valueField2.hash() + valueField3.hash() + valueField4.hashPrimitive() + + staticValueField1.hash() + staticValueField2.hash() + staticValueField3.hashPrimitive(); + } + + @DontCompile + public void test12_verifier(boolean warmup) { + MyValue1 vt = testValue1; + MyValue1 def = MyValue1.createDefaultDontInline(); + long result = test12(vt, vt); + Asserts.assertEQ(result, 11*vt.hash() + 2*def.hashPrimitive()); + } + + class MyObject implements MyInterface { + public int x; + + public MyObject(int x) { + this.x = x; + } + + @ForceInline + public long hash() { + return x; + } + } + + // Test merging value types and interfaces + @Test() + public MyInterface test13(int state) { + MyInterface res = null; + if (state == 0) { + res = new MyObject(rI); + } else if (state == 1) { + res = MyValue1.createWithFieldsInline(rI, rL); + } else if (state == 2) { + 
res = MyValue1.createWithFieldsDontInline(rI, rL); + } else if (state == 3) { + res = (MyValue1)objectField1; + } else if (state == 4) { + res = valueField1; + } else if (state == 5) { + res = null; + } + return res; + } + + @DontCompile + public void test13_verifier(boolean warmup) { + objectField1 = valueField1; + MyInterface result = null; + result = test13(0); + Asserts.assertEQ(((MyObject)result).x, rI); + result = test13(1); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test13(2); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test13(3); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test13(4); + Asserts.assertEQ(((MyValue1)result).hash(), hash()); + result = test13(5); + Asserts.assertEQ(result, null); + } + + // Test merging value types and interfaces in loops + @Test() + public MyInterface test14(int iters) { + MyInterface res = new MyObject(rI); + for (int i = 0; i < iters; ++i) { + if (res instanceof MyObject) { + res = MyValue1.createWithFieldsInline(rI, rL); + } else { + res = MyValue1.createWithFieldsInline(((MyValue1)res).x + 1, rL); + } + } + return res; + } + + @DontCompile + public void test14_verifier(boolean warmup) { + MyObject result1 = (MyObject)test14(0); + Asserts.assertEQ(result1.x, rI); + int iters = (Math.abs(rI) % 10) + 1; + MyValue1 result2 = (MyValue1)test14(iters); + MyValue1 vt = MyValue1.createWithFieldsInline(rI + iters - 1, rL); + Asserts.assertEQ(result2.hash(), vt.hash()); + } + + // Test value types in interface variables that are live at safepoint + @Test(failOn = ALLOC + STORE + LOOP) + public long test15(MyValue1 arg, boolean deopt) { + MyInterface vt1 = MyValue1.createWithFieldsInline(rI, rL); + MyInterface vt2 = MyValue1.createWithFieldsDontInline(rI, rL); + MyInterface vt3 = arg; + MyInterface vt4 = valueField1; + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test15")); + } + return ((MyValue1)vt1).hash() + 
((MyValue1)vt2).hash() + + ((MyValue1)vt3).hash() + ((MyValue1)vt4).hash(); + } + + @DontCompile + public void test15_verifier(boolean warmup) { + long result = test15(valueField1, !warmup); + Asserts.assertEQ(result, 4*hash()); + } + + // Test comparing value types with interfaces + @Test(failOn = ALLOC + LOAD + STORE + LOOP) + public boolean test16(Object arg) { + MyInterface vt = MyValue1.createWithFieldsInline(rI, rL); + if (vt == arg || vt == (MyInterface)valueField1 || vt == interfaceField1 || vt == null || + arg == vt || (MyInterface)valueField1 == vt || interfaceField1 == vt || null == vt) { + return true; + } + return false; + } + + @DontCompile + public void test16_verifier(boolean warmup) { + boolean result = test16(null); + Asserts.assertFalse(result); + } + + // Test subtype check when casting to value type + @Test + public MyValue1 test17(MyValue1 vt, Object obj) { + try { + vt = (MyValue1)obj; + throw new RuntimeException("ClassCastException expected"); + } catch (ClassCastException e) { + // Expected + } + return vt; + } + + @DontCompile + public void test17_verifier(boolean warmup) { + MyValue1 vt = testValue1; + MyValue1 result = test17(vt, new Integer(rI)); + Asserts.assertEquals(result.hash(), vt.hash()); + } + + @Test + public MyValue1 test18(MyValue1 vt) { + Object obj = vt; + vt = (MyValue1)obj; + return vt; + } + + @DontCompile + public void test18_verifier(boolean warmup) { + MyValue1 vt = testValue1; + MyValue1 result = test18(vt); + Asserts.assertEquals(result.hash(), vt.hash()); + } + + @Test + public void test19(MyValue1 vt) { + Object obj = vt; + try { + MyValue2 vt2 = (MyValue2)obj; + throw new RuntimeException("ClassCastException expected"); + } catch (ClassCastException e) { + // Expected + } + } + + @DontCompile + public void test19_verifier(boolean warmup) { + test19(valueField1); + } + + @Test + public void test20(MyValue1 vt) { + Object obj = vt; + try { + Integer i = (Integer)obj; + throw new 
RuntimeException("ClassCastException expected"); + } catch (ClassCastException e) { + // Expected + } + } + + @DontCompile + public void test20_verifier(boolean warmup) { + test20(valueField1); + } + + // Array tests + + private static final MyValue1[] testValue1Array = new MyValue1[] {testValue1, + testValue1, + testValue1}; + + private static final MyValue1[][] testValue1Array2 = new MyValue1[][] {testValue1Array, + testValue1Array, + testValue1Array}; + + private static final MyValue2[] testValue2Array = new MyValue2[] {testValue2, + testValue2, + testValue2}; + + private static final Integer[] testIntegerArray = new Integer[42]; + + // Test load from (flattened) value type array disguised as object array + @Test() + public Object test21(Object[] oa, int index) { + return oa[index]; + } + + @DontCompile + public void test21_verifier(boolean warmup) { + MyValue1 result = (MyValue1)test21(testValue1Array, Math.abs(rI) % 3); + Asserts.assertEQ(result.hash(), hash()); + } + + // Test load from (flattened) value type array disguised as interface array + @Test() + public Object test22(MyInterface[] ia, int index) { + return ia[index]; + } + + @DontCompile + public void test22_verifier(boolean warmup) { + MyValue1 result = (MyValue1)test22(testValue1Array, Math.abs(rI) % 3); + Asserts.assertEQ(result.hash(), hash()); + } + + // Test value store to (flattened) value type array disguised as object array + + @ForceInline + public void test23_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test23(Object[] oa, MyValue1 vt, int index) { + test23_inline(oa, vt, index); + } + + @DontCompile + public void test23_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1 vt = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + test23(testValue1Array, vt, index); + Asserts.assertEQ(testValue1Array[index].hash(), vt.hash()); + testValue1Array[index] = testValue1; + try { + test23(testValue2Array, vt, index); + throw new 
RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue2Array[index].hash(), testValue2.hash()); + } + + @ForceInline + public void test24_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test24(Object[] oa, MyValue1 vt, int index) { + test24_inline(oa, vt, index); + } + + @DontCompile + public void test24_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test24(testIntegerArray, testValue1, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + } + + @ForceInline + public void test25_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test25(Object[] oa, MyValue1 vt, int index) { + test25_inline(oa, vt, index); + } + + @DontCompile + public void test25_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test25(null, testValue1, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + } + + // Test value store to (flattened) value type array disguised as interface array + @ForceInline + public void test26_inline(MyInterface[] ia, MyInterface i, int index) { + ia[index] = i; + } + + @Test() + public void test26(MyInterface[] ia, MyValue1 vt, int index) { + test26_inline(ia, vt, index); + } + + @DontCompile + public void test26_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1 vt = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + test26(testValue1Array, vt, index); + Asserts.assertEQ(testValue1Array[index].hash(), vt.hash()); + testValue1Array[index] = testValue1; + try { + test26(testValue2Array, vt, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue2Array[index].hash(), testValue2.hash()); + } + + @ForceInline + public void 
test27_inline(MyInterface[] ia, MyInterface i, int index) { + ia[index] = i; + } + + @Test() + public void test27(MyInterface[] ia, MyValue1 vt, int index) { + test27_inline(ia, vt, index); + } + + @DontCompile + public void test27_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test27(null, testValue1, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + } + + // Test object store to (flattened) value type array disguised as object array + @ForceInline + public void test28_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test28(Object[] oa, Object o, int index) { + test28_inline(oa, o, index); + } + + @DontCompile + public void test28_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1 vt1 = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + test28(testValue1Array, vt1, index); + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + try { + test28(testValue1Array, testValue2, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + testValue1Array[index] = testValue1; + } + + @ForceInline + public void test29_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test29(Object[] oa, Object o, int index) { + test29_inline(oa, o, index); + } + + @DontCompile + public void test29_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test29(testValue2Array, testValue1, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue2Array[index].hash(), testValue2.hash()); + } + + @ForceInline + public void test30_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test30(Object[] oa, Object o, int index) { + 
test30_inline(oa, o, index); + } + + @DontCompile + public void test30_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test30(testIntegerArray, testValue1, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + } + + // Test value store to (flattened) value type array disguised as interface array + @ForceInline + public void test31_inline(MyInterface[] ia, MyInterface i, int index) { + ia[index] = i; + } + + @Test() + public void test31(MyInterface[] ia, MyInterface i, int index) { + test31_inline(ia, i, index); + } + + @DontCompile + public void test31_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1 vt1 = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + test31(testValue1Array, vt1, index); + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + try { + test31(testValue1Array, testValue2, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + testValue1Array[index] = testValue1; + } + + @ForceInline + public void test32_inline(MyInterface[] ia, MyInterface i, int index) { + ia[index] = i; + } + + @Test() + public void test32(MyInterface[] ia, MyInterface i, int index) { + test32_inline(ia, i, index); + } + + @DontCompile + public void test32_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test32(testValue2Array, testValue1, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + } + + // Test writing null to a (flattened) value type array disguised as object array + @ForceInline + public void test33_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test33(Object[] oa, Object o, int index) { + test33_inline(oa, o, index); + } + + @DontCompile + public void test33_verifier(boolean 
warmup) { + int index = Math.abs(rI) % 3; + try { + test33(testValue1Array, null, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), hash()); + } + + // Test writing constant null to a (flattened) value type array disguised as object array + + @ForceInline + public void test34_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test34(Object[] oa, int index) { + test34_inline(oa, null, index); + } + + @DontCompile + public void test34_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test34(testValue1Array, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), hash()); + } + + // Test writing constant null to a (flattened) value type array + + private static final MethodHandle setArrayElementNull = MethodHandleBuilder.loadCode(MethodHandles.lookup(), + "setArrayElementNull", + MethodType.methodType(void.class, TestLWorld.class, MyValue1[].class, int.class), + CODE -> { + CODE. + aload_1(). + iload_2(). + aconst_null(). + aastore(). 
+ return_(); + }); + + @Test() + public void test35(MyValue1[] va, int index) throws Throwable { + setArrayElementNull.invoke(this, va, index); + } + + @DontCompile + public void test35_verifier(boolean warmup) throws Throwable { + int index = Math.abs(rI) % 3; + try { + test35(testValue1Array, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), hash()); + } + + // Test writing a value type to a null value type array + @Test() + public void test36(MyValue1[] va, MyValue1 vt, int index) { + va[index] = vt; + } + + @DontCompile + public void test36_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test36(null, testValue1Array[index], index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + } + + // Test incremental inlining + @ForceInline + public void test37_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test37(MyValue1[] va, Object o, int index) { + test37_inline(va, o, index); + } + + @DontCompile + public void test37_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1 vt1 = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + test37(testValue1Array, vt1, index); + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + try { + test37(testValue1Array, testValue2, index); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), vt1.hash()); + testValue1Array[index] = testValue1; + } + + // Test merging of value type arrays + + @ForceInline + public Object[] test38_inline() { + return new MyValue1[42]; + } + + @Test() + public Object[] test38(Object[] oa, Object o, int i1, int i2, int num) { + Object[] result = null; + switch (num) { + case 0: + result = test38_inline(); + break; + case 1: + result = oa; + break; + 
case 2: + result = testValue1Array; + break; + case 3: + result = testValue2Array; + break; + case 4: + result = testIntegerArray; + break; + case 5: + result = null; + break; + case 6: + result = testValue1Array2; + break; + } + result[i1] = result[i2]; + result[i2] = o; + return result; + } + + @DontCompile + public void test38_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + MyValue1[] va = new MyValue1[42]; + Object[] result = test38(null, testValue1, index, index, 0); + Asserts.assertEQ(((MyValue1)result[index]).hash(), testValue1.hash()); + result = test38(testValue1Array, testValue1, index, index, 1); + Asserts.assertEQ(((MyValue1)result[index]).hash(), testValue1.hash()); + result = test38(null, testValue1, index, index, 2); + Asserts.assertEQ(((MyValue1)result[index]).hash(), testValue1.hash()); + result = test38(null, testValue2, index, index, 3); + Asserts.assertEQ(((MyValue2)result[index]).hash(), testValue2.hash()); + try { + result = test38(null, null, index, index, 3); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + result = test38(null, null, index, index, 4); + try { + result = test38(null, testValue1, index, index, 4); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + try { + result = test38(null, testValue1, index, index, 5); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + result = test38(null, testValue1Array, index, index, 6); + Asserts.assertEQ(((MyValue1[][])result)[index][index].hash(), testValue1.hash()); + } + + @ForceInline + public Object test39_inline() { + return new MyValue1[42]; + } + + // Same as above but merging into Object instead of Object[] + @Test() + public Object test39(Object oa, Object o, int i1, int i2, int num) { + Object result = null; + switch (num) { + case 0: + result = test39_inline(); + break; + case 1: + result = oa; + 
break; + case 2: + result = testValue1Array; + break; + case 3: + result = testValue2Array; + break; + case 4: + result = testIntegerArray; + break; + case 5: + result = null; + break; + case 6: + result = testValue1; + break; + case 7: + result = testValue2; + break; + case 8: + result = MyValue1.createWithFieldsInline(rI, rL); + break; + case 9: + result = new Integer(42); + break; + case 10: + result = testValue1Array2; + break; + } + if (result instanceof Object[]) { + ((Object[])result)[i1] = ((Object[])result)[i2]; + ((Object[])result)[i2] = o; + } + return result; + } + + @DontCompile + public void test39_verifier(boolean warmup) { + if (!ENABLE_VALUE_ARRAY_COVARIANCE) { + return; + } + int index = Math.abs(rI) % 3; + MyValue1[] va = new MyValue1[42]; + Object result = test39(null, testValue1, index, index, 0); + Asserts.assertEQ(((MyValue1[])result)[index].hash(), testValue1.hash()); + result = test39(testValue1Array, testValue1, index, index, 1); + Asserts.assertEQ(((MyValue1[])result)[index].hash(), testValue1.hash()); + result = test39(null, testValue1, index, index, 2); + Asserts.assertEQ(((MyValue1[])result)[index].hash(), testValue1.hash()); + result = test39(null, testValue2, index, index, 3); + Asserts.assertEQ(((MyValue2[])result)[index].hash(), testValue2.hash()); + try { + result = test39(null, null, index, index, 3); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + result = test39(null, null, index, index, 4); + try { + result = test39(null, testValue1, index, index, 4); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + result = test39(null, testValue1, index, index, 5); + Asserts.assertEQ(result, null); + result = test39(null, testValue1, index, index, 6); + Asserts.assertEQ(((MyValue1)result).hash(), testValue1.hash()); + result = test39(null, testValue1, index, index, 7); + Asserts.assertEQ(((MyValue2)result).hash(), 
testValue2.hash()); + result = test39(null, testValue1, index, index, 8); + Asserts.assertEQ(((MyValue1)result).hash(), testValue1.hash()); + result = test39(null, testValue1, index, index, 9); + Asserts.assertEQ(((Integer)result), 42); + result = test39(null, testValue1Array, index, index, 10); + Asserts.assertEQ(((MyValue1[][])result)[index][index].hash(), testValue1.hash()); + } + + // Test instanceof with value types and arrays + @Test() + public long test40(Object o, int index) { + if (o instanceof MyValue1) { + return ((MyValue1)o).hashInterpreted(); + } else if (o instanceof MyValue1[]) { + return ((MyValue1[])o)[index].hashInterpreted(); + } else if (o instanceof MyValue2) { + return ((MyValue2)o).hash(); + } else if (o instanceof MyValue2[]) { + return ((MyValue2[])o)[index].hash(); + } else if (o instanceof MyValue1[][]) { + return ((MyValue1[][])o)[index][index].hash(); + } else if (o instanceof Long) { + return (long)o; + } + return 0; + } + + @DontCompile + public void test40_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + long result = test40(testValue1, 0); + Asserts.assertEQ(result, testValue1.hash()); + result = test40(testValue1Array, index); + Asserts.assertEQ(result, testValue1.hash()); + result = test40(testValue2, index); + Asserts.assertEQ(result, testValue2.hash()); + result = test40(testValue2Array, index); + Asserts.assertEQ(result, testValue2.hash()); + result = test40(testValue1Array2, index); + Asserts.assertEQ(result, testValue1.hash()); + result = test40(new Long(42), index); + Asserts.assertEQ(result, 42L); + } + + // Test for bug in Escape Analysis + @DontInline + public void test41_dontinline(Object o) { + Asserts.assertEQ(o, rI); + } + + @Test() + public void test41() { + MyValue1[] vals = new MyValue1[] {testValue1}; + test41_dontinline(vals[0].oa[0]); + test41_dontinline(vals[0].oa[0]); + } + + @DontCompile + public void test41_verifier(boolean warmup) { + test41(); + } + + // Test for bug in Escape Analysis + 
private static final MyValue1.box test42VT1 = MyValue1.createWithFieldsInline(rI, rL); + private static final MyValue1.box test42VT2 = MyValue1.createWithFieldsInline(rI + 1, rL + 1); + + @Test() + public void test42() { + MyValue1[] vals = new MyValue1[] {test42VT1, test42VT2}; + Asserts.assertEQ(vals[0].hash(), test42VT1.hash()); + Asserts.assertEQ(vals[1].hash(), test42VT2.hash()); + } + + @DontCompile + public void test42_verifier(boolean warmup) { + if (!warmup) test42(); // We need -Xcomp behavior + } + + // Test for bug in Escape Analysis + @Test() + public long test43(boolean deopt) { + MyValue1[] vals = new MyValue1[] {test42VT1, test42VT2}; + + if (deopt) { + // uncommon trap + WHITE_BOX.deoptimizeMethod(tests.get(getClass().getSimpleName() + "::test43")); + Asserts.assertEQ(vals[0].hash(), test42VT1.hash()); + Asserts.assertEQ(vals[1].hash(), test42VT2.hash()); + } + + return vals[0].hash(); + } + + @DontCompile + public void test43_verifier(boolean warmup) { + test43(!warmup); + } + + // Tests writing an array element with a (statically known) incompatible type + private static final MethodHandle setArrayElementIncompatible = MethodHandleBuilder.loadCode(MethodHandles.lookup(), + "setArrayElementIncompatible", + MethodType.methodType(void.class, TestLWorld.class, MyValue1[].class, int.class, MyValue2.class.asValueType()), + CODE -> { + CODE. + aload_1(). + iload_2(). + aload_3(). + aastore(). 
+ return_(); + }); + + @Test() + public void test44(MyValue1[] va, int index, MyValue2 v) throws Throwable { + setArrayElementIncompatible.invoke(this, va, index, v); + } + + @DontCompile + public void test44_verifier(boolean warmup) throws Throwable { + int index = Math.abs(rI) % 3; + try { + test44(testValue1Array, index, testValue2); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), hash()); + } + + // Tests writing an array element with a (statically known) incompatible type + @ForceInline + public void test45_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test45(MyValue1[] va, int index, MyValue2 v) throws Throwable { + test45_inline(va, v, index); + } + + @DontCompile + public void test45_verifier(boolean warmup) throws Throwable { + int index = Math.abs(rI) % 3; + try { + test45(testValue1Array, index, testValue2); + throw new RuntimeException("No ArrayStoreException thrown"); + } catch (ArrayStoreException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), hash()); + } + + // instanceof tests with values + @Test + public boolean test46(MyValue1 vt) { + Object obj = vt; + return obj instanceof MyValue1; + } + + @DontCompile + public void test46_verifier(boolean warmup) { + MyValue1 vt = testValue1; + boolean result = test46(vt); + Asserts.assertTrue(result); + } + + @Test + public boolean test47(MyValue1 vt) { + Object obj = vt; + return obj instanceof MyValue2; + } + + @DontCompile + public void test47_verifier(boolean warmup) { + MyValue1 vt = testValue1; + boolean result = test47(vt); + Asserts.assertFalse(result); + } + + @Test + public boolean test48(Object obj) { + return obj instanceof MyValue1; + } + + @DontCompile + public void test48_verifier(boolean warmup) { + MyValue1 vt = testValue1; + boolean result = test48(vt); + Asserts.assertTrue(result); + } + + @Test + public 
boolean test49(Object obj) { + return obj instanceof MyValue2; + } + + @DontCompile + public void test49_verifier(boolean warmup) { + MyValue1 vt = testValue1; + boolean result = test49(vt); + Asserts.assertFalse(result); + } + + @Test + public boolean test50(Object obj) { + return obj instanceof MyValue1; + } + + @DontCompile + public void test50_verifier(boolean warmup) { + boolean result = test49(new Integer(42)); + Asserts.assertFalse(result); + } + + // Value type with some non-flattened fields + value final class Test51Value { + final Object objectField1 = null; + final Object objectField2 = null; + final Object objectField3 = null; + final Object objectField4 = null; + final Object objectField5 = null; + final Object objectField6 = null; + + final MyValue1.val valueField1; + final MyValue1.val valueField2; + final MyValue1.box valueField3; + final MyValue1.val valueField4; + final MyValue1.box valueField5; + + private Test51Value() { + valueField1 = testValue1; + valueField2 = testValue1; + valueField3 = testValue1; + valueField4 = MyValue1.createDefaultDontInline(); + valueField5 = MyValue1.createDefaultDontInline(); + } + + public Test51Value init() { + Test51Value vt = __WithField(this.valueField1, testValue1); + vt = __WithField(vt.valueField2, testValue1); + vt = __WithField(vt.valueField3, testValue1); + return vt; + } + + @ForceInline + public long test(Test51Value holder, MyValue1 vt1, Object vt2) { + holder = __WithField(holder.objectField1, vt1); + holder = __WithField(holder.objectField2, (MyValue1)vt2); + holder = __WithField(holder.objectField3, testValue1); + holder = __WithField(holder.objectField4, MyValue1.createWithFieldsDontInline(rI, rL)); + holder = __WithField(holder.objectField5, holder.valueField1); + holder = __WithField(holder.objectField6, holder.valueField3); + holder = __WithField(holder.valueField1, (MyValue1)holder.objectField1); + holder = __WithField(holder.valueField2, (MyValue1)vt2); + holder = 
__WithField(holder.valueField3, (MyValue1)vt2); + + return ((MyValue1)holder.objectField1).hash() + + ((MyValue1)holder.objectField2).hash() + + ((MyValue1)holder.objectField3).hash() + + ((MyValue1)holder.objectField4).hash() + + ((MyValue1)holder.objectField5).hash() + + ((MyValue1)holder.objectField6).hash() + + holder.valueField1.hash() + + holder.valueField2.hash() + + holder.valueField3.hash() + + holder.valueField4.hashPrimitive(); + } + } + + // Same as test2 but with field holder being a value type + @Test() + public long test51(Test51Value holder, MyValue1 vt1, Object vt2) { + return holder.test(holder, vt1, vt2); + } + + @DontCompile + public void test51_verifier(boolean warmup) { + MyValue1 vt = testValue1; + MyValue1 def = MyValue1.createDefaultDontInline(); + Test51Value holder = Test51Value.default; + Asserts.assertEQ(testValue1.hash(), vt.hash()); + holder = holder.init(); + Asserts.assertEQ(holder.valueField1.hash(), vt.hash()); + long result = test51(holder, vt, vt); + Asserts.assertEQ(result, 9*vt.hash() + def.hashPrimitive()); + } + + // Access non-flattened, uninitialized value type field with value type holder + @Test() + public void test52(Test51Value holder) { + if ((Object)holder.valueField5 != null) { + throw new RuntimeException("Should be null"); + } + } + + @DontCompile + public void test52_verifier(boolean warmup) { + Test51Value vt = Test51Value.default; + test52(vt); + } + + // Merging value types of different types + @Test() + public Object test53(Object o, boolean b) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + return b ? vt : o; + } + + @DontCompile + public void test53_verifier(boolean warmup) { + test53(new Object(), false); + MyValue1 result = (MyValue1)test53(new Object(), true); + Asserts.assertEQ(result.hash(), hash()); + } + + @Test() + public Object test54(boolean b) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + return b ? 
vt : testValue2; + } + + @DontCompile + public void test54_verifier(boolean warmup) { + MyValue1 result1 = (MyValue1)test54(true); + Asserts.assertEQ(result1.hash(), hash()); + MyValue2 result2 = (MyValue2)test54(false); + Asserts.assertEQ(result2.hash(), testValue2.hash()); + } + + @Test() + public Object test55(boolean b) { + MyValue1 vt1 = MyValue1.createWithFieldsInline(rI, rL); + MyValue2 vt2 = MyValue2.createWithFieldsInline(rI, true); + return b ? vt1 : vt2; + } + + @DontCompile + public void test55_verifier(boolean warmup) { + MyValue1 result1 = (MyValue1)test55(true); + Asserts.assertEQ(result1.hash(), hash()); + MyValue2 result2 = (MyValue2)test55(false); + Asserts.assertEQ(result2.hash(), testValue2.hash()); + } + + // Test synchronization on value types + @Test() + public void test56(Object vt) { + synchronized (vt) { + throw new RuntimeException("test56 failed: synchronization on value type should not succeed"); + } + } + + @DontCompile + public void test56_verifier(boolean warmup) { + try { + test56(testValue1); + throw new RuntimeException("test56 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + } + + @ForceInline + public void test57_inline(Object vt) { + synchronized (vt) { + throw new RuntimeException("test57 failed: synchronization on value type should not succeed"); + } + } + + @Test() + public void test57(MyValue1 vt) { + test57_inline(vt); + } + + @DontCompile + public void test57_verifier(boolean warmup) { + try { + test57(testValue1); + throw new RuntimeException("test57 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + } + + @ForceInline + public void test58_inline(Object vt) { + synchronized (vt) { + throw new RuntimeException("test58 failed: synchronization on value type should not succeed"); + } + } + + @Test() + public void test58() { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + test58_inline(vt); + } + + @DontCompile + public 
void test58_verifier(boolean warmup) { + try { + test58(); + throw new RuntimeException("test58 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + } + + @Test() + public void test59(Object o, boolean b) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + Object sync = b ? vt : o; + synchronized (sync) { + if (b) { + throw new RuntimeException("test59 failed: synchronization on value type should not succeed"); + } + } + } + + @DontCompile + public void test59_verifier(boolean warmup) { + test59(new Object(), false); + try { + test59(new Object(), true); + throw new RuntimeException("test59 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + } + + @Test() + public void test60(boolean b) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + Object sync = b ? vt : testValue2; + synchronized (sync) { + throw new RuntimeException("test60 failed: synchronization on value type should not succeed"); + } + } + + @DontCompile + public void test60_verifier(boolean warmup) { + try { + test60(false); + throw new RuntimeException("test60 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + try { + test60(true); + throw new RuntimeException("test60 failed: no exception thrown"); + } catch (IllegalMonitorStateException ex) { + // Expected + } + } + + // Test catching the IllegalMonitorStateException in compiled code + @Test() + public void test61(Object vt) { + boolean thrown = false; + try { + synchronized (vt) { + throw new RuntimeException("test61 failed: no exception thrown"); + } + } catch (IllegalMonitorStateException ex) { + thrown = true; + } + if (!thrown) { + throw new RuntimeException("test61 failed: no exception thrown"); + } + } + + @DontCompile + public void test61_verifier(boolean warmup) { + test61(testValue1); + } + + @Test() + public void test62(Object o) { + try { + synchronized (o) { } + } catch 
(IllegalMonitorStateException ex) { + // Expected + return; + } + throw new RuntimeException("test62 failed: no exception thrown"); + } + + @DontCompile + public void test62_verifier(boolean warmup) { + test62(testValue1); + } + + // Test synchronization without any instructions in the synchronized block + @Test() + public void test63(Object o) { + synchronized (o) { } + } + + @DontCompile + public void test63_verifier(boolean warmup) { + try { + test63(testValue1); + } catch (IllegalMonitorStateException ex) { + // Expected + return; + } + throw new RuntimeException("test63 failed: no exception thrown"); + } + + // type system test with interface and value type + @ForceInline + public MyInterface test64_helper(MyValue1 vt) { + return vt; + } + + @Test() + public MyInterface test64(MyValue1 vt) { + return test64_helper(vt); + } + + @DontCompile + public void test64_verifier(boolean warmup) { + test64(testValue1); + } + + // Array store tests + @Test() + public void test65(Object[] array, MyValue1 vt) { + array[0] = vt; + } + + @DontCompile + public void test65_verifier(boolean warmup) { + Object[] array = new Object[1]; + test65(array, testValue1); + Asserts.assertEQ(((MyValue1)array[0]).hash(), testValue1.hash()); + } + + @Test() + public void test66(Object[] array, MyValue1 vt) { + array[0] = vt; + } + + @DontCompile + public void test66_verifier(boolean warmup) { + MyValue1[] array = new MyValue1[1]; + test66(array, testValue1); + Asserts.assertEQ(array[0].hash(), testValue1.hash()); + } + + @Test() + public void test67(Object[] array, Object vt) { + array[0] = vt; + } + + @DontCompile + public void test67_verifier(boolean warmup) { + MyValue1[] array = new MyValue1[1]; + test67(array, testValue1); + Asserts.assertEQ(array[0].hash(), testValue1.hash()); + } + + @Test() + public void test68(Object[] array, Integer o) { + array[0] = o; + } + + @DontCompile + public void test68_verifier(boolean warmup) { + Integer[] array = new Integer[1]; + test68(array, 1); + 
Asserts.assertEQ(array[0], Integer.valueOf(1)); + } + + // Test convertion between a value type and java.lang.Object without an allocation + @ForceInline + public Object test69_sum(Object a, Object b) { + int sum = ((MyValue1)a).x + ((MyValue1)b).x; + return MyValue1.setX(((MyValue1)a), sum); + } + + @Test(failOn = ALLOC + STORE) + public int test69(MyValue1[] array) { + MyValue1 result = MyValue1.createDefaultInline(); + for (int i = 0; i < array.length; ++i) { + result = (MyValue1)test69_sum(result, array[i]); + } + return result.x; + } + + @DontCompile + public void test69_verifier(boolean warmup) { + int result = test69(testValue1Array); + Asserts.assertEQ(result, rI * testValue1Array.length); + } + + // Same as test69 but with an Interface + @ForceInline + public MyInterface test70_sum(MyInterface a, MyInterface b) { + int sum = ((MyValue1)a).x + ((MyValue1)b).x; + return MyValue1.setX(((MyValue1)a), sum); + } + + @Test(failOn = ALLOC + STORE) + public int test70(MyValue1[] array) { + MyValue1 result = MyValue1.createDefaultInline(); + for (int i = 0; i < array.length; ++i) { + result = (MyValue1)test70_sum(result, array[i]); + } + return result.x; + } + + @DontCompile + public void test70_verifier(boolean warmup) { + int result = test70(testValue1Array); + Asserts.assertEQ(result, rI * testValue1Array.length); + } + + // Test that allocated value type is not used in non-dominated path + public MyValue1 test71_inline(Object obj) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + try { + vt = (MyValue1)obj; + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return vt; + } + + @Test + public MyValue1 test71() { + return test71_inline(null); + } + + @DontCompile + public void test71_verifier(boolean warmup) { + MyValue1 vt = test71(); + Asserts.assertEquals(vt.hash(), hash()); + } + + // Test calling a method on an uninitialized value type + value final class Test72Value { + final int 
x = 42; + public int get() { + return x; + } + } + + // Make sure Test72Value is loaded but not initialized + public void unused(Test72Value vt) { } + + @Test + @Warmup(0) + public int test72() { + Test72Value vt = Test72Value.default; + return vt.get(); + } + + @DontCompile + public void test72_verifier(boolean warmup) { + int result = test72(); + Asserts.assertEquals(result, 0); + } + + // Tests for loading/storing unkown values + @Test + public Object test73(Object[] va) { + return va[0]; + } + + @DontCompile + public void test73_verifier(boolean warmup) { + MyValue1 vt = (MyValue1)test73(testValue1Array); + Asserts.assertEquals(testValue1Array[0].hash(), vt.hash()); + } + + @Test + public void test74(Object[] va, Object vt) { + va[0] = vt; + } + + @DontCompile + public void test74_verifier(boolean warmup) { + MyValue1[] va = new MyValue1[1]; + test74(va, testValue1); + Asserts.assertEquals(va[0].hash(), testValue1.hash()); + } + + // Verify that mixing instances and arrays with the clone api + // doesn't break anything + @Test + public Object test75(Object o) { + MyValue1[] va = new MyValue1[1]; + Object[] next = va; + Object[] arr = va; + for (int i = 0; i < 10; i++) { + arr = next; + next = new Integer[1]; + } + return arr[0]; + } + + @DontCompile + public void test75_verifier(boolean warmup) { + test75(42); + } + + // Casting a null Integer to a (non-nullable) value type should throw a NullPointerException + @ForceInline + public MyValue1 test76_helper(Object o) { + return (MyValue1)o; + } + + @Test + public MyValue1 test76(Integer i) throws Throwable { + return test76_helper(i); + } + + @DontCompile + public void test76_verifier(boolean warmup) throws Throwable { + try { + test76(null); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } catch (Exception e) { + throw new RuntimeException("test76 failed: unexpected exception", e); + } + } + + // Casting an Integer to a (non-nullable) value 
type should throw a ClassCastException + @ForceInline + public MyValue1 test77_helper(Object o) { + return (MyValue1)o; + } + + @Test + public MyValue1 test77(Integer i) throws Throwable { + return test77_helper(i); + } + + @DontCompile + public void test77_verifier(boolean warmup) throws Throwable { + try { + test77(new Integer(42)); + throw new RuntimeException("ClassCastException expected"); + } catch (ClassCastException e) { + // Expected + } catch (Exception e) { + throw new RuntimeException("test77 failed: unexpected exception", e); + } + } + + // Casting a null Integer to a nullable value type should not throw + @ForceInline + public MyValue1.box test78_helper(Object o) { + return (MyValue1.box)o; + } + + @Test + public MyValue1.box test78(Integer i) throws Throwable { + return test78_helper(i); + } + + @DontCompile + public void test78_verifier(boolean warmup) throws Throwable { + try { + test78(null); // Should not throw + } catch (Exception e) { + throw new RuntimeException("test78 failed: unexpected exception", e); + } + } + + // Casting an Integer to a nullable value type should throw a ClassCastException + @ForceInline + public MyValue1.box test79_helper(Object o) { + return (MyValue1.box)o; + } + + @Test + public MyValue1.box test79(Integer i) throws Throwable { + return test79_helper(i); + } + + @DontCompile + public void test79_verifier(boolean warmup) throws Throwable { + try { + test79(new Integer(42)); + throw new RuntimeException("ClassCastException expected"); + } catch (ClassCastException e) { + // Expected + } catch (Exception e) { + throw new RuntimeException("test79 failed: unexpected exception", e); + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestMethodHandles.java 2019-03-11 14:27:37.390353894 +0100 @@ -0,0 +1,498 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import java.lang.invoke.*; +import java.lang.reflect.Method; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test method handle support for value types + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestMethodHandles.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * -DVerifyIR=false + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestMethodHandles + */ +public class TestMethodHandles extends ValueTypeTest { + // Extra VM parameters for some test scenarios. 
See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + // Prevent inlining through MethodHandle linkTo adapters to stress the calling convention + case 2: return new String[] {"-XX:CompileCommand=dontinline,java.lang.invoke.DirectMethodHandle::internalMemberName"}; + case 3: return new String[] {"-XX:-ValueArrayFlatten"}; + case 4: return new String[] {"-XX:CompileCommand=dontinline,java.lang.invoke.DirectMethodHandle::internalMemberName"}; + } + return null; + } + + static { + try { + Class clazz = TestMethodHandles.class; + ClassLoader loader = clazz.getClassLoader(); + MethodHandles.Lookup lookup = MethodHandles.lookup(); + + MethodType mt = MethodType.methodType(MyValue3.class.asValueType()); + test1_mh = lookup.findVirtual(clazz, "test1_target", mt); + test2_mh = lookup.findVirtual(clazz, "test2_target", mt); + test3_mh = lookup.findVirtual(clazz, "test3_target", mt); + + MethodType test4_mt1 = MethodType.methodType(int.class, MyValue1.class.asValueType()); + MethodType test4_mt2 = MethodType.methodType(MyValue1.class.asValueType()); + MethodHandle test4_mh1 = lookup.findStatic(clazz, "test4_helper1", test4_mt1); + MethodHandle test4_mh2 = lookup.findStatic(clazz, "test4_helper2", test4_mt2); + test4_mh = MethodHandles.filterReturnValue(test4_mh2, test4_mh1); + + MethodType test5_mt = MethodType.methodType(int.class, MyValue1.class.asValueType()); + test5_mh = lookup.findVirtual(clazz, "test5_target", test5_mt); + + MethodType test6_mt = MethodType.methodType(MyValue3.class.asValueType()); + MethodHandle test6_mh1 = lookup.findVirtual(clazz, "test6_target1", test6_mt); + MethodHandle test6_mh2 = lookup.findVirtual(clazz, "test6_target2", test6_mt); + MethodType boolean_mt = MethodType.methodType(boolean.class); + MethodHandle test6_mh_test = lookup.findVirtual(clazz, "test6_test", boolean_mt); + test6_mh = MethodHandles.guardWithTest(test6_mh_test, test6_mh1, test6_mh2); + + MethodType 
myvalue2_mt = MethodType.methodType(MyValue2.class.asValueType()); + test7_mh1 = lookup.findStatic(clazz, "test7_target1", myvalue2_mt); + MethodHandle test7_mh2 = lookup.findStatic(clazz, "test7_target2", myvalue2_mt); + MethodHandle test7_mh_test = lookup.findStatic(clazz, "test7_test", boolean_mt); + test7_mh = MethodHandles.guardWithTest(test7_mh_test, + MethodHandles.invoker(myvalue2_mt), + MethodHandles.dropArguments(test7_mh2, 0, MethodHandle.class)); + + MethodHandle test8_mh1 = lookup.findStatic(clazz, "test8_target1", myvalue2_mt); + test8_mh2 = lookup.findStatic(clazz, "test8_target2", myvalue2_mt); + MethodHandle test8_mh_test = lookup.findStatic(clazz, "test8_test", boolean_mt); + test8_mh = MethodHandles.guardWithTest(test8_mh_test, + MethodHandles.dropArguments(test8_mh1, 0, MethodHandle.class), + MethodHandles.invoker(myvalue2_mt)); + + MethodType test9_mt = MethodType.methodType(MyValue3.class.asValueType()); + MethodHandle test9_mh1 = lookup.findVirtual(clazz, "test9_target1", test9_mt); + MethodHandle test9_mh2 = lookup.findVirtual(clazz, "test9_target2", test9_mt); + MethodHandle test9_mh3 = lookup.findVirtual(clazz, "test9_target3", test9_mt); + MethodType test9_mt2 = MethodType.methodType(boolean.class); + MethodHandle test9_mh_test1 = lookup.findVirtual(clazz, "test9_test1", test9_mt2); + MethodHandle test9_mh_test2 = lookup.findVirtual(clazz, "test9_test2", test9_mt2); + test9_mh = MethodHandles.guardWithTest(test9_mh_test1, + test9_mh1, + MethodHandles.guardWithTest(test9_mh_test2, test9_mh2, test9_mh3)); + + MethodType test10_mt = MethodType.methodType(MyValue2.class.asValueType()); + MethodHandle test10_mh1 = lookup.findStatic(clazz, "test10_target1", test10_mt); + test10_mh2 = lookup.findStatic(clazz, "test10_target2", test10_mt); + test10_mh3 = lookup.findStatic(clazz, "test10_target3", test10_mt); + MethodType test10_mt2 = MethodType.methodType(boolean.class); + MethodType test10_mt3 = 
MethodType.methodType(MyValue2.class.asValueType()); + MethodHandle test10_mh_test1 = lookup.findStatic(clazz, "test10_test1", test10_mt2); + MethodHandle test10_mh_test2 = lookup.findStatic(clazz, "test10_test2", test10_mt2); + test10_mh = MethodHandles.guardWithTest(test10_mh_test1, + MethodHandles.dropArguments(test10_mh1, 0, MethodHandle.class, MethodHandle.class), + MethodHandles.guardWithTest(test10_mh_test2, + MethodHandles.dropArguments(MethodHandles.invoker(test10_mt3), 1, MethodHandle.class), + MethodHandles.dropArguments(MethodHandles.invoker(test10_mt3), 0, MethodHandle.class)) + ); + + MethodHandle test11_mh1 = lookup.findStatic(clazz, "test11_target1", myvalue2_mt); + test11_mh2 = lookup.findStatic(clazz, "test11_target2", myvalue2_mt); + MethodHandle test11_mh_test = lookup.findStatic(clazz, "test11_test", boolean_mt); + test11_mh = MethodHandles.guardWithTest(test11_mh_test, + MethodHandles.dropArguments(test11_mh1, 0, MethodHandle.class), + MethodHandles.invoker(myvalue2_mt)); + } catch (NoSuchMethodException | IllegalAccessException e) { + e.printStackTrace(); + throw new RuntimeException("Method handle lookup failed"); + } + } + + public static void main(String[] args) throws Throwable { + TestMethodHandles test = new TestMethodHandles(); + test.run(args, MyValue1.class.asValueType(), MyValue2.class.asValueType(), MyValue2Inline.class.asValueType(), MyValue3.class.asValueType(), MyValue3Inline.class.asValueType()); + } + + // Everything inlined + final MyValue3 test1_vt = MyValue3.create(); + + @ForceInline + MyValue3 test1_target() { + return test1_vt; + } + + static final MethodHandle test1_mh; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + STORE + CALL) + @Test(valid = ValueTypeReturnedAsFieldsOff, match = { ALLOC, STORE }, matchCount = { 1, 12 }) + public MyValue3 test1() throws Throwable { + return (MyValue3)test1_mh.invokeExact(this); + } + + @DontCompile + public void test1_verifier(boolean warmup) throws Throwable { + 
MyValue3 vt = test1(); + test1_vt.verify(vt); + } + + // Leaf method not inlined but returned type is known + final MyValue3 test2_vt = MyValue3.create(); + @DontInline + MyValue3 test2_target() { + return test2_vt; + } + + static final MethodHandle test2_mh; + + @Test + public MyValue3 test2() throws Throwable { + return (MyValue3)test2_mh.invokeExact(this); + } + + @DontCompile + public void test2_verifier(boolean warmup) throws Throwable { + Method helper_m = getClass().getDeclaredMethod("test2_target"); + if (!warmup && USE_COMPILER && !WHITE_BOX.isMethodCompiled(helper_m, false)) { + WHITE_BOX.enqueueMethodForCompilation(helper_m, COMP_LEVEL_FULL_OPTIMIZATION); + Asserts.assertTrue(WHITE_BOX.isMethodCompiled(helper_m, false), "test2_target not compiled"); + } + MyValue3 vt = test2(); + test2_vt.verify(vt); + } + + // Leaf method not inlined and returned type not known + final MyValue3 test3_vt = MyValue3.create(); + @DontInline + MyValue3 test3_target() { + return test3_vt; + } + + static final MethodHandle test3_mh; + + @Test + public MyValue3 test3() throws Throwable { + return (MyValue3)test3_mh.invokeExact(this); + } + + @DontCompile + public void test3_verifier(boolean warmup) throws Throwable { + // hack so C2 doesn't know the target of the invoke call + Class c = Class.forName("java.lang.invoke.DirectMethodHandle"); + Method m = c.getDeclaredMethod("internalMemberName", Object.class); + WHITE_BOX.testSetDontInlineMethod(m, warmup); + MyValue3 vt = test3(); + test3_vt.verify(vt); + } + + // When test75_helper1 is inlined in test75, the method handle + // linker that called it is passed a pointer to a copy of vt + // stored in memory. The method handle linker needs to load the + // fields from memory before it inlines test75_helper1. 
+ static public int test4_helper1(MyValue1 vt) { + return vt.x; + } + + static MyValue1 test4_vt = MyValue1.createWithFieldsInline(rI, rL); + static public MyValue1 test4_helper2() { + return test4_vt; + } + + static final MethodHandle test4_mh; + + @Test + public int test4() throws Throwable { + return (int)test4_mh.invokeExact(); + } + + @DontCompile + public void test4_verifier(boolean warmup) throws Throwable { + int i = test4(); + Asserts.assertEQ(i, test4_vt.x); + } + + // Test method handle call with value type argument + public int test5_target(MyValue1 vt) { + return vt.x; + } + + static final MethodHandle test5_mh; + MyValue1 test5_vt = MyValue1.createWithFieldsInline(rI, rL); + + @Test + public int test5() throws Throwable { + return (int)test5_mh.invokeExact(this, test5_vt); + } + + @DontCompile + public void test5_verifier(boolean warmup) throws Throwable { + int i = test5(); + Asserts.assertEQ(i, test5_vt.x); + } + + // Return of target1 and target2 merged in a Lambda Form as an + // Object. Shouldn't cause any allocation + final MyValue3 test6_vt1 = MyValue3.create(); + @ForceInline + MyValue3 test6_target1() { + return test6_vt1; + } + + final MyValue3 test6_vt2 = MyValue3.create(); + @ForceInline + MyValue3 test6_target2() { + return test6_vt2; + } + + boolean test6_bool = true; + @ForceInline + boolean test6_test() { + return test6_bool; + } + + static final MethodHandle test6_mh; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public MyValue3 test6() throws Throwable { + return (MyValue3)test6_mh.invokeExact(this); + } + + @DontCompile + public void test6_verifier(boolean warmup) throws Throwable { + test6_bool = !test6_bool; + MyValue3 vt = test6(); + vt.verify(test6_bool ? test6_vt1 : test6_vt2); + } + + // Similar as above but with the method handle for target1 not + // constant. Shouldn't cause any allocation. 
+ @ForceInline + static MyValue2 test7_target1() { + return MyValue2.createWithFieldsInline(rI, true); + } + + @ForceInline + static MyValue2 test7_target2() { + return MyValue2.createWithFieldsInline(rI+1, false); + } + + static boolean test7_bool = true; + @ForceInline + static boolean test7_test() { + return test7_bool; + } + + static final MethodHandle test7_mh; + static MethodHandle test7_mh1; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public long test7() throws Throwable { + return ((MyValue2)test7_mh.invokeExact(test7_mh1)).hash(); + } + + @DontCompile + public void test7_verifier(boolean warmup) throws Throwable { + test7_bool = !test7_bool; + long hash = test7(); + Asserts.assertEQ(hash, MyValue2.createWithFieldsInline(rI+(test7_bool ? 0 : 1), test7_bool).hash()); + } + + // Same as above but with the method handle for target2 not + // constant. Shouldn't cause any allocation. + @ForceInline + static MyValue2 test8_target1() { + return MyValue2.createWithFieldsInline(rI, true); + } + + @ForceInline + static MyValue2 test8_target2() { + return MyValue2.createWithFieldsInline(rI+1, false); + } + + static boolean test8_bool = true; + @ForceInline + static boolean test8_test() { + return test8_bool; + } + + static final MethodHandle test8_mh; + static MethodHandle test8_mh2; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public long test8() throws Throwable { + return ((MyValue2)test8_mh.invokeExact(test8_mh2)).hash(); + } + + @DontCompile + public void test8_verifier(boolean warmup) throws Throwable { + test8_bool = !test8_bool; + long hash = test8(); + Asserts.assertEQ(hash, MyValue2.createWithFieldsInline(rI+(test8_bool ? 0 : 1), test8_bool).hash()); + } + + // Return of target1, target2 and target3 merged in Lambda Forms + // as an Object. 
Shouldn't cause any allocation + final MyValue3 test9_vt1 = MyValue3.create(); + @ForceInline + MyValue3 test9_target1() { + return test9_vt1; + } + + final MyValue3 test9_vt2 = MyValue3.create(); + @ForceInline + MyValue3 test9_target2() { + return test9_vt2; + } + + final MyValue3 test9_vt3 = MyValue3.create(); + @ForceInline + MyValue3 test9_target3() { + return test9_vt3; + } + + boolean test9_bool1 = true; + @ForceInline + boolean test9_test1() { + return test9_bool1; + } + + boolean test9_bool2 = true; + @ForceInline + boolean test9_test2() { + return test9_bool2; + } + + static final MethodHandle test9_mh; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public MyValue3 test9() throws Throwable { + return (MyValue3)test9_mh.invokeExact(this); + } + + static int test9_i = 0; + @DontCompile + public void test9_verifier(boolean warmup) throws Throwable { + test9_i++; + test9_bool1 = (test9_i % 2) == 0; + test9_bool2 = (test9_i % 3) == 0; + MyValue3 vt = test9(); + vt.verify(test9_bool1 ? test9_vt1 : (test9_bool2 ? 
test9_vt2 : test9_vt3)); + } + + // Same as above but with non constant target2 and target3 + @ForceInline + static MyValue2 test10_target1() { + return MyValue2.createWithFieldsInline(rI, true); + } + + @ForceInline + static MyValue2 test10_target2() { + return MyValue2.createWithFieldsInline(rI+1, false); + } + + @ForceInline + static MyValue2 test10_target3() { + return MyValue2.createWithFieldsInline(rI+2, true); + } + + static boolean test10_bool1 = true; + @ForceInline + static boolean test10_test1() { + return test10_bool1; + } + + static boolean test10_bool2 = true; + @ForceInline + static boolean test10_test2() { + return test10_bool2; + } + + static final MethodHandle test10_mh; + static MethodHandle test10_mh2; + static MethodHandle test10_mh3; + + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + public long test10() throws Throwable { + return ((MyValue2)test10_mh.invokeExact(test10_mh2, test10_mh3)).hash(); + } + + static int test10_i = 0; + + @DontCompile + public void test10_verifier(boolean warmup) throws Throwable { + test10_i++; + test10_bool1 = (test10_i % 2) == 0; + test10_bool2 = (test10_i % 3) == 0; + long hash = test10(); + int i = rI+(test10_bool1 ? 0 : (test10_bool2 ? 1 : 2)); + boolean b = test10_bool1 ? true : (test10_bool2 ? 
false : true); + Asserts.assertEQ(hash, MyValue2.createWithFieldsInline(i, b).hash()); + } + + static int test11_i = 0; + + @ForceInline + static MyValue2 test11_target1() { + return MyValue2.createWithFieldsInline(rI+test11_i, true); + } + + @ForceInline + static MyValue2 test11_target2() { + return MyValue2.createWithFieldsInline(rI-test11_i, false); + } + + @ForceInline + static boolean test11_test() { + return (test11_i % 100) == 0; + } + + static final MethodHandle test11_mh; + static MethodHandle test11_mh2; + + // Check that a buffered value returned by a compiled lambda form + // is properly handled by the caller. + @Test(valid = ValueTypeReturnedAsFieldsOn, failOn = ALLOC + ALLOCA + STORE + STOREVALUETYPEFIELDS) + @Test(valid = ValueTypeReturnedAsFieldsOff) + @Warmup(11000) + public long test11() throws Throwable { + return ((MyValue2)test11_mh.invokeExact(test11_mh2)).hash(); + } + + @DontCompile + public void test11_verifier(boolean warmup) throws Throwable { + test11_i++; + long hash = test11(); + boolean b = (test11_i % 100) == 0; + Asserts.assertEQ(hash, MyValue2.createWithFieldsInline(rI+test11_i * (b ? 1 : -1), b).hash()); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestNativeClone.java 2019-03-11 14:27:37.842353887 +0100 @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8209702 + * @summary Verify that the native clone intrinsic handles value types. + * @modules java.base/jdk.experimental.value + * @library /test/lib + * @run main/othervm -XX:+EnableValhalla -Xbatch -XX:-UseTypeProfile + * -XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue::* + * -XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.TestNativeClone::test* + * -XX:CompileCommand=compileonly,jdk.internal.reflect.GeneratedMethodAccessor1::invoke + * -XX:CompileCommand=dontinline,jdk.internal.reflect.GeneratedMethodAccessor1::invoke + * compiler.valhalla.valuetypes.TestNativeClone + */ + +package compiler.valhalla.valuetypes; + +import java.lang.invoke.*; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import jdk.experimental.value.MethodHandleBuilder; +import jdk.test.lib.Asserts; + +value class MyValue { + public final int x; + + public MyValue(int x) { + this.x = x; + } +} + +public class TestNativeClone { + + private static final MethodHandle cloneValue = MethodHandleBuilder.loadCode(MethodHandles.lookup(), + "MyValue", + MethodType.methodType(Object.class, MyValue.class.asValueType()), + CODE -> { + CODE. + aload_0(). + invokevirtual(Object.class, "clone", "()Ljava/lang/Object;", false). 
+ areturn(); + }); + + public static void test1(MyValue vt) throws Throwable { + try { + cloneValue.invoke(vt); + throw new RuntimeException("No exception thrown"); + } catch (CloneNotSupportedException e) { + // Expected + } + } + + public static void test2(Method clone, Object obj) { + try { + clone.invoke(obj); + } catch (InvocationTargetException e) { + // Expected + Asserts.assertTrue(e.getCause() instanceof CloneNotSupportedException, "Unexpected exception thrown"); + return; + } catch (Exception e) { + throw new RuntimeException("Unexpected exception thrown", e); + } + throw new RuntimeException("No exception thrown"); + } + + public static void main(String[] args) throws Throwable { + MyValue vt = new MyValue(42); + Method clone = Object.class.getDeclaredMethod("clone"); + clone.setAccessible(true); + for (int i = 0; i < 20_000; ++i) { + test1(vt); + test2(clone, vt); + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestNewAcmp.java 2019-03-11 14:27:38.298353881 +0100 @@ -0,0 +1,1597 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestNewAcmp + * @summary Verifies correctness of the new acmp bytecode. + * @library /testlibrary /test/lib /compiler/whitebox / + * @compile -XDallowWithFieldOperator TestNewAcmp.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 0 + * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 0 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 1 + * @run main/othervm -Xbootclasspath/a:. 
-XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 1 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 2 + * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 2 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 0 + * @run main/othervm -Xbootclasspath/a:. 
-XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 0 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 1 + * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 1 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch + * -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 2 + * @run main/othervm -Xbootclasspath/a:. 
-XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:+EnableValhalla -XX:TypeProfileLevel=222 + * -XX:+UnlockExperimentalVMOptions -XX:ACmpOnValues=3 + * -XX:+AlwaysIncrementalInline + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::test* + * -XX:CompileCommand=dontinline,compiler.valhalla.valuetypes.TestNewAcmp::cmp* + * compiler.valhalla.valuetypes.TestNewAcmp 2 + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.invoke.*; +import java.lang.reflect.Method; +import java.util.regex.Pattern; +import java.util.regex.Matcher; +import java.util.Arrays; +import sun.hotspot.WhiteBox; + +interface MyInterface { + +} + +value class MyValue1 implements MyInterface { + final int x = 0; + + static MyValue1 createDefault() { + return MyValue1.default; + } + + static MyValue1 setX(MyValue1 v, int x) { + return __WithField(v.x, x); + } +} + +value class MyValue2 implements MyInterface { + final int x = 0; + + static MyValue2 createDefault() { + return MyValue2.default; + } + + static MyValue2 setX(MyValue2 v, int x) { + return __WithField(v.x, x); + } +} + +class MyObject implements MyInterface { + int x; +} + +// Mark test methods that return always false +@Retention(RetentionPolicy.RUNTIME) +@interface AlwaysFalse { + int[] valid_for() default {1, 2}; +} + +// Mark test methods that return always true +@Retention(RetentionPolicy.RUNTIME) +@interface AlwaysTrue { + int[] valid_for() default {1, 2}; +} + +// Mark test methods that return false if the argument is null +@Retention(RetentionPolicy.RUNTIME) +@interface FalseIfNull { } + +// Mark test methods that return true if the argument is null +@Retention(RetentionPolicy.RUNTIME) +@interface TrueIfNull { } + +public class TestNewAcmp { + + public boolean testEq01_1(Object u1, Object u2) { + return get(u1) == u2; // new acmp + } + + 
public boolean testEq01_2(Object u1, Object u2) { + return u1 == get(u2); // new acmp + } + + public boolean testEq01_3(Object u1, Object u2) { + return get(u1) == get(u2); // new acmp + } + + @FalseIfNull + public boolean testEq01_4(Object u1, Object u2) { + return getNotNull(u1) == u2; // new acmp without null check + } + + @FalseIfNull + public boolean testEq01_5(Object u1, Object u2) { + return u1 == getNotNull(u2); // new acmp without null check + } + + @FalseIfNull + public boolean testEq01_6(Object u1, Object u2) { + return getNotNull(u1) == getNotNull(u2); // new acmp without null check + } + + public boolean testEq02_1(MyValue1 v1, MyValue1 v2) { + return get(v1) == (Object)v2; // only true if both null + } + + public boolean testEq02_2(MyValue1 v1, MyValue1 v2) { + return (Object)v1 == get(v2); // only true if both null + } + + public boolean testEq02_3(MyValue1 v1, MyValue1 v2) { + return get(v1) == get(v2); // only true if both null + } + + public boolean testEq03_1(MyValue1 v, Object u) { + return get(v) == u; // only true if both null + } + + public boolean testEq03_2(MyValue1 v, Object u) { + return (Object)v == get(u); // only true if both null + } + + public boolean testEq03_3(MyValue1 v, Object u) { + return get(v) == get(u); // only true if both null + } + + public boolean testEq04_1(Object u, MyValue1 v) { + return get(u) == (Object)v; // only true if both null + } + + public boolean testEq04_2(Object u, MyValue1 v) { + return u == get(v); // only true if both null + } + + public boolean testEq04_3(Object u, MyValue1 v) { + return get(u) == get(v); // only true if both null + } + + public boolean testEq05_1(MyObject o, MyValue1 v) { + return get(o) == (Object)v; // only true if both null + } + + public boolean testEq05_2(MyObject o, MyValue1 v) { + return o == get(v); // only true if both null + } + + public boolean testEq05_3(MyObject o, MyValue1 v) { + return get(o) == get(v); // only true if both null + } + + public boolean 
testEq06_1(MyValue1 v, MyObject o) { + return get(v) == o; // only true if both null + } + + public boolean testEq06_2(MyValue1 v, MyObject o) { + return (Object)v == get(o); // only true if both null + } + + public boolean testEq06_3(MyValue1 v, MyObject o) { + return get(v) == get(o); // only true if both null + } + + @AlwaysFalse + public boolean testEq07_1(MyValue1 v1, MyValue1 v2) { + return getNotNull(v1) == (Object)v2; // false + } + + @AlwaysFalse + public boolean testEq07_2(MyValue1 v1, MyValue1 v2) { + return (Object)v1 == getNotNull(v2); // false + } + + @AlwaysFalse + public boolean testEq07_3(MyValue1 v1, MyValue1 v2) { + return getNotNull(v1) == getNotNull(v2); // false + } + + @AlwaysFalse + public boolean testEq08_1(MyValue1 v, Object u) { + return getNotNull(v) == u; // false + } + + @AlwaysFalse + public boolean testEq08_2(MyValue1 v, Object u) { + return (Object)v == getNotNull(u); // false + } + + @AlwaysFalse + public boolean testEq08_3(MyValue1 v, Object u) { + return getNotNull(v) == getNotNull(u); // false + } + + @AlwaysFalse + public boolean testEq09_1(Object u, MyValue1 v) { + return getNotNull(u) == (Object)v; // false + } + + @AlwaysFalse + public boolean testEq09_2(Object u, MyValue1 v) { + return u == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq09_3(Object u, MyValue1 v) { + return getNotNull(u) == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq10_1(MyObject o, MyValue1 v) { + return getNotNull(o) == (Object)v; // false + } + + @AlwaysFalse + public boolean testEq10_2(MyObject o, MyValue1 v) { + return o == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq10_3(MyObject o, MyValue1 v) { + return getNotNull(o) == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq11_1(MyValue1 v, MyObject o) { + return getNotNull(v) == o; // false + } + + @AlwaysFalse + public boolean testEq11_2(MyValue1 v, MyObject o) { + return (Object)v == getNotNull(o); // false + } + + 
@AlwaysFalse + public boolean testEq11_3(MyValue1 v, MyObject o) { + return getNotNull(v) == getNotNull(o); // false + } + + public boolean testEq12_1(MyObject o1, MyObject o2) { + return get(o1) == o2; // old acmp + } + + public boolean testEq12_2(MyObject o1, MyObject o2) { + return o1 == get(o2); // old acmp + } + + public boolean testEq12_3(MyObject o1, MyObject o2) { + return get(o1) == get(o2); // old acmp + } + + public boolean testEq13_1(Object u, MyObject o) { + return get(u) == o; // old acmp + } + + public boolean testEq13_2(Object u, MyObject o) { + return u == get(o); // old acmp + } + + public boolean testEq13_3(Object u, MyObject o) { + return get(u) == get(o); // old acmp + } + + public boolean testEq14_1(MyObject o, Object u) { + return get(o) == u; // old acmp + } + + public boolean testEq14_2(MyObject o, Object u) { + return o == get(u); // old acmp + } + + public boolean testEq14_3(MyObject o, Object u) { + return get(o) == get(u); // old acmp + } + + public boolean testEq15_1(Object[] a, Object u) { + return get(a) == u; // old acmp + } + + public boolean testEq15_2(Object[] a, Object u) { + return a == get(u); // old acmp + } + + public boolean testEq15_3(Object[] a, Object u) { + return get(a) == get(u); // old acmp + } + + public boolean testEq16_1(Object u, Object[] a) { + return get(u) == a; // old acmp + } + + public boolean testEq16_2(Object u, Object[] a) { + return u == get(a); // old acmp + } + + public boolean testEq16_3(Object u, Object[] a) { + return get(u) == get(a); // old acmp + } + + public boolean testEq17_1(Object[] a, MyValue1 v) { + return get(a) == (Object)v; // only true if both null + } + + public boolean testEq17_2(Object[] a, MyValue1 v) { + return a == get(v); // only true if both null + } + + public boolean testEq17_3(Object[] a, MyValue1 v) { + return get(a) == get(v); // only true if both null + } + + public boolean testEq18_1(MyValue1 v, Object[] a) { + return get(v) == a; // only true if both null + } + + public 
boolean testEq18_2(MyValue1 v, Object[] a) { + return (Object)v == get(a); // only true if both null + } + + public boolean testEq18_3(MyValue1 v, Object[] a) { + return get(v) == get(a); // only true if both null + } + + @AlwaysFalse + public boolean testEq19_1(Object[] a, MyValue1 v) { + return getNotNull(a) == (Object)v; // false + } + + @AlwaysFalse + public boolean testEq19_2(Object[] a, MyValue1 v) { + return a == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq19_3(Object[] a, MyValue1 v) { + return getNotNull(a) == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq20_1(MyValue1 v, Object[] a) { + return getNotNull(v) == a; // false + } + + @AlwaysFalse + public boolean testEq20_2(MyValue1 v, Object[] a) { + return (Object)v == getNotNull(a); // false + } + + @AlwaysFalse + public boolean testEq20_3(MyValue1 v, Object[] a) { + return getNotNull(v) == getNotNull(a); // false + } + + public boolean testEq21_1(MyInterface u1, MyInterface u2) { + return get(u1) == u2; // new acmp + } + + public boolean testEq21_2(MyInterface u1, MyInterface u2) { + return u1 == get(u2); // new acmp + } + + public boolean testEq21_3(MyInterface u1, MyInterface u2) { + return get(u1) == get(u2); // new acmp + } + + @FalseIfNull + public boolean testEq21_4(MyInterface u1, MyInterface u2) { + return getNotNull(u1) == u2; // new acmp without null check + } + + @FalseIfNull + public boolean testEq21_5(MyInterface u1, MyInterface u2) { + return u1 == getNotNull(u2); // new acmp without null check + } + + @FalseIfNull + public boolean testEq21_6(MyInterface u1, MyInterface u2) { + return getNotNull(u1) == getNotNull(u2); // new acmp without null check + } + + public boolean testEq22_1(MyValue1 v, MyInterface u) { + return get(v) == u; // only true if both null + } + + public boolean testEq22_2(MyValue1 v, MyInterface u) { + return (Object)v == get(u); // only true if both null + } + + public boolean testEq22_3(MyValue1 v, MyInterface u) { + return 
get(v) == get(u); // only true if both null + } + + public boolean testEq23_1(MyInterface u, MyValue1 v) { + return get(u) == (Object)v; // only true if both null + } + + public boolean testEq23_2(MyInterface u, MyValue1 v) { + return u == get(v); // only true if both null + } + + public boolean testEq23_3(MyInterface u, MyValue1 v) { + return get(u) == get(v); // only true if both null + } + + @AlwaysFalse + public boolean testEq24_1(MyValue1 v, MyInterface u) { + return getNotNull(v) == u; // false + } + + @AlwaysFalse + public boolean testEq24_2(MyValue1 v, MyInterface u) { + return (Object)v == getNotNull(u); // false + } + + @AlwaysFalse + public boolean testEq24_3(MyValue1 v, MyInterface u) { + return getNotNull(v) == getNotNull(u); // false + } + + @AlwaysFalse + public boolean testEq25_1(MyInterface u, MyValue1 v) { + return getNotNull(u) == (Object)v; // false + } + + @AlwaysFalse + public boolean testEq25_2(MyInterface u, MyValue1 v) { + return u == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq25_3(MyInterface u, MyValue1 v) { + return getNotNull(u) == getNotNull(v); // false + } + + public boolean testEq26_1(MyInterface u, MyObject o) { + return get(u) == o; // old acmp + } + + public boolean testEq26_2(MyInterface u, MyObject o) { + return u == get(o); // old acmp + } + + public boolean testEq26_3(MyInterface u, MyObject o) { + return get(u) == get(o); // old acmp + } + + public boolean testEq27_1(MyObject o, MyInterface u) { + return get(o) == u; // old acmp + } + + public boolean testEq27_2(MyObject o, MyInterface u) { + return o == get(u); // old acmp + } + + public boolean testEq27_3(MyObject o, MyInterface u) { + return get(o) == get(u); // old acmp + } + + public boolean testEq28_1(MyInterface[] a, MyInterface u) { + return get(a) == u; // old acmp + } + + public boolean testEq28_2(MyInterface[] a, MyInterface u) { + return a == get(u); // old acmp + } + + public boolean testEq28_3(MyInterface[] a, MyInterface u) { + return 
get(a) == get(u); // old acmp + } + + public boolean testEq29_1(MyInterface u, MyInterface[] a) { + return get(u) == a; // old acmp + } + + public boolean testEq29_2(MyInterface u, MyInterface[] a) { + return u == get(a); // old acmp + } + + public boolean testEq29_3(MyInterface u, MyInterface[] a) { + return get(u) == get(a); // old acmp + } + + public boolean testEq30_1(MyInterface[] a, MyValue1 v) { + return get(a) == (Object)v; // only true if both null + } + + public boolean testEq30_2(MyInterface[] a, MyValue1 v) { + return a == get(v); // only true if both null + } + + public boolean testEq30_3(MyInterface[] a, MyValue1 v) { + return get(a) == get(v); // only true if both null + } + + public boolean testEq31_1(MyValue1 v, MyInterface[] a) { + return get(v) == a; // only true if both null + } + + public boolean testEq31_2(MyValue1 v, MyInterface[] a) { + return (Object)v == get(a); // only true if both null + } + + public boolean testEq31_3(MyValue1 v, MyInterface[] a) { + return get(v) == get(a); // only true if both null + } + + @AlwaysFalse + public boolean testEq32_1(MyInterface[] a, MyValue1 v) { + return getNotNull(a) == (Object)v; // false + } + + @AlwaysFalse + public boolean testEq32_2(MyInterface[] a, MyValue1 v) { + return a == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq32_3(MyInterface[] a, MyValue1 v) { + return getNotNull(a) == getNotNull(v); // false + } + + @AlwaysFalse + public boolean testEq33_1(MyValue1 v, MyInterface[] a) { + return getNotNull(v) == a; // false + } + + @AlwaysFalse + public boolean testEq33_2(MyValue1 v, MyInterface[] a) { + return (Object)v == getNotNull(a); // false + } + + @AlwaysFalse + public boolean testEq33_3(MyValue1 v, MyInterface[] a) { + return getNotNull(v) == getNotNull(a); // false + } + + + // Null tests + + public boolean testNull01_1(MyValue1 v) { + return (Object)v == null; // old acmp + } + + public boolean testNull01_2(MyValue1 v) { + return get(v) == null; // old acmp + } + + 
public boolean testNull01_3(MyValue1 v) { + return (Object)v == get((Object)null); // old acmp + } + + public boolean testNull01_4(MyValue1 v) { + return get(v) == get((Object)null); // old acmp + } + + public boolean testNull02_1(MyValue1 v) { + return null == (Object)v; // old acmp + } + + public boolean testNull02_2(MyValue1 v) { + return get((Object)null) == (Object)v; // old acmp + } + + public boolean testNull02_3(MyValue1 v) { + return null == get(v); // old acmp + } + + public boolean testNull02_4(MyValue1 v) { + return get((Object)null) == get(v); // old acmp + } + + public boolean testNull03_1(Object u) { + return u == null; // old acmp + } + + public boolean testNull03_2(Object u) { + return get(u) == null; // old acmp + } + + public boolean testNull03_3(Object u) { + return u == get((Object)null); // old acmp + } + + public boolean testNull03_4(Object u) { + return get(u) == get((Object)null); // old acmp + } + + public boolean testNull04_1(Object u) { + return null == u; // old acmp + } + + public boolean testNull04_2(Object u) { + return get((Object)null) == u; // old acmp + } + + public boolean testNull04_3(Object u) { + return null == get(u); // old acmp + } + + public boolean testNull04_4(Object u) { + return get((Object)null) == get(u); // old acmp + } + + public boolean testNull05_1(MyObject o) { + return o == null; // old acmp + } + + public boolean testNull05_2(MyObject o) { + return get(o) == null; // old acmp + } + + public boolean testNull05_3(MyObject o) { + return o == get((Object)null); // old acmp + } + + public boolean testNull05_4(MyObject o) { + return get(o) == get((Object)null); // old acmp + } + + public boolean testNull06_1(MyObject o) { + return null == o; // old acmp + } + + public boolean testNull06_2(MyObject o) { + return get((Object)null) == o; // old acmp + } + + public boolean testNull06_3(MyObject o) { + return null == get(o); // old acmp + } + + public boolean testNull06_4(MyObject o) { + return get((Object)null) == 
get(o); // old acmp + } + + public boolean testNull07_1(MyInterface u) { + return u == null; // old acmp + } + + public boolean testNull07_2(MyInterface u) { + return get(u) == null; // old acmp + } + + public boolean testNull07_3(MyInterface u) { + return u == get((Object)null); // old acmp + } + + public boolean testNull07_4(MyInterface u) { + return get(u) == get((Object)null); // old acmp + } + + public boolean testNull08_1(MyInterface u) { + return null == u; // old acmp + } + + public boolean testNull08_2(MyInterface u) { + return get((Object)null) == u; // old acmp + } + + public boolean testNull08_3(MyInterface u) { + return null == get(u); // old acmp + } + + public boolean testNull08_4(MyInterface u) { + return get((Object)null) == get(u); // old acmp + } + + // Same tests as above but negated + + public boolean testNotEq01_1(Object u1, Object u2) { + return get(u1) != u2; // new acmp + } + + public boolean testNotEq01_2(Object u1, Object u2) { + return u1 != get(u2); // new acmp + } + + public boolean testNotEq01_3(Object u1, Object u2) { + return get(u1) != get(u2); // new acmp + } + + @TrueIfNull + public boolean testNotEq01_4(Object u1, Object u2) { + return getNotNull(u1) != u2; // new acmp without null check + } + + @TrueIfNull + public boolean testNotEq01_5(Object u1, Object u2) { + return u1 != getNotNull(u2); // new acmp without null check + } + + @TrueIfNull + public boolean testNotEq01_6(Object u1, Object u2) { + return getNotNull(u1) != getNotNull(u2); // new acmp without null check + } + + public boolean testNotEq02_1(MyValue1 v1, MyValue1 v2) { + return get(v1) != (Object)v2; // only false if both null + } + + public boolean testNotEq02_2(MyValue1 v1, MyValue1 v2) { + return (Object)v1 != get(v2); // only false if both null + } + + public boolean testNotEq02_3(MyValue1 v1, MyValue1 v2) { + return get(v1) != get(v2); // only false if both null + } + + public boolean testNotEq03_1(MyValue1 v, Object u) { + return get(v) != u; // only false if 
both null + } + + public boolean testNotEq03_2(MyValue1 v, Object u) { + return (Object)v != get(u); // only false if both null + } + + public boolean testNotEq03_3(MyValue1 v, Object u) { + return get(v) != get(u); // only false if both null + } + + public boolean testNotEq04_1(Object u, MyValue1 v) { + return get(u) != (Object)v; // only false if both null + } + + public boolean testNotEq04_2(Object u, MyValue1 v) { + return u != get(v); // only false if both null + } + + public boolean testNotEq04_3(Object u, MyValue1 v) { + return get(u) != get(v); // only false if both null + } + + public boolean testNotEq05_1(MyObject o, MyValue1 v) { + return get(o) != (Object)v; // only false if both null + } + + public boolean testNotEq05_2(MyObject o, MyValue1 v) { + return o != get(v); // only false if both null + } + + public boolean testNotEq05_3(MyObject o, MyValue1 v) { + return get(o) != get(v); // only false if both null + } + + public boolean testNotEq06_1(MyValue1 v, MyObject o) { + return get(v) != o; // only false if both null + } + + public boolean testNotEq06_2(MyValue1 v, MyObject o) { + return (Object)v != get(o); // only false if both null + } + + public boolean testNotEq06_3(MyValue1 v, MyObject o) { + return get(v) != get(o); // only false if both null + } + + @AlwaysTrue + public boolean testNotEq07_1(MyValue1 v1, MyValue1 v2) { + return getNotNull(v1) != (Object)v2; // true + } + + @AlwaysTrue + public boolean testNotEq07_2(MyValue1 v1, MyValue1 v2) { + return (Object)v1 != getNotNull(v2); // true + } + + @AlwaysTrue + public boolean testNotEq07_3(MyValue1 v1, MyValue1 v2) { + return getNotNull(v1) != getNotNull(v2); // true + } + + @AlwaysTrue + public boolean testNotEq08_1(MyValue1 v, Object u) { + return getNotNull(v) != u; // true + } + + @AlwaysTrue + public boolean testNotEq08_2(MyValue1 v, Object u) { + return (Object)v != getNotNull(u); // true + } + + @AlwaysTrue + public boolean testNotEq08_3(MyValue1 v, Object u) { + return getNotNull(v) != 
getNotNull(u); // true + } + + @AlwaysTrue + public boolean testNotEq09_1(Object u, MyValue1 v) { + return getNotNull(u) != (Object)v; // true + } + + @AlwaysTrue + public boolean testNotEq09_2(Object u, MyValue1 v) { + return u != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq09_3(Object u, MyValue1 v) { + return getNotNull(u) != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq10_1(MyObject o, MyValue1 v) { + return getNotNull(o) != (Object)v; // true + } + + @AlwaysTrue + public boolean testNotEq10_2(MyObject o, MyValue1 v) { + return o != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq10_3(MyObject o, MyValue1 v) { + return getNotNull(o) != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq11_1(MyValue1 v, MyObject o) { + return getNotNull(v) != o; // true + } + + @AlwaysTrue + public boolean testNotEq11_2(MyValue1 v, MyObject o) { + return (Object)v != getNotNull(o); // true + } + + @AlwaysTrue + public boolean testNotEq11_3(MyValue1 v, MyObject o) { + return getNotNull(v) != getNotNull(o); // true + } + + public boolean testNotEq12_1(MyObject o1, MyObject o2) { + return get(o1) != o2; // old acmp + } + + public boolean testNotEq12_2(MyObject o1, MyObject o2) { + return o1 != get(o2); // old acmp + } + + public boolean testNotEq12_3(MyObject o1, MyObject o2) { + return get(o1) != get(o2); // old acmp + } + + public boolean testNotEq13_1(Object u, MyObject o) { + return get(u) != o; // old acmp + } + + public boolean testNotEq13_2(Object u, MyObject o) { + return u != get(o); // old acmp + } + + public boolean testNotEq13_3(Object u, MyObject o) { + return get(u) != get(o); // old acmp + } + + public boolean testNotEq14_1(MyObject o, Object u) { + return get(o) != u; // old acmp + } + + public boolean testNotEq14_2(MyObject o, Object u) { + return o != get(u); // old acmp + } + + public boolean testNotEq14_3(MyObject o, Object u) { + return get(o) != get(u); // old acmp + } + + 
public boolean testNotEq15_1(Object[] a, Object u) { + return get(a) != u; // old acmp + } + + public boolean testNotEq15_2(Object[] a, Object u) { + return a != get(u); // old acmp + } + + public boolean testNotEq15_3(Object[] a, Object u) { + return get(a) != get(u); // old acmp + } + + public boolean testNotEq16_1(Object u, Object[] a) { + return get(u) != a; // old acmp + } + + public boolean testNotEq16_2(Object u, Object[] a) { + return u != get(a); // old acmp + } + + public boolean testNotEq16_3(Object u, Object[] a) { + return get(u) != get(a); // old acmp + } + + public boolean testNotEq17_1(Object[] a, MyValue1 v) { + return get(a) != (Object)v; // only false if both null + } + + public boolean testNotEq17_2(Object[] a, MyValue1 v) { + return a != get(v); // only false if both null + } + + public boolean testNotEq17_3(Object[] a, MyValue1 v) { + return get(a) != get(v); // only false if both null + } + + public boolean testNotEq18_1(MyValue1 v, Object[] a) { + return get(v) != a; // only false if both null + } + + public boolean testNotEq18_2(MyValue1 v, Object[] a) { + return (Object)v != get(a); // only false if both null + } + + public boolean testNotEq18_3(MyValue1 v, Object[] a) { + return get(v) != get(a); // only false if both null + } + + @AlwaysTrue + public boolean testNotEq19_1(Object[] a, MyValue1 v) { + return getNotNull(a) != (Object)v; // true + } + + @AlwaysTrue + public boolean testNotEq19_2(Object[] a, MyValue1 v) { + return a != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq19_3(Object[] a, MyValue1 v) { + return getNotNull(a) != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq20_1(MyValue1 v, Object[] a) { + return getNotNull(v) != a; // true + } + + @AlwaysTrue + public boolean testNotEq20_2(MyValue1 v, Object[] a) { + return (Object)v != getNotNull(a); // true + } + + @AlwaysTrue + public boolean testNotEq20_3(MyValue1 v, Object[] a) { + return getNotNull(v) != getNotNull(a); // true + } + 
+ public boolean testNotEq21_1(MyInterface u1, MyInterface u2) { + return get(u1) != u2; // new acmp + } + + public boolean testNotEq21_2(MyInterface u1, MyInterface u2) { + return u1 != get(u2); // new acmp + } + + public boolean testNotEq21_3(MyInterface u1, MyInterface u2) { + return get(u1) != get(u2); // new acmp + } + + @TrueIfNull + public boolean testNotEq21_4(MyInterface u1, MyInterface u2) { + return getNotNull(u1) != u2; // new acmp without null check + } + + @TrueIfNull + public boolean testNotEq21_5(MyInterface u1, MyInterface u2) { + return u1 != getNotNull(u2); // new acmp without null check + } + + @TrueIfNull + public boolean testNotEq21_6(MyInterface u1, MyInterface u2) { + return getNotNull(u1) != getNotNull(u2); // new acmp without null check + } + + public boolean testNotEq22_1(MyValue1 v, MyInterface u) { + return get(v) != u; // only false if both null + } + + public boolean testNotEq22_2(MyValue1 v, MyInterface u) { + return (Object)v != get(u); // only false if both null + } + + public boolean testNotEq22_3(MyValue1 v, MyInterface u) { + return get(v) != get(u); // only false if both null + } + + public boolean testNotEq23_1(MyInterface u, MyValue1 v) { + return get(u) != (Object)v; // only false if both null + } + + public boolean testNotEq23_2(MyInterface u, MyValue1 v) { + return u != get(v); // only false if both null + } + + public boolean testNotEq23_3(MyInterface u, MyValue1 v) { + return get(u) != get(v); // only false if both null + } + + @AlwaysTrue + public boolean testNotEq24_1(MyValue1 v, MyInterface u) { + return getNotNull(v) != u; // true + } + + @AlwaysTrue + public boolean testNotEq24_2(MyValue1 v, MyInterface u) { + return (Object)v != getNotNull(u); // true + } + + @AlwaysTrue + public boolean testNotEq24_3(MyValue1 v, MyInterface u) { + return getNotNull(v) != getNotNull(u); // true + } + + @AlwaysTrue + public boolean testNotEq25_1(MyInterface u, MyValue1 v) { + return getNotNull(u) != (Object)v; // true + } + + 
@AlwaysTrue + public boolean testNotEq25_2(MyInterface u, MyValue1 v) { + return u != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq25_3(MyInterface u, MyValue1 v) { + return getNotNull(u) != getNotNull(v); // true + } + + public boolean testNotEq26_1(MyInterface u, MyObject o) { + return get(u) != o; // old acmp + } + + public boolean testNotEq26_2(MyInterface u, MyObject o) { + return u != get(o); // old acmp + } + + public boolean testNotEq26_3(MyInterface u, MyObject o) { + return get(u) != get(o); // old acmp + } + + public boolean testNotEq27_1(MyObject o, MyInterface u) { + return get(o) != u; // old acmp + } + + public boolean testNotEq27_2(MyObject o, MyInterface u) { + return o != get(u); // old acmp + } + + public boolean testNotEq27_3(MyObject o, MyInterface u) { + return get(o) != get(u); // old acmp + } + + public boolean testNotEq28_1(MyInterface[] a, MyInterface u) { + return get(a) != u; // old acmp + } + + public boolean testNotEq28_2(MyInterface[] a, MyInterface u) { + return a != get(u); // old acmp + } + + public boolean testNotEq28_3(MyInterface[] a, MyInterface u) { + return get(a) != get(u); // old acmp + } + + public boolean testNotEq29_1(MyInterface u, MyInterface[] a) { + return get(u) != a; // old acmp + } + + public boolean testNotEq29_2(MyInterface u, MyInterface[] a) { + return u != get(a); // old acmp + } + + public boolean testNotEq29_3(MyInterface u, MyInterface[] a) { + return get(u) != get(a); // old acmp + } + + public boolean testNotEq30_1(MyInterface[] a, MyValue1 v) { + return get(a) != (Object)v; // only false if both null + } + + public boolean testNotEq30_2(MyInterface[] a, MyValue1 v) { + return a != get(v); // only false if both null + } + + public boolean testNotEq30_3(MyInterface[] a, MyValue1 v) { + return get(a) != get(v); // only false if both null + } + + public boolean testNotEq31_1(MyValue1 v, MyInterface[] a) { + return get(v) != a; // only false if both null + } + + public boolean 
testNotEq31_2(MyValue1 v, MyInterface[] a) { + return (Object)v != get(a); // only false if both null + } + + public boolean testNotEq31_3(MyValue1 v, MyInterface[] a) { + return get(v) != get(a); // only false if both null + } + + @AlwaysTrue + public boolean testNotEq32_1(MyInterface[] a, MyValue1 v) { + return getNotNull(a) != (Object)v; // true + } + + @AlwaysTrue + public boolean testNotEq32_2(MyInterface[] a, MyValue1 v) { + return a != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq32_3(MyInterface[] a, MyValue1 v) { + return getNotNull(a) != getNotNull(v); // true + } + + @AlwaysTrue + public boolean testNotEq33_1(MyValue1 v, MyInterface[] a) { + return getNotNull(v) != a; // true + } + + @AlwaysTrue + public boolean testNotEq33_2(MyValue1 v, MyInterface[] a) { + return (Object)v != getNotNull(a); // true + } + + @AlwaysTrue + public boolean testNotEq33_3(MyValue1 v, MyInterface[] a) { + return getNotNull(v) != getNotNull(a); // true + } + + // Null tests + + public boolean testNotNull01_1(MyValue1 v) { + return (Object)v != null; // old acmp + } + + public boolean testNotNull01_2(MyValue1 v) { + return get(v) != null; // old acmp + } + + public boolean testNotNull01_3(MyValue1 v) { + return (Object)v != get((Object)null); // old acmp + } + + public boolean testNotNull01_4(MyValue1 v) { + return get(v) != get((Object)null); // old acmp + } + + public boolean testNotNull02_1(MyValue1 v) { + return null != (Object)v; // old acmp + } + + public boolean testNotNull02_2(MyValue1 v) { + return get((Object)null) != (Object)v; // old acmp + } + + public boolean testNotNull02_3(MyValue1 v) { + return null != get(v); // old acmp + } + + public boolean testNotNull02_4(MyValue1 v) { + return get((Object)null) != get(v); // old acmp + } + + public boolean testNotNull03_1(Object u) { + return u != null; // old acmp + } + + public boolean testNotNull03_2(Object u) { + return get(u) != null; // old acmp + } + + public boolean testNotNull03_3(Object u) 
{ + return u != get((Object)null); // old acmp + } + + public boolean testNotNull03_4(Object u) { + return get(u) != get((Object)null); // old acmp + } + + public boolean testNotNull04_1(Object u) { + return null != u; // old acmp + } + + public boolean testNotNull04_2(Object u) { + return get((Object)null) != u; // old acmp + } + + public boolean testNotNull04_3(Object u) { + return null != get(u); // old acmp + } + + public boolean testNotNull04_4(Object u) { + return get((Object)null) != get(u); // old acmp + } + + public boolean testNotNull05_1(MyObject o) { + return o != null; // old acmp + } + + public boolean testNotNull05_2(MyObject o) { + return get(o) != null; // old acmp + } + + public boolean testNotNull05_3(MyObject o) { + return o != get((Object)null); // old acmp + } + + public boolean testNotNull05_4(MyObject o) { + return get(o) != get((Object)null); // old acmp + } + + public boolean testNotNull06_1(MyObject o) { + return null != o; // old acmp + } + + public boolean testNotNull06_2(MyObject o) { + return get((Object)null) != o; // old acmp + } + + public boolean testNotNull06_3(MyObject o) { + return null != get(o); // old acmp + } + + public boolean testNotNull06_4(MyObject o) { + return get((Object)null) != get(o); // old acmp + } + + public boolean testNotNull07_1(MyInterface u) { + return u != null; // old acmp + } + + public boolean testNotNull07_2(MyInterface u) { + return get(u) != null; // old acmp + } + + public boolean testNotNull07_3(MyInterface u) { + return u != get((Object)null); // old acmp + } + + public boolean testNotNull07_4(MyInterface u) { + return get(u) != get((Object)null); // old acmp + } + + public boolean testNotNull08_1(MyInterface u) { + return null != u; // old acmp + } + + public boolean testNotNull08_2(MyInterface u) { + return get((Object)null) != u; // old acmp + } + + public boolean testNotNull08_3(MyInterface u) { + return null != get(u); // old acmp + } + + public boolean testNotNull08_4(MyInterface u) { + 
return get((Object)null) != get(u); // old acmp + } + + // The following methods are used with -XX:+AlwaysIncrementalInline to hide exact types during parsing + + public Object get(Object u) { + return u; + } + + public Object getNotNull(Object u) { + return (u != null) ? u : new Object(); + } + + public Object get(MyValue1 v) { + return v; + } + + public Object getNotNull(MyValue1 v) { + return ((Object)v != null) ? v : MyValue1.createDefault(); + } + + public Object get(MyObject o) { + return o; + } + + public Object getNotNull(MyObject o) { + return (o != null) ? o : MyValue1.createDefault(); + } + + public Object get(Object[] a) { + return a; + } + + public Object getNotNull(Object[] a) { + return (a != null) ? a : new Object[1]; + } + + public boolean trueIfNull(Method m) { + return m.isAnnotationPresent(TrueIfNull.class); + } + + public boolean falseIfNull(Method m) { + return m.isAnnotationPresent(FalseIfNull.class); + } + + public boolean alwaysTrue(Method m) { + return m.isAnnotationPresent(AlwaysTrue.class) && + Arrays.asList(((AlwaysTrue)m.getAnnotation(AlwaysTrue.class)).valid_for()).contains(ACmpOnValues); + } + + public boolean alwaysFalse(Method m) { + return m.isAnnotationPresent(AlwaysFalse.class) && + Arrays.asList(((AlwaysFalse)m.getAnnotation(AlwaysFalse.class)).valid_for()).contains(ACmpOnValues); + } + + public boolean isNegated(Method m) { + return m.getName().startsWith("testNot"); + } + + // Tests with profiling + public boolean cmpAlwaysEqual1(Object a, Object b) { + return a == b; + } + + public boolean cmpAlwaysEqual2(Object a, Object b) { + return a != b; + } + + public boolean cmpAlwaysEqual3(Object a) { + return a == a; + } + + public boolean cmpAlwaysEqual4(Object a) { + return a != a; + } + + public boolean cmpAlwaysUnEqual1(Object a, Object b) { + return a == b; + } + + public boolean cmpAlwaysUnEqual2(Object a, Object b) { + return a != b; + } + + public boolean cmpAlwaysUnEqual3(Object a) { + return a == a; + } + + public boolean 
cmpAlwaysUnEqual4(Object a) { + return a != a; + } + + public boolean cmpSometimesEqual1(Object a) { + return a == a; + } + + public boolean cmpSometimesEqual2(Object a) { + return a != a; + } + + protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + protected static final int COMP_LEVEL_FULL_OPTIMIZATION = 4; + protected static final long ACmpOnValues = (Long)WHITE_BOX.getVMFlag("ACmpOnValues"); + + public void runTest(Method m, Object[] args, int warmup, int nullMode, boolean[][] equalities) throws Exception { + Class[] parameterTypes = m.getParameterTypes(); + int parameterCount = parameterTypes.length; + // Nullness mode for first argument + // 0: default, 1: never null, 2: always null + int start = (nullMode != 1) ? 0 : 1; + int end = (nullMode != 2) ? args.length : 1; + for (int i = start; i < end; ++i) { + if (args[i] != null && !parameterTypes[0].isInstance(args[i])) { + continue; + } + if (args[i] == null && parameterTypes[0] == MyValue1.class.asValueType()) { + continue; + } + if (parameterCount == 1) { + // Null checks + System.out.print("Testing " + m.getName() + "(" + args[i] + ")"); + // Avoid acmp in the computation of the expected result! + boolean expected = isNegated(m) ? (i != 0) : (i == 0); + for (int run = 0; run < warmup; ++run) { + Boolean result = (Boolean)m.invoke(this, args[i]); + if (result != expected && WHITE_BOX.isMethodCompiled(m, false)) { + System.out.println(" = " + result); + throw new RuntimeException("Test failed: should return " + expected); + } + } + System.out.println(" = " + expected); + } else { + // Equality checks + for (int j = 0; j < args.length; ++j) { + if (args[j] != null && !parameterTypes[1].isInstance(args[j])) { + continue; + } + if (args[j] == null && parameterTypes[1] == MyValue1.class.asValueType()) { + continue; + } + System.out.print("Testing " + m.getName() + "(" + args[i] + ", " + args[j] + ")"); + // Avoid acmp in the computation of the expected result! 
+ boolean equal = equalities[i][j]; + equal = isNegated(m) ? !equal : equal; + boolean expected = alwaysTrue(m) || ((i == 0 || j == 0) && trueIfNull(m)) || (!alwaysFalse(m) && equal && !(i == 0 && falseIfNull(m))); + for (int run = 0; run < warmup; ++run) { + Boolean result = (Boolean)m.invoke(this, args[i], args[j]); + if (result != expected && WHITE_BOX.isMethodCompiled(m, false) && warmup == 1) { + System.out.println(" = " + result); + throw new RuntimeException("Test failed: should return " + expected); + } + } + System.out.println(" = " + expected); + } + } + } + } + + public void run(int nullMode) throws Exception { + // Prepare test arguments + Object[] args = { null, + new Object(), + new MyObject(), + MyValue1.setX(MyValue1.createDefault(), 42), + new Object[10], + new MyObject[10], + MyValue1.setX(MyValue1.createDefault(), 0x42), + MyValue1.setX(MyValue1.createDefault(), 42), + MyValue2.setX(MyValue2.createDefault(), 42), }; + + boolean[][] equalities = { { true, false, false, false, false, false, false, false, false }, + { false, true, false, false, false, false, false, false, false }, + { false, false, true, false, false, false, false, false, false }, + { false, false, false, ACmpOnValues == 3,false, false, false, ACmpOnValues == 3, false }, + { false, false, false, false, true, false, false, false, false }, + { false, false, false, false, false, true, false, false, false }, + { false, false, false, false, false, false, ACmpOnValues == 3, false, false }, + { false, false, false, ACmpOnValues == 3,false, false, false, ACmpOnValues == 3, false }, + { false, false, false, false, false, false, false, false, ACmpOnValues == 3 } }; + + // Run tests + for (Method m : getClass().getMethods()) { + if (m.getName().startsWith("test")) { + // Do some warmup runs + runTest(m, args, 1000, nullMode, equalities); + // Make sure method is compiled + WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION); + Asserts.assertTrue(WHITE_BOX.isMethodCompiled(m, 
false), m + " not compiled"); + // Run again to verify correctness of compiled code + runTest(m, args, 1, nullMode, equalities); + } + } + + Method cmpAlwaysUnEqual3_m = getClass().getMethod("cmpAlwaysUnEqual3", Object.class); + Method cmpAlwaysUnEqual4_m = getClass().getMethod("cmpAlwaysUnEqual4", Object.class); + Method cmpSometimesEqual1_m = getClass().getMethod("cmpSometimesEqual1", Object.class); + Method cmpSometimesEqual2_m = getClass().getMethod("cmpSometimesEqual2", Object.class); + + for (int i = 0; i < 20_000; ++i) { + Asserts.assertTrue(cmpAlwaysEqual1(args[1], args[1])); + Asserts.assertFalse(cmpAlwaysEqual2(args[1], args[1])); + Asserts.assertTrue(cmpAlwaysEqual3(args[1])); + Asserts.assertFalse(cmpAlwaysEqual4(args[1])); + + Asserts.assertFalse(cmpAlwaysUnEqual1(args[1], args[2])); + Asserts.assertTrue(cmpAlwaysUnEqual2(args[1], args[2])); + boolean compiled = WHITE_BOX.isMethodCompiled(cmpAlwaysUnEqual3_m, false); + boolean res = cmpAlwaysUnEqual3(args[3]); + if (ACmpOnValues != 3) { + Asserts.assertFalse(res); + } else if (compiled) { + Asserts.assertTrue(res); + } + compiled = WHITE_BOX.isMethodCompiled(cmpAlwaysUnEqual4_m, false); + res = cmpAlwaysUnEqual4(args[3]); + if (ACmpOnValues != 3) { + Asserts.assertTrue(res); + } else if (compiled) { + Asserts.assertFalse(res); + } + + int idx = i % args.length; + compiled = WHITE_BOX.isMethodCompiled(cmpSometimesEqual1_m, false); + res = cmpSometimesEqual1(args[idx]); + if (ACmpOnValues != 3) { + Asserts.assertEQ(res, args[idx] == null || !args[idx].getClass().isValue()); + } else if (compiled) { + Asserts.assertTrue(res); + } + compiled = WHITE_BOX.isMethodCompiled(cmpSometimesEqual2_m, false); + res = cmpSometimesEqual2(args[idx]); + if (ACmpOnValues != 3) { + Asserts.assertNE(res, args[idx] == null || !args[idx].getClass().isValue()); + } else if (compiled) { + Asserts.assertFalse(res); + } + } + } + + public static void main(String[] args) throws Exception { + if (Boolean.getBoolean("test.c1")) { + 
System.out.println("new acmp is not implemented for C1"); + return; + } + + int nullMode = Integer.valueOf(args[0]); + TestNewAcmp t = new TestNewAcmp(); + t.run(nullMode); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestNullableValueTypes.java 2019-03-11 14:27:38.754353875 +0100 @@ -0,0 +1,678 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import java.lang.invoke.*; +import java.lang.reflect.Method; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test correct handling of nullable value types. + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestNullableValueTypes.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. 
-XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestNullableValueTypes + */ +public class TestNullableValueTypes extends ValueTypeTest { + // Extra VM parameters for some test scenarios. See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 1: return new String[] {"-XX:-UseOptoBiasInlining"}; + case 2: return new String[] {"-XX:-UseBiasedLocking"}; + case 3: return new String[] {"-XX:-MonomorphicArrayCheck", "-XX:-UseBiasedLocking", "-XX:+ValueArrayFlatten"}; + case 4: return new String[] {"-XX:-MonomorphicArrayCheck"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestNullableValueTypes test = new TestNullableValueTypes(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class, Test17Value.class, Test21Value.class); + } + + static { + try { + Class clazz = TestNullableValueTypes.class; + ClassLoader loader = clazz.getClassLoader(); + MethodHandles.Lookup lookup = MethodHandles.lookup(); + + MethodType test18_mt = MethodType.methodType(void.class, MyValue1.class.asBoxType()); + test18_mh1 = lookup.findStatic(clazz, "test18_target1", test18_mt); + test18_mh2 = lookup.findStatic(clazz, "test18_target2", test18_mt); + + MethodType test19_mt = MethodType.methodType(void.class, MyValue1.class.asBoxType()); + test19_mh1 = lookup.findStatic(clazz, "test19_target1", test19_mt); + test19_mh2 = lookup.findStatic(clazz, "test19_target2", test19_mt); + } catch (NoSuchMethodException | IllegalAccessException e) { + e.printStackTrace(); + throw new RuntimeException("Method handle lookup failed"); + } + } + + private static final MyValue1 testValue1 = MyValue1.createWithFieldsInline(rI, rL); + private static final MyValue1[] testValue1Array = new MyValue1[] {testValue1, + 
testValue1, + testValue1}; + + MyValue1.box nullField; + MyValue1.val valueField1 = testValue1; + + @Test + public long test1(MyValue1.box vt) { + long result = 0; + try { + result = vt.hash(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return result; + } + + @DontCompile + public void test1_verifier(boolean warmup) throws Throwable { + long result = test1(null); + Asserts.assertEquals(result, 0L); + } + + @Test + public long test2(MyValue1.box vt) { + long result = 0; + try { + result = vt.hashInterpreted(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return result; + } + + @DontCompile + public void test2_verifier(boolean warmup) { + long result = test2(nullField); + Asserts.assertEquals(result, 0L); + } + + @Test + public long test3() { + long result = 0; + try { + if ((Object)nullField != null) { + throw new RuntimeException("nullField should be null"); + } + result = nullField.hash(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return result; + } + + @DontCompile + public void test3_verifier(boolean warmup) { + long result = test3(); + Asserts.assertEquals(result, 0L); + } + + @Test + public void test4() { + try { + valueField1 = nullField; + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + @DontCompile + public void test4_verifier(boolean warmup) { + test4(); + } + + @Test + public MyValue1.box test5(MyValue1.box vt) { + try { + Object o = vt; + vt = (MyValue1)o; + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + + // Should not throw + vt = test5_dontinline(vt); + vt = test5_inline(vt); + return vt; + } + + @DontCompile + public void test5_verifier(boolean warmup) { + MyValue1.box vt = 
test5(nullField); + Asserts.assertEquals((Object)vt, null); + } + + @DontInline + public MyValue1.box test5_dontinline(MyValue1.box vt) { + return vt; + } + + @ForceInline + public MyValue1.box test5_inline(MyValue1.box vt) { + return vt; + } + + @Test + public MyValue1 test6(Object obj) { + MyValue1 vt = MyValue1.createWithFieldsInline(rI, rL); + try { + vt = (MyValue1)obj; + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return vt; + } + + @DontCompile + public void test6_verifier(boolean warmup) { + MyValue1 vt = test6(null); + Asserts.assertEquals(vt.hash(), testValue1.hash()); + } + + @ForceInline + public MyValue1.box getNullInline() { + return null; + } + + @DontInline + public MyValue1.box getNullDontInline() { + return null; + } + + @Test + public void test7() throws Throwable { + nullField = getNullInline(); // Should not throw + nullField = getNullDontInline(); // Should not throw + try { + valueField1 = getNullInline(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + try { + valueField1 = getNullDontInline(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + @DontCompile + public void test7_verifier(boolean warmup) throws Throwable { + test7(); + } + + @Test + public void test8() throws Throwable { + try { + valueField1 = nullField; + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + @DontCompile + public void test8_verifier(boolean warmup) throws Throwable { + test8(); + } + + // merge of 2 values, one being null + @Test + public void test9(boolean flag1) { + MyValue1 v; + if (flag1) { + v = valueField1; + } else { + v = nullField; + } + valueField1 = v; + } + + @DontCompile + public void test9_verifier(boolean warmup) { + test9(true); + try { + test9(false); + 
throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + // null constant + @Test + public void test10(boolean flag) throws Throwable { + MyValue1.box val = flag ? valueField1 : null; + valueField1 = val; + } + + @DontCompile + public void test10_verifier(boolean warmup) throws Throwable { + test10(true); + try { + test10(false); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + // null constant + @Test + public void test11(boolean flag) throws Throwable { + MyValue1.box val = flag ? null : valueField1; + valueField1 = val; + } + + @DontCompile + public void test11_verifier(boolean warmup) throws Throwable { + test11(false); + try { + test11(true); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + // null return + int test12_cnt; + + @DontInline + public MyValue1.box test12_helper() { + test12_cnt++; + return nullField; + } + + @Test + public void test12() { + valueField1 = test12_helper(); + } + + @DontCompile + public void test12_verifier(boolean warmup) { + try { + test12_cnt = 0; + test12(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + if (test12_cnt != 1) { + throw new RuntimeException("call executed twice"); + } + } + + // null return at virtual call + class A { + public MyValue1.box test13_helper() { + return nullField; + } + } + + class B extends A { + public MyValue1.val test13_helper() { + return nullField; + } + } + + class C extends A { + public MyValue1.box test13_helper() { + return nullField; + } + } + + class D extends B { + public MyValue1.box test13_helper() { + return nullField; + } + } + + @Test + public void test13(A a) { + valueField1 = a.test13_helper(); + } + + @DontCompile + public void test13_verifier(boolean warmup) { + A b = new B(); + A c = new 
C(); + A d = new D(); + try { + test13(b); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + try { + test13(c); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + try { + test13(d); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + // Test writing null to a (flattened) value type array + @ForceInline + public void test14_inline(Object[] oa, Object o, int index) { + oa[index] = o; + } + + @Test() + public void test14(MyValue1[] va, int index) { + test14_inline(va, nullField, index); + } + + @DontCompile + public void test14_verifier(boolean warmup) { + int index = Math.abs(rI) % 3; + try { + test14(testValue1Array, index); + throw new RuntimeException("No NPE thrown"); + } catch (NullPointerException e) { + // Expected + } + Asserts.assertEQ(testValue1Array[index].hash(), testValue1.hash()); + } + + @DontInline + MyValue1.box getNullField1() { + return nullField; + } + + @DontInline + MyValue1.val getNullField2() { + return nullField; + } + + @Test() + public void test15() { + nullField = getNullField1(); // should not throw + try { + valueField1 = getNullField1(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + try { + valueField1 = getNullField2(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + @DontCompile + public void test15_verifier(boolean warmup) { + test15(); + } + + @DontInline + public boolean test16_dontinline(MyValue1.box vt) { + return (Object)vt == null; + } + + // Test c2c call passing null for a value type + @Test + @Warmup(10000) // Warmup to make sure 'test17_dontinline' is compiled + public boolean test16(Object arg) throws Exception { + Method test16method = 
getClass().getMethod("test16_dontinline", MyValue1.class.asBoxType()); + return (boolean)test16method.invoke(this, arg); + } + + @DontCompile + public void test16_verifier(boolean warmup) throws Exception { + boolean res = test16(null); + Asserts.assertTrue(res); + } + + // Test scalarization of default value type with non-flattenable field + value final class Test17Value { + public final MyValue1.box valueField; + + public Test17Value() { + valueField = MyValue1.createDefaultDontInline(); + } + + @ForceInline + public Test17Value setValueField(MyValue1 valueField) { + return __WithField(this.valueField, valueField); + } + } + + @Test() + public Test17Value test17(boolean b) { + Test17Value vt1 = Test17Value.default; + if ((Object)vt1.valueField != null) { + throw new RuntimeException("Should be null"); + } + Test17Value vt2 = vt1.setValueField(testValue1); + return b ? vt1 : vt2; + } + + @DontCompile + public void test17_verifier(boolean warmup) { + test17(true); + test17(false); + } + + static final MethodHandle test18_mh1; + static final MethodHandle test18_mh2; + + static MyValue1.box nullValue; + + @DontInline + static void test18_target1(MyValue1.box vt) { + nullValue = vt; + } + + @ForceInline + static void test18_target2(MyValue1.box vt) { + nullValue = vt; + } + + // Test passing null for a value type + @Test + @Warmup(11000) // Make sure lambda forms get compiled + public void test18() throws Throwable { + test18_mh1.invokeExact(nullValue); + test18_mh2.invokeExact(nullValue); + } + + @DontCompile + public void test18_verifier(boolean warmup) { + try { + test18(); + } catch (Throwable t) { + throw new RuntimeException("test18 failed", t); + } + } + + static MethodHandle test19_mh1; + static MethodHandle test19_mh2; + + @DontInline + static void test19_target1(MyValue1.box vt) { + nullValue = vt; + } + + @ForceInline + static void test19_target2(MyValue1.box vt) { + nullValue = vt; + } + + // Same as test12 but with non-final mh + @Test + @Warmup(11000) // 
Make sure lambda forms get compiled + public void test19() throws Throwable { + test19_mh1.invokeExact(nullValue); + test19_mh2.invokeExact(nullValue); + } + + @DontCompile + public void test19_verifier(boolean warmup) { + try { + test19(); + } catch (Throwable t) { + throw new RuntimeException("test19 failed", t); + } + } + + // Same as test12/13 but with constant null + @Test + @Warmup(11000) // Make sure lambda forms get compiled + public void test20(MethodHandle mh) throws Throwable { + mh.invoke(null); + } + + @DontCompile + public void test20_verifier(boolean warmup) { + try { + test20(test18_mh1); + test20(test18_mh2); + test20(test19_mh1); + test20(test19_mh2); + } catch (Throwable t) { + throw new RuntimeException("test20 failed", t); + } + } + + // Test writing null to a flattenable/non-flattenable value type field in a value type + value final class Test21Value { + final MyValue1.box valueField1; + final MyValue1.val valueField2; + final MyValue1.box alwaysNull; + + private Test21Value() { + valueField1 = testValue1; + valueField2 = testValue1; + alwaysNull = testValue1; + } + + @ForceInline + public Test21Value test1() { + return __WithField(this.valueField1, alwaysNull); // Should not throw NPE + } + + @ForceInline + public Test21Value test2() { + return __WithField(this.valueField2, alwaysNull); // Should throw NPE + } + } + + @Test + public Test21Value test21(Test21Value vt) { + vt = vt.test1(); + try { + vt = vt.test2(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + return vt; + } + + @DontCompile + public void test21_verifier(boolean warmup) { + test21(Test21Value.default); + } + + @DontInline + public MyValue1.val test22_helper() { + return nullField; + } + + @Test + public void test22() { + valueField1 = test22_helper(); + } + + @DontCompile + public void test22_verifier(boolean warmup) { + try { + test22(); + throw new RuntimeException("NullPointerException expected"); + } 
catch (NullPointerException e) { + // Expected + } + } + + @Test + public void test23(MyValue1[] arr, MyValue1.box b) { + arr[0] = b; + } + + @DontCompile + public void test23_verifier(boolean warmup) { + MyValue1[] arr = new MyValue1[2]; + MyValue1.box b = null; + try { + test23(arr, b); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } + + static MyValue1.box nullBox; + + @Test + public MyValue1 test24() { + return nullBox; + } + + @DontCompile + public void test24_verifier(boolean warmup) { + try { + test24(); + throw new RuntimeException("NullPointerException expected"); + } catch (NullPointerException e) { + // Expected + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestOnStackReplacement.java 2019-03-11 14:27:39.214353868 +0100 @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test on stack replacement (OSR) with value types + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestOnStackReplacement.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestOnStackReplacement + */ +public class TestOnStackReplacement extends ValueTypeTest { + // Extra VM parameters for some test scenarios. See ValueTypeTest.getVMParameters() + @Override + public String[] getExtraVMParameters(int scenario) { + switch (scenario) { + case 3: return new String[] {"-XX:-ValueArrayFlatten"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestOnStackReplacement test = new TestOnStackReplacement(); + test.run(args, MyValue1.class, MyValue2.class, MyValue2Inline.class); + } + + // Helper methods + + protected long hash() { + return hash(rI, rL); + } + + protected long hash(int x, long y) { + return MyValue1.createWithFieldsInline(x, y).hash(); + } + + // Test OSR compilation + @Test() + public long test1() { + MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); + MyValue1[] va = new MyValue1[Math.abs(rI) % 3]; + for (int i = 0; i < va.length; ++i) { + va[i] = MyValue1.createWithFieldsInline(rI, rL); + } + long result = 0; + // Long loop to trigger OSR compilation + for (int i = 0 ; i < 50_000; ++i) { + // Reference local value type in interpreter state + result = v.hash(); + for (int j = 0; j < va.length; ++j) { + result += va[j].hash(); + } + } + return result; + } + + @DontCompile + public void test1_verifier(boolean warmup) { + long result = 
test1(); + Asserts.assertEQ(result, ((Math.abs(rI) % 3) + 1) * hash()); + } + + // Test loop peeling + @Test(failOn = ALLOC + LOAD + STORE) + public void test2() { + MyValue1 v = MyValue1.createWithFieldsInline(0, 1); + // Trigger OSR compilation and loop peeling + for (int i = 0; i < 50_000; ++i) { + if (v.x != i || v.y != i + 1) { + // Uncommon trap + throw new RuntimeException("test2 failed"); + } + v = MyValue1.createWithFieldsInline(i + 1, i + 2); + } + } + + @DontCompile + public void test2_verifier(boolean warmup) { + test2(); + } + + // Test loop peeling and unrolling + @Test() + public void test3() { + MyValue1 v1 = MyValue1.createWithFieldsInline(0, 0); + MyValue1 v2 = MyValue1.createWithFieldsInline(1, 1); + // Trigger OSR compilation and loop peeling + for (int i = 0; i < 50_000; ++i) { + if (v1.x != 2*i || v2.x != i+1 || v2.y != i+1) { + // Uncommon trap + throw new RuntimeException("test3 failed"); + } + v1 = MyValue1.createWithFieldsInline(2*(i+1), 0); + v2 = MyValue1.createWithFieldsInline(i+2, i+2); + } + } + + @DontCompile + public void test3_verifier(boolean warmup) { + test3(); + } + + // OSR compilation with Object local + @DontCompile + public Object test4_init() { + return MyValue1.createWithFieldsInline(rI, rL); + } + + @DontCompile + public Object test4_body() { + return MyValue1.createWithFieldsInline(rI, rL); + } + + @Test() + public Object test4() { + Object vt = test4_init(); + for (int i = 0; i < 50_000; i++) { + if (i % 2 == 1) { + vt = test4_body(); + } + } + return vt; + } + + @DontCompile + public void test4_verifier(boolean warmup) { + test4(); + } + + // OSR compilation with null value type local + + MyValue1.box nullField; + + @Test() + public void test5() { + MyValue1.box vt = nullField; + for (int i = 0; i < 50_000; i++) { + if ((Object)vt != null) { + throw new RuntimeException("test5 failed: No NPE thrown"); + } + } + } + + @DontCompile + public void test5_verifier(boolean warmup) { + test5(); + } +} --- /dev/null 2019-03-11 
09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestOptimizeKlassCmp.java 2019-03-11 14:27:39.666353862 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8209687 + * @summary Verify that Parse::optimize_cmp_with_klass() works with value types. 
+ * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes TestOptimizeKlassCmp.java + * @run main/othervm -XX:+EnableValhalla -Xbatch + * compiler.valhalla.valuetypes.TestOptimizeKlassCmp + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +value class MyValue { + public final int x; + + public MyValue(int x) { + this.x = x; + } +} + +public class TestOptimizeKlassCmp { + + public static boolean test1(MyValue v1, MyValue v2) { + return v1.equals(v2); + } + + public static boolean test2(MyValue v1, MyValue v2) { + return v1.getClass().equals(v2.getClass()); + } + + public static boolean test3(Object o1, Object o2) { + return o1.getClass().equals(o2.getClass()); + } + + public static void main(String[] args) { + MyValue v1 = new MyValue(0); + MyValue v2 = new MyValue(1); + for (int i = 0; i < 10_000; ++i) { + Asserts.assertFalse(test1(v1, v2)); + Asserts.assertTrue(test1(v1, v1)); + Asserts.assertTrue(test2(v1, v2)); + Asserts.assertTrue(test2(v1, v1)); + Asserts.assertTrue(test3(v1, v2)); + Asserts.assertTrue(test3(v1, v1)); + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestUnloadedValueTypeArray.java 2019-03-11 14:27:40.122353856 +0100 @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8182997 8214898 + * @library /test/lib + * @summary Test the handling of Arrays of unloaded value classes. + * @compile -XDallowWithFieldOperator TestUnloadedValueTypeArray.java + * @run main/othervm -XX:+EnableValhalla -Xcomp + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test1 + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test2 + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test3 + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test4 + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test5 + * -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test6 + * TestUnloadedValueTypeArray + */ + +import jdk.test.lib.Asserts; + +value final class MyValue { + final int foo; + + private MyValue() { + foo = 0x42; + } +} + +value final class MyValue2 { + final int foo; + + private MyValue2() { + foo = 0x42; + } + static MyValue2 make(int n) { + return __WithField(MyValue2.default.foo, n); + } +} + +value final class MyValue3 { + final int foo; + + private MyValue3() { + foo = 0x42; + } + static MyValue3 make(int n) { + return __WithField(MyValue3.default.foo, n); + } +} + +value final class MyValue4 { + final int foo; + + private MyValue4() { + foo = 0x53; + } + static MyValue4 make(int n) { + return __WithField(MyValue4.default.foo, n); + } +} + +value final class MyValue5 { + final int foo; + + private MyValue5() { + foo = 0x53; + } + static MyValue5 make(int n) { + return __WithField(MyValue5.default.foo, n); + } +} + +value final class MyValue6 { + final int foo = 0; + + 
static MyValue6 make(int n) { + return __WithField(MyValue6.default.foo, n); + } + static MyValue6 make2(MyValue6 v, MyValue6[] dummy) { + return __WithField(v.foo, v.foo+1); + } +} + +public class TestUnloadedValueTypeArray { + + static MyValue[] target() { + return new MyValue[10]; + } + + static void test1() { + target(); + } + + static int test2(MyValue2[] arr) { + if (arr != null) { + return arr[1].foo; + } else { + return 1234; + } + } + + static void test2_verifier() { + int n = 50000; + + int m = 9999; + for (int i=0; i defaultFlags = Arrays.asList( + "-XX:-BackgroundCompilation", "-XX:CICompilerCount=1", + "-XX:CompileCommand=quiet", + "-XX:CompileCommand=compileonly,java.lang.invoke.*::*", + "-XX:CompileCommand=compileonly,java.lang.Long::sum", + "-XX:CompileCommand=compileonly,java.lang.Object::", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.*::*"); + private static final List printFlags = Arrays.asList( + "-XX:+PrintCompilation", "-XX:+PrintIdeal", "-XX:+PrintOptoAssembly"); + private static final List verifyFlags = Arrays.asList( + "-XX:+VerifyOops", "-XX:+VerifyStack", "-XX:+VerifyLastFrame", "-XX:+VerifyBeforeGC", "-XX:+VerifyAfterGC", + "-XX:+VerifyDuringGC", "-XX:+VerifyAdapterSharing"); + + protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + protected static final int ValueTypePassFieldsAsArgsOn = 0x1; + protected static final int ValueTypePassFieldsAsArgsOff = 0x2; + protected static final int ValueTypeArrayFlattenOn = 0x4; + protected static final int ValueTypeArrayFlattenOff = 0x8; + protected static final int ValueTypeReturnedAsFieldsOn = 0x10; + protected static final int ValueTypeReturnedAsFieldsOff = 0x20; + protected static final int AlwaysIncrementalInlineOn = 0x40; + protected static final int AlwaysIncrementalInlineOff = 0x80; + static final int AllFlags = ValueTypePassFieldsAsArgsOn | ValueTypePassFieldsAsArgsOff | ValueTypeArrayFlattenOn | ValueTypeArrayFlattenOff | ValueTypeReturnedAsFieldsOn; + 
protected static final boolean ValueTypePassFieldsAsArgs = (Boolean)WHITE_BOX.getVMFlag("ValueTypePassFieldsAsArgs"); + protected static final boolean ValueTypeArrayFlatten = (Boolean)WHITE_BOX.getVMFlag("ValueArrayFlatten"); + protected static final boolean ValueTypeReturnedAsFields = (Boolean)WHITE_BOX.getVMFlag("ValueTypeReturnedAsFields"); + protected static final boolean AlwaysIncrementalInline = (Boolean)WHITE_BOX.getVMFlag("AlwaysIncrementalInline"); + protected static final int COMP_LEVEL_ANY = -2; + protected static final int COMP_LEVEL_FULL_OPTIMIZATION = TEST_C1 ? 1 : 4; + protected static final Hashtable tests = new Hashtable(); + protected static final boolean USE_COMPILER = WHITE_BOX.getBooleanVMFlag("UseCompiler"); + protected static final boolean PRINT_IDEAL = WHITE_BOX.getBooleanVMFlag("PrintIdeal"); + + // Regular expressions used to match nodes in the PrintIdeal output + protected static final String START = "(\\d+\\t(.*"; + protected static final String MID = ".*)+\\t===.*"; + protected static final String END = ")|"; + protected static final String ALLOC = "(.*precise klass compiler/valhalla/valuetypes/MyValue.*\\R(.*(nop|spill).*\\R)*.*_new_instance_Java" + END; + protected static final String ALLOCA = "(.*precise klass \\[Lcompiler/valhalla/valuetypes/MyValue.*\\R(.*(nop|spill).*\\R)*.*_new_array_Java" + END; + protected static final String LOAD = START + "Load(B|S|I|L|F|D|P|N)" + MID + "@compiler/valhalla/valuetypes/MyValue.*" + END; + protected static final String LOADK = START + "LoadK" + MID + END; + protected static final String STORE = START + "Store(B|C|S|I|L|F|D|P|N)" + MID + "@compiler/valhalla/valuetypes/MyValue.*" + END; + protected static final String LOOP = START + "Loop" + MID + "" + END; + protected static final String TRAP = START + "CallStaticJava" + MID + "uncommon_trap.*(unstable_if|predicate)" + END; + protected static final String RETURN = START + "Return" + MID + "returns" + END; + protected static final String 
LINKTOSTATIC = START + "CallStaticJava" + MID + "linkToStatic" + END; + protected static final String NPE = START + "CallStaticJava" + MID + "null_check" + END; + protected static final String CALL = START + "CallStaticJava" + MID + END; + protected static final String STOREVALUETYPEFIELDS = START + "CallStaticJava" + MID + "store_value_type_fields" + END; + protected static final String SCOBJ = "(.*# ScObj.*" + END; + + public static String[] concat(String prefix[], String... extra) { + ArrayList list = new ArrayList(); + if (prefix != null) { + for (String s : prefix) { + list.add(s); + } + } + if (extra != null) { + for (String s : extra) { + list.add(s); + } + } + + return list.toArray(new String[list.size()]); + } + + /** + * Override getNumScenarios and getVMParameters if you want to run with more than + * the 5 built-in scenarios + */ + public int getNumScenarios() { + if (TEST_C1) { + return 1; + } else { + return 6; + } + } + + /** + * VM paramaters for the 5 built-in test scenarios. If your test needs to append + * extra parameters for (some of) these scenarios, override getExtraVMParameters(). 
+ */ + public String[] getVMParameters(int scenario) { + if (TEST_C1) { + return new String[] { + "-XX:+EnableValhallaC1", + }; + } + + switch (scenario) { + case 0: return new String[] { + "-XX:+AlwaysIncrementalInline", + "-XX:ValueArrayElemMaxFlatOops=-1", + "-XX:ValueArrayElemMaxFlatSize=-1", + "-XX:+ValueArrayFlatten", + "-XX:ValueFieldMaxFlatSize=-1", + "-XX:+ValueTypePassFieldsAsArgs", + "-XX:+ValueTypeReturnedAsFields"}; + case 1: return new String[] { + "-XX:-UseCompressedOops", + "-XX:ValueArrayElemMaxFlatOops=-1", + "-XX:ValueArrayElemMaxFlatSize=-1", + "-XX:+ValueArrayFlatten", + "-XX:ValueFieldMaxFlatSize=-1", + "-XX:-ValueTypePassFieldsAsArgs", + "-XX:-ValueTypeReturnedAsFields"}; + case 2: return new String[] { + "-DVerifyIR=false", + "-XX:-UseCompressedOops", + "-XX:ValueArrayElemMaxFlatOops=0", + "-XX:ValueArrayElemMaxFlatSize=0", + "-XX:-ValueArrayFlatten", + "-XX:ValueFieldMaxFlatSize=0", + "-XX:+ValueTypePassFieldsAsArgs", + "-XX:+ValueTypeReturnedAsFields", + "-XX:+StressValueTypePassFieldsAsArgs", + "-XX:+StressValueTypeReturnedAsFields"}; + case 3: return new String[] { + "-DVerifyIR=false", + "-XX:+AlwaysIncrementalInline", + "-XX:ValueArrayElemMaxFlatOops=0", + "-XX:ValueArrayElemMaxFlatSize=0", + "-XX:ValueFieldMaxFlatSize=0", + "-XX:-ValueTypePassFieldsAsArgs", + "-XX:-ValueTypeReturnedAsFields"}; + case 4: return new String[] { + "-DVerifyIR=false", + "-XX:ValueArrayElemMaxFlatOops=-1", + "-XX:ValueArrayElemMaxFlatSize=-1", + "-XX:+ValueArrayFlatten", + "-XX:ValueFieldMaxFlatSize=0", + "-XX:+ValueTypePassFieldsAsArgs", + "-XX:-ValueTypeReturnedAsFields", + "-XX:+StressValueTypePassFieldsAsArgs"}; + case 5: return new String[] { + "-XX:+AlwaysIncrementalInline", + "-XX:ValueArrayElemMaxFlatOops=-1", + "-XX:ValueArrayElemMaxFlatSize=-1", + "-XX:+ValueArrayFlatten", + "-XX:ValueFieldMaxFlatSize=-1", + "-XX:-ValueTypePassFieldsAsArgs", + "-XX:-ValueTypeReturnedAsFields"}; + } + + return null; + } + + /** + * Override this method to provide 
extra parameters for selected scenarios + */ + public String[] getExtraVMParameters(int scenario) { + return null; + } + + public static void main(String[] args) throws Throwable { + if (args.length != 1) { + throw new RuntimeException("Usage: @run main/othervm/timeout=120 -Xbootclasspath/a:." + + " -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions" + + " -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla" + + " compiler.valhalla.valuetypes.ValueTypeTest "); + } + String testMainClassName = args[0]; + Class testMainClass = Class.forName(testMainClassName); + ValueTypeTest test = (ValueTypeTest)testMainClass.newInstance(); + List scenarios = null; + if (!SCENARIOS.isEmpty()) { + scenarios = Arrays.asList(SCENARIOS.split(",")); + } + for (int i=0; i= 0; i--) { + String ex = exclude.get(i); + if (ex.indexOf(".") > 0) { + if (ex.startsWith(classPrefix)) { + ex = ex.substring(classPrefix.length()); + exclude.set(i, ex); + } else { + exclude.remove(i); + } + } + } + } + return exclude; + } + + protected ValueTypeTest() { + List list = null; + if (!TESTLIST.isEmpty()) { + list = Arrays.asList(TESTLIST.split(",")); + } + List exclude = buildExcludeList(); + + // Gather all test methods and put them in Hashtable + for (Method m : getClass().getDeclaredMethods()) { + Test[] annos = m.getAnnotationsByType(Test.class); + if (annos.length != 0 && + ((list == null || list.contains(m.getName())) && (exclude == null || !exclude.contains(m.getName())))) { + tests.put(getClass().getSimpleName() + "::" + m.getName(), m); + } + } + } + + protected void run(String[] args, Class... 
classes) throws Throwable { + if (args.length == 0) { + // Spawn a new VM instance + execute_vm(); + } else { + // Execute tests + run(classes); + } + } + + private void execute_vm() throws Throwable { + Asserts.assertFalse(tests.isEmpty(), "no tests to execute"); + ArrayList args = new ArrayList(defaultFlags); + String[] vmInputArgs = InputArguments.getVmInputArgs(); + for (String arg : vmInputArgs) { + if (arg.startsWith("-XX:CompileThreshold")) { + // Disable IR verification if non-default CompileThreshold is set + VERIFY_IR = false; + } + } + if (VERIFY_IR) { + // Add print flags for IR verification + args.addAll(printFlags); + // Always trap for exception throwing to not confuse IR verification + args.add("-XX:-OmitStackTraceInFastThrow"); + } + if (VERIFY_VM) { + args.addAll(verifyFlags); + } + // Run tests in own process and verify output + args.add(getClass().getName()); + args.add("run"); + // Spawn process with default JVM options from the test's run command + String[] cmds = Arrays.copyOf(vmInputArgs, vmInputArgs.length + args.size()); + System.arraycopy(args.toArray(), 0, cmds, vmInputArgs.length, args.size()); + OutputAnalyzer oa = ProcessTools.executeTestJvm(cmds); + // If ideal graph printing is enabled/supported, verify output + String output = oa.getOutput(); + oa.shouldHaveExitValue(0); + if (VERIFY_IR) { + if (output.contains("PrintIdeal enabled")) { + parseOutput(output); + } else { + System.out.println(output); + System.out.println("WARNING: IR verification failed! 
Running with -Xint, -Xcomp or release build?"); + } + } + } + + private void parseOutput(String output) throws Exception { + Pattern comp_re = Pattern.compile("\\n\\s+\\d+\\s+\\d+\\s+(%| )(s| )(!| )b(n| )\\s+\\S+\\.(?[^.]+::\\S+)\\s+(?@ \\d+\\s+)?[(]\\d+ bytes[)]\\n"); + Matcher m = comp_re.matcher(output); + Map compilations = new LinkedHashMap<>(); + int prev = 0; + String methodName = null; + while (m.find()) { + if (prev == 0) { + // Print header + System.out.print(output.substring(0, m.start()+1)); + } else if (methodName != null) { + compilations.put(methodName, output.substring(prev, m.start()+1)); + } + if (m.group("osr") != null) { + methodName = null; + } else { + methodName = m.group("name"); + } + prev = m.end(); + } + if (prev == 0) { + // Print header + System.out.print(output); + } else if (methodName != null) { + compilations.put(methodName, output.substring(prev)); + } + // Iterate over compilation output + for (String testName : compilations.keySet()) { + Method test = tests.get(testName); + if (test == null) { + // Skip helper methods + continue; + } + String graph = compilations.get(testName); + if (PRINT_GRAPH) { + System.out.println("\nGraph for " + testName + "\n" + graph); + } + // Parse graph using regular expressions to determine if it contains forbidden nodes + Test[] annos = test.getAnnotationsByType(Test.class); + Test anno = null; + for (Test a : annos) { + if ((a.valid() & ValueTypePassFieldsAsArgsOn) != 0 && ValueTypePassFieldsAsArgs) { + assert anno == null; + anno = a; + } else if ((a.valid() & ValueTypePassFieldsAsArgsOff) != 0 && !ValueTypePassFieldsAsArgs) { + assert anno == null; + anno = a; + } else if ((a.valid() & ValueTypeArrayFlattenOn) != 0 && ValueTypeArrayFlatten) { + assert anno == null; + anno = a; + } else if ((a.valid() & ValueTypeArrayFlattenOff) != 0 && !ValueTypeArrayFlatten) { + assert anno == null; + anno = a; + } else if ((a.valid() & ValueTypeReturnedAsFieldsOn) != 0 && ValueTypeReturnedAsFields) { + assert 
anno == null; + anno = a; + } else if ((a.valid() & ValueTypeReturnedAsFieldsOff) != 0 && !ValueTypeReturnedAsFields) { + assert anno == null; + anno = a; + } else if ((a.valid() & AlwaysIncrementalInlineOn) != 0 && AlwaysIncrementalInline) { + assert anno == null; + anno = a; + } else if ((a.valid() & AlwaysIncrementalInlineOff) != 0 && !AlwaysIncrementalInline) { + assert anno == null; + anno = a; + } + } + assert anno != null; + String regexFail = anno.failOn(); + if (!regexFail.isEmpty()) { + Pattern pattern = Pattern.compile(regexFail.substring(0, regexFail.length()-1)); + Matcher matcher = pattern.matcher(graph); + boolean found = matcher.find(); + Asserts.assertFalse(found, "Graph for '" + testName + "' contains forbidden node:\n" + (found ? matcher.group() : "")); + } + String[] regexMatch = anno.match(); + int[] matchCount = anno.matchCount(); + for (int i = 0; i < regexMatch.length; ++i) { + Pattern pattern = Pattern.compile(regexMatch[i].substring(0, regexMatch[i].length()-1)); + Matcher matcher = pattern.matcher(graph); + int count = 0; + String nodes = ""; + while (matcher.find()) { + count++; + nodes += matcher.group() + "\n"; + } + if (matchCount[i] < 0) { + Asserts.assertLTE(Math.abs(matchCount[i]), count, "Graph for '" + testName + "' contains different number of match nodes:\n" + nodes); + } else { + Asserts.assertEQ(matchCount[i], count, "Graph for '" + testName + "' contains different number of match nodes:\n" + nodes); + } + } + tests.remove(testName); + System.out.println(testName + " passed"); + } + // Check if all tests were compiled + if (tests.size() != 0) { + for (String name : tests.keySet()) { + System.out.println("Test '" + name + "' not compiled!"); + } + throw new RuntimeException("Not all tests were compiled"); + } + } + + private void setup(Class clazz) { + if (XCOMP) { + // Don't control compilation if -Xcomp is enabled + return; + } + if (DUMP_REPLAY) { + // Generate replay compilation files + String directive = "[{ match: 
\"*.*\", DumpReplay: true }]"; + if (WHITE_BOX.addCompilerDirective(directive) != 1) { + throw new RuntimeException("Failed to add compiler directive"); + } + } + + Method[] methods = clazz.getDeclaredMethods(); + for (Method m : methods) { + if (m.isAnnotationPresent(Test.class)) { + // Don't inline tests + WHITE_BOX.testSetDontInlineMethod(m, true); + } + if (m.isAnnotationPresent(DontCompile.class)) { + WHITE_BOX.makeMethodNotCompilable(m, COMP_LEVEL_ANY, true); + WHITE_BOX.makeMethodNotCompilable(m, COMP_LEVEL_ANY, false); + WHITE_BOX.testSetDontInlineMethod(m, true); + } + if (m.isAnnotationPresent(ForceInline.class)) { + WHITE_BOX.testSetForceInlineMethod(m, true); + } else if (m.isAnnotationPresent(DontInline.class)) { + WHITE_BOX.testSetDontInlineMethod(m, true); + } + } + + // Compile class initializers + WHITE_BOX.enqueueInitializerForCompilation(clazz, COMP_LEVEL_FULL_OPTIMIZATION); + } + + private void run(Class... classes) throws Exception { + if (USE_COMPILER && PRINT_IDEAL && !XCOMP) { + System.out.println("PrintIdeal enabled"); + } + System.out.format("rI = %d, rL = %d\n", rI, rL); + + setup(getClass()); + for (Class clazz : classes) { + setup(clazz); + } + + // Execute tests + TreeMap durations = PRINT_TIMES ? new TreeMap() : null; + for (Method test : tests.values()) { + long startTime = System.nanoTime(); + Method verifier = getClass().getMethod(test.getName() + "_verifier", boolean.class); + // Warmup using verifier method + Warmup anno = test.getAnnotation(Warmup.class); + int warmup = anno == null ? 
WARMUP : anno.value(); + for (int i = 0; i < warmup; ++i) { + verifier.invoke(this, true); + } + // Trigger compilation + WHITE_BOX.enqueueMethodForCompilation(test, COMP_LEVEL_FULL_OPTIMIZATION); + Asserts.assertTrue(!USE_COMPILER || WHITE_BOX.isMethodCompiled(test, false), test + " not compiled"); + // Check result + verifier.invoke(this, false); + if (PRINT_TIMES) { + long endTime = System.nanoTime(); + long duration = (endTime - startTime); + durations.put(duration, test.getName()); + } + } + + // Print execution times + if (PRINT_TIMES) { + System.out.println("\n\nTest execution times:"); + for (Map.Entry entry : durations.entrySet()) { + System.out.format("%-10s%15d ns\n", entry.getValue() + ":", entry.getKey()); + } + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/hack/GetUnresolvedValueFieldWrongSignature.java 2019-03-11 14:27:41.934353831 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +class TestUnloadedValueTypeField { + static class MyValue3 { + int foo; + } + + static class MyValue3Holder { + MyValue3 v; + } +} + +class GetUnresolvedValueFieldWrongSignature { + static int test3(TestUnloadedValueTypeField.MyValue3Holder holder3) { + if (holder3 != null) { + return holder3.v.foo + 3; + } else { + return 0; + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/libTestJNICalls.c 2019-03-11 14:27:42.390353824 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include + +JNIEXPORT jobject JNICALL +Java_compiler_valhalla_valuetypes_TestJNICalls_testMethod1(JNIEnv *env, jobject receiver, jobject vt) { + jclass cls = (*env)->GetObjectClass(env, receiver); + jmethodID mid = (*env)->GetMethodID(env, cls, "test1", "(Qcompiler/valhalla/valuetypes/MyValue1;Z)Qcompiler/valhalla/valuetypes/MyValue1;"); + return (*env)->CallObjectMethod(env, receiver, mid, vt, JNI_TRUE); +} + +JNIEXPORT jlong JNICALL +Java_compiler_valhalla_valuetypes_TestJNICalls_testMethod2(JNIEnv *env, jobject receiver, jobject vt) { + jclass cls = (*env)->GetObjectClass(env, vt); + jmethodID mid = (*env)->GetMethodID(env, cls, "hash", "()J"); + return (*env)->CallLongMethod(env, vt, mid); +} + +JNIEXPORT jint JNICALL +Java_compiler_valhalla_valuetypes_TestJNICalls_00024MyValueWithNative_testMethod3(JNIEnv *env, jobject receiver) { + jclass cls = (*env)->GetObjectClass(env, receiver); + jfieldID fid = (*env)->GetFieldID(env, cls, "x", "I"); + return (*env)->GetIntField(env, receiver, fid); +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/CheckcastTest.java 2019-03-11 14:27:42.842353818 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test CheckcastTest + * @summary checkcast bytecode test + * @library /test/lib + * @compile VDefaultTest.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.CheckcastTest + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.CheckcastTest + */ + +public class CheckcastTest { + + static value class Point { + int x; + int y; + + public Point() { + x = 0; + y = 0; + } + + public Point(int x, int y) { + this.x = x; + this.y = y; + } + } + + + static void testCastingFromObjectToVal(Object o) { + boolean npe = false; + try { + Point.val pv = (Point.val)o; + } catch(NullPointerException e) { + npe = true; + } + Asserts.assertTrue(npe == false || o == null, "Casting null to val should throw a NPE"); + } + + static void testCastingFromValToBox(Point.val p) { + boolean npe = false; + try { + Point.box pb = p; + } catch(NullPointerException e) { + npe = true; + } + Asserts.assertFalse(npe, "Casting from val to box should not throw an NPE"); + } + + static void testCastingFromBoxToVal(Point.box p) { + boolean npe = false; + try { + Point.val pv = p; + } catch(NullPointerException e) { + npe = true; + } + if (npe) { + Asserts.assertEquals(p, null, "NPE must be thrown only if p is null"); + } else { + Asserts.assertNotEquals(p, null, "Casting null to val must thrown a NPE"); + } + + } + + public static void main(String[] args) { + // Testing casting from box to val + // First invocation: casting null to Point.val with an unresolved class entry + 
testCastingFromBoxToVal(null); + // Second invocation: casting non-null to val, will trigger resolution of the class entry + testCastingFromBoxToVal(new Point(3,4)); + // Third invocation: casting null to Point.val with a resolved class entry + testCastingFromBoxToVal(null); + + // Testing casting from val to box + testCastingFromBoxToVal(new Point(3,4)); + + // Testing casting from object to val + // First invocation: casting null to Point.val with an unresolved class entry + testCastingFromObjectToVal(null); + // Second invocation: casting non-null to al, will trigger resolution of the class entry + testCastingFromObjectToVal(new Point(3,4)); + // Third invocation: casting null to Point.val with a resolved class entry"); + testCastingFromObjectToVal(null); + // Fourth invocation: with something not the right type + boolean cce = false; + try { + testCastingFromObjectToVal(new String("NotPoint")); + } catch(ClassCastException e) { + cce = true; + } + Asserts.assertTrue(cce,"casting invalid type to val should throw CCE"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/Empty.java 2019-03-11 14:27:43.298353812 +0100 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +value final class EmptyValue { + + private EmptyValue() { + } + + public static EmptyValue createEmptyValue() { + EmptyValue e = EmptyValue.default; + return e; + } +} + +class EmptyTest { + public void run() { + EmptyValue.createEmptyValue(); + throw new RuntimeException("Expected class file parse error"); + } +} + +/** + * @test Empty + * @summary Test empty value type + * @compile -XDemitQtypes -XDenableValueTypes -XDallowEmptyValues Empty.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.Empty + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.Empty + */ +public class Empty { + public static void main(String[] args) { + try { + EmptyTest test = new EmptyTest(); + test.run(); + } catch (ClassFormatError cfe) {} + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/FlattenableSemanticTest.java 2019-03-11 14:27:43.754353806 +0100 @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes; + +import java.lang.invoke.*; + +import jdk.experimental.value.MethodHandleBuilder; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Flattenable field semantic test + * @modules java.base/jdk.experimental.bytecode + * java.base/jdk.experimental.value + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java JumboValue.java + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator FlattenableSemanticTest.java + * @run main/othervm -Xint -XX:ValueFieldMaxFlatSize=64 -XX:+EnableValhalla runtime.valhalla.valuetypes.FlattenableSemanticTest + * @run main/othervm -Xcomp -XX:+EnableValhalla -XX:ValueFieldMaxFlatSize=64 runtime.valhalla.valuetypes.FlattenableSemanticTest + * // debug: -XX:+PrintValueLayout -XX:-ShowMessageBoxOnError + */ +public class FlattenableSemanticTest { + + static Point.box nfsp; + static Point.val fsp; + + Point.box nfip; + Point.val fip; + + static JumboValue.box nfsj; + static JumboValue.val fsj; + + JumboValue.box nfij; + JumboValue.val fij; + + static Object getNull() { + return null; + } + + FlattenableSemanticTest() { } + + public static void main(String[] args) { + FlattenableSemanticTest test = new FlattenableSemanticTest(); + + // Uninitialized value fields must be null for non flattenable fields + Asserts.assertNull(nfsp, "Invalid non null value for unitialized non flattenable field"); + 
Asserts.assertNull(nfsj, "Invalid non null value for unitialized non flattenable field"); + Asserts.assertNull(test.nfip, "Invalid non null value for unitialized non flattenable field"); + Asserts.assertNull(test.nfij, "Invalid non null value for unitialized non flattenable field"); + + // fsp.equals(null); + + // Uninitialized value fields must be non null for flattenable fields + Asserts.assertNotNull(fsp, "Invalid null value for unitialized flattenable field"); + Asserts.assertNotNull(fsj, "Invalid null value for unitialized flattenable field"); + Asserts.assertNotNull(test.fip, "Invalid null value for unitialized flattenable field"); + Asserts.assertNotNull(test.fij, "Invalid null value for unitialized flattenable field"); + + // Assigning null must be allowed for non flattenable value fields + boolean exception = true; + try { + nfsp = (Point.box)getNull(); + nfsp = null; + exception = false; + } catch (NullPointerException e) { + exception = true; + } + Asserts.assertFalse(exception, "Invalid NPE when assigning null to a non flattenable field"); + + try { + nfsj = (JumboValue.box)getNull(); + nfsj = null; + exception = false; + } catch (NullPointerException e) { + exception = true; + } + Asserts.assertFalse(exception, "Invalid NPE when assigning null to a non flattenable field"); + + try { + test.nfip = (Point.box)getNull(); + test.nfip = null; + exception = false; + } catch (NullPointerException e) { + exception = true; + } + Asserts.assertFalse(exception, "Invalid NPE when assigning null to a non flattenable field"); + + try { + test.nfij = (JumboValue.box)getNull(); + test.nfij = null; + exception = false; + } catch (NullPointerException e) { + exception = true; + } + Asserts.assertFalse(exception, "Invalid NPE when assigning null to a non flattenable field"); + + // Assigning null to a flattenable value field must trigger a NPE + exception = false; + try { + fsp = (Point)getNull(); + } catch(NullPointerException e) { + exception = true; + } + 
Asserts.assertTrue(exception, "NPE not thrown when assigning null to a flattenable field"); + exception = false; + try { + fsj = (JumboValue)getNull(); + } catch(NullPointerException e) { + exception = true; + } + Asserts.assertTrue(exception, "NPE not thrown when assigning null to a flattenable field"); + exception = false; + try { + test.fip = (Point)getNull(); + } catch(NullPointerException e) { + exception = true; + } + Asserts.assertTrue(exception, "NPE not thrown when assigning null to a flattenable field"); + exception = false; + try { + test.fij = (JumboValue)getNull(); + } catch(NullPointerException e) { + exception = true; + } + Asserts.assertTrue(exception, "NPE not thrown when assigning null to a flattenable field"); + exception = false; + } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/IntValue.java 2019-03-11 14:27:44.214353799 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +public final value class IntValue { + final int val; + + IntValue() { + val = 0; + } + + public int getInt() { return val; } + + public static IntValue create(int val) { + IntValue iv = IntValue.default; + iv.val = val; + return iv; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/JumboValue.java 2019-03-11 14:27:44.670353793 +0100 @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +public value final class JumboValue { + final long l0; + final long l1; + final long l2; + final long l3; + final long l4; + final long l5; + final long l6; + final long l7; + final long l8; + final long l9; + final long l10; + final long l11; + final long l12; + final long l13; + final long l14; + final long l15; + final long l16; + final long l17; + final long l18; + final long l19; + + private JumboValue() { + l0 = 0; + l1 = 0; + l2 = 0; + l3 = 0; + l4 = 0; + l5 = 0; + l6 = 0; + l7 = 0; + l8 = 0; + l9 = 0; + l10 = 0; + l11 = 0; + l12 = 0; + l13 = 0; + l14 = 0; + l15 = 0; + l16 = 0; + l17 = 0; + l18 = 0; + l19 = 0; + } + + public static JumboValue createJumboValue() { + JumboValue j = JumboValue.default; + return j; + } + + public JumboValue update(long l0, long l1) { + JumboValue j = __WithField(this.l0, l0); + j = __WithField(j.l1, l1); + j = __WithField(j.l2, l0 + l1); + j = __WithField(j.l3, l1 + l2); + j = __WithField(j.l4, l2 + l3); + j = __WithField(j.l5, l3 + l4); + j = __WithField(j.l6, l4 + l5); + j = __WithField(j.l7, l5 + l6); + j = __WithField(j.l8, l6 + l7); + j = __WithField(j.l9, l7 + l8); + j = __WithField(j.l10, l8 + l9); + j = __WithField(j.l11, l9 + l10); + j = __WithField(j.l12, l10 + l11); + j = __WithField(j.l13, l11 + l12); + j = __WithField(j.l14, l12 + l13); + j = __WithField(j.l15, l13 + l14); + j = __WithField(j.l16, l14 + l15); + j = __WithField(j.l17, l15 + l16); + j = __WithField(j.l18, l16 + l17); + j = __WithField(j.l19, l17 + l18); + return j; + } + + public boolean verify() { + return (l2 == (l0 + l1) && l3 == (l1 + l2) && l4 == (l2 + l3) && l5 == (l3 + l4) + && l6 == (l4 + l5) && l7 == (l5 + l6) && l8 == (l6 + l7) + && l9 == (l7 + l8) && l10 == (l8 + l9) && l11 == (l9 + l10) + && l12 == (l10 + l11) && l13 == (l11 + l12) && l14 == (l12 + l13) + && l15 == (l13 + l14) && l16 == (l14 + l15) && l17 == (l15 + l16) + && l18 == (l16 + l17) && l19 == (l17 + l18)); + } + + public boolean equals(Object o) { + 
if(o instanceof JumboValue) { + JumboValue j = (JumboValue)o; + return (l0 == j.l0 && l1 == j.l1 && l2 == j.l2 && l3 == j.l3 + && l4 == j.l4 && l5 == j.l5 && l6 == j.l6 && l7 == j.l7 + && l8 == j.l8 && l9 == j.l9 && l10 == j.l10 && l7 == j.l10 + && l11 == j.l11 && l12 == j.l12 && l13 == j.l13 && l4 == j.l14 + && l15 == j.l15 && l16 == j.l16 && l17 == j.l17 && l18 == j.l18 + && l19 == j.l19); + } else { + return false; + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/Long8Value.java 2019-03-11 14:27:45.130353787 +0100 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +public final value class Long8Value { + + final long longField1; + final long longField2; + final long longField3; + final long longField4; + final long longField5; + final long longField6; + final long longField7; + final long longField8; + + private Long8Value() { + longField1 = 0; + longField2 = 0; + longField3 = 0; + longField4 = 0; + longField5 = 0; + longField6 = 0; + longField7 = 0; + longField8 = 0; + } + + public long getLongField1() { return longField1; } + public long getLongField2() { return longField2; } + public long getLongField3() { return longField3; } + public long getLongField4() { return longField4; } + public long getLongField5() { return longField5; } + public long getLongField6() { return longField6; } + public long getLongField7() { return longField7; } + public long getLongField8() { return longField8; } + + public static Long8Value create(long long1, + long long2, + long long3, + long long4, + long long5, + long long6, + long long7, + long long8) { + Long8Value l8v = Long8Value.default; + l8v = __WithField(l8v.longField1, long1); + l8v = __WithField(l8v.longField2, long2); + l8v = __WithField(l8v.longField3, long3); + l8v = __WithField(l8v.longField4, long4); + l8v = __WithField(l8v.longField5, long5); + l8v = __WithField(l8v.longField6, long6); + l8v = __WithField(l8v.longField7, long7); + l8v = __WithField(l8v.longField8, long8); + return l8v; + } + + static void check(Long8Value value, + long long1, + long long2, + long long3, + long long4, + long long5, + long long6, + long long7, + long long8) { + Asserts.assertEquals(value.getLongField1(), long1, "Field 1 incorrect"); + Asserts.assertEquals(value.getLongField2(), long2, "Field 2 incorrect"); + Asserts.assertEquals(value.getLongField3(), long3, "Field 3 incorrect"); + Asserts.assertEquals(value.getLongField4(), long4, "Field 4 incorrect"); + Asserts.assertEquals(value.getLongField5(), long5, "Field 5 
incorrect"); + Asserts.assertEquals(value.getLongField6(), long6, "Field 6 incorrect"); + Asserts.assertEquals(value.getLongField7(), long7, "Field 7 incorrect"); + Asserts.assertEquals(value.getLongField8(), long8, "Field 8 incorrect"); + } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ObjectMethods.java 2019-03-11 14:27:45.590353780 +0100 @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +import java.lang.invoke.*; + +import jdk.experimental.value.MethodHandleBuilder; + +/* + * @test ObjectMethods + * @summary Check object method implemented by the VM behave with value types + * @modules java.base/jdk.experimental.bytecode + * java.base/jdk.experimental.value + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator ObjectMethods.java + * @run main/othervm -Xint -XX:+EnableValhalla -XX:+UseBiasedLocking -XX:+UseCompressedClassPointers runtime.valhalla.valuetypes.ObjectMethods + * @run main/othervm -Xint -XX:+EnableValhalla -XX:-UseBiasedLocking -XX:-UseCompressedClassPointers runtime.valhalla.valuetypes.ObjectMethods + * @run main/othervm -Xint -XX:+EnableValhalla -noverify runtime.valhalla.valuetypes.ObjectMethods noverify + * @run main/othervm -Xcomp -XX:+EnableValhalla -XX:+UseBiasedLocking -XX:+UseCompressedClassPointers runtime.valhalla.valuetypes.ObjectMethods + * @run main/othervm -Xcomp -XX:+EnableValhalla -XX:-UseBiasedLocking -XX:-UseCompressedClassPointers runtime.valhalla.valuetypes.ObjectMethods + * @run main/othervm -Xcomp -XX:+EnableValhalla -noverify runtime.valhalla.valuetypes.ObjectMethods noverify + */ + +public class ObjectMethods { + + public static void main(String[] args) { + testObjectMethods((args.length > 0 && args[0].equals("noverify"))); + } + + public static void testObjectMethods(boolean verifierDisabled) { + MyInt val = MyInt.create(7); + MyInt sameVal = MyInt.create(7); + + // Exercise all the Object native/VM methods... + + if (verifierDisabled) { // Just noverifier... 
+ checkMonitorExit(val); + return; + } + + // getClass() + checkGetClass(val, MyInt.class); + + //hashCode()/identityHashCode() + checkHashCodes(val, sameVal.hashCode()); + + // clone() + checkNotCloneable(val); + + // synchronized + checkSynchronized(val); + + // wait/notify() + checkWait(val); + checkNotify(val); + + System.gc(); + } + + + static void checkGetClass(Object val, Class expectedClass) { + Class clazz = val.getClass(); + if (clazz == null) { + throw new RuntimeException("getClass return null"); + } else if (clazz != expectedClass) { + throw new RuntimeException("getClass (" + clazz + ") doesn't match " + expectedClass); + } + } + + // Just check we don't crash the VM + static void checkHashCodes(Object val, int expectedHashCode) { + if (val.hashCode() != expectedHashCode) { + throw new RuntimeException("Hash code mismatch value: " + val.hashCode() + + " expected: " + expectedHashCode); + } + } + + static void checkNotCloneable(MyInt val) { + boolean sawCnse = false; + try { + val.attemptClone(); + } catch (CloneNotSupportedException cnse) { + sawCnse = true; + } + if (!sawCnse) { + throw new RuntimeException("clone() did not fail"); + } + // Cloneable value type checked by "BadValueTypes" CFP tests + } + + static void checkSynchronized(Object val) { + boolean sawImse = false; + try { + synchronized (val) { + throw new IllegalStateException("Unreachable code, reached"); + } + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } + if (!sawImse) { + throw new RuntimeException("monitorenter did not fail"); + } + // synchronized method modifiers tested by "BadValueTypes" CFP tests + // jni monitor ops tested by "ValueWithJni" + } + + // Check we haven't broken the mismatched monitor block check... 
+ static void checkMonitorExit(Object val) { + boolean sawImse = false; + try { + MethodHandleBuilder.loadCode(MethodHandles.lookup(), + "mismatchedMonitorExit", + MethodType.methodType(Void.TYPE, Object.class), + CODE->{ + CODE + .aload(0) + .monitorexit() + .return_(); + }).invokeExact(val); + throw new IllegalStateException("Unreachable code, reached"); + } catch (Throwable t) { + if (t instanceof IllegalMonitorStateException) { + sawImse = true; + } else { + throw new RuntimeException(t); + } + } + if (!sawImse) { + throw new RuntimeException("monitorexit did not fail"); + } + } + + static void checkWait(Object val) { + boolean sawImse = false; + try { + val.wait(); + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } catch (InterruptedException intExc) { + throw new RuntimeException(intExc); + } + if (!sawImse) { + throw new RuntimeException("wait() did not fail"); + } + + sawImse = false; + try { + val.wait(1l); + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } catch (InterruptedException intExc) { + throw new RuntimeException(intExc); + } + if (!sawImse) { + throw new RuntimeException("wait() did not fail"); + } + + sawImse = false; + try { + val.wait(0l, 100); + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } catch (InterruptedException intExc) { + throw new RuntimeException(intExc); + } + if (!sawImse) { + throw new RuntimeException("wait() did not fail"); + } + } + + static void checkNotify(Object val) { + boolean sawImse = false; + try { + val.notify(); + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } + if (!sawImse) { + throw new RuntimeException("notify() did not fail"); + } + + sawImse = false; + try { + val.notifyAll(); + } catch (IllegalMonitorStateException imse) { + sawImse = true; + } + if (!sawImse) { + throw new RuntimeException("notifyAll() did not fail"); + } + } + + static final value class MyInt { + final int value; + private MyInt() { value = 0; } + public static 
MyInt create(int v) { + MyInt mi = MyInt.default; + mi = __WithField(mi.value, v); + return mi; + } + public Object attemptClone() throws CloneNotSupportedException { + try { // Check it is not possible to clone... + MethodHandles.Lookup lookup = MethodHandles.lookup(); + MethodHandle mh = lookup.findVirtual(getClass(), + "clone", + MethodType.methodType(Object.class)); + return mh.invokeExact(this); + } catch (Throwable t) { + if (t instanceof CloneNotSupportedException) { + throw (CloneNotSupportedException) t; + } + throw new RuntimeException(t); + } + } + } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/Person.java 2019-03-11 14:27:46.046353774 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +public final value class Person { + + final int id; + final String firstName; + final String lastName; + + private Person() { + id = 0; + firstName = null; + lastName = null; + } + + public int getId() { return id; } + public String getFirstName() { return firstName; } + public String getLastName() { return lastName; } + + public String toString() { + return getFirstName() + " " + getLastName() + " (id=" + getId() + ")"; + } + + static Person create(int id, String firstName, String lastName) { + Person p = Person.default; + p = __WithField(p.id, id); + p = __WithField(p.firstName, firstName); + p = __WithField(p.lastName, lastName); + return p; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/PersonVcc.java 2019-03-11 14:27:46.498353768 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +@jdk.incubator.mvt.ValueCapableClass +public final class PersonVcc { + final int id; + final String firstName; + final String lastName; + + private PersonVcc(int id, String firstName, String lastName) { + this.id = id; + this.firstName = firstName; + this.lastName = lastName; + } + + public int getId() { return id; } + public String getFirstName() { return firstName; } + public String getLastName() { return lastName; } + + public String toString() { + return getFirstName() + " " + getLastName() + " (id=" + getId() + ")"; + } + + public static PersonVcc create(int id, String firstName, String lastName) { + return new PersonVcc(id, firstName, lastName); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/Point.java 2019-03-11 14:27:46.950353761 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +public value final class Point { + final int x; + final int y; + + private Point() { + x = 0; + y = 0; + } + + public int getX() { return x; } + public int getY() { return y; } + + public boolean isSamePoint(Point that) { + return this.getX() == that.getX() && this.getY() == that.getY(); + } + + public String toString() { + return "Point: x=" + getX() + " y=" + getY(); + } + + public boolean equals(Object o) { + if(o instanceof Point) { + return ((Point)o).x == x && ((Point)o).y == y; + } else { + return false; + } + } + + public static Point createPoint(int x, int y) { + Point p = Point.default; + p = __WithField(p.x, x); + p = __WithField(p.y, y); + return p; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/QuickeningTest.java 2019-03-11 14:27:47.406353755 +0100 @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test QuickeningTest + * @summary Test quickening of getfield and putfield applied to value fields + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java JumboValue.java QuickeningTest.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.QuickeningTest + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.QuickeningTest + */ + +public class QuickeningTest { + + static class Parent { + Point.box nfp; /* Not flattenable value field */ + Point.val fp; /* Flattenable and flattened value field */ + JumboValue.val fj; /* Flattenable not flattened value field */ + + public void setNfp(Point p) { nfp = p; } + public void setFp(Point p) { fp = p; } + public void setFj(JumboValue j) { fj = j; } + } + + static class Child extends Parent { + // This class inherited fields from the Parent class + Point.box nfp2; /* Not flattenable value field */ + Point.val fp2; /* Flattenable and flattened value field */ + JumboValue.val fj2; /* Flattenable not flattene value field */ + + public void setNfp2(Point p) { nfp2 = p; } + public void setFp2(Point p) { fp2 = p; } + public void setFj2(JumboValue j) { fj2 = j; } + } + + static final value class Value { + final Point.box nfp; /* Not flattenable value field */ + final Point.val fp; /* Flattenable and flattened value field */ + final JumboValue.val fj; /* Flattenable not flattene value field */ + + private Value() { + nfp = Point.createPoint(0, 0); + fp = Point.createPoint(0, 0); + fj = JumboValue.createJumboValue(); + } + + public static Value create() { + return Value.default; + } + } + + static void testUninitializedFields() { + Parent p = new Parent(); + Asserts.assertEquals(p.nfp, null, "invalid uninitialized not flattenable"); + Asserts.assertEquals(p.fp.x, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(p.fp.y, 0, 
"invalid value for uninitialized flattened field"); + Asserts.assertEquals(p.fj.l0, 0L, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(p.fj.l1, 0L, "invalid value for uninitialized flattened field"); + + Child c = new Child(); + Asserts.assertEquals(c.nfp, null, "invalid uninitialized not flattenable field"); + Asserts.assertEquals(c.fp.x, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.fp.y, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.fj.l0, 0L, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.fj.l1, 0L, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.nfp2, null, "invalid uninitialized not flattenable"); + Asserts.assertEquals(c.fp2.x, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.fp2.y, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(c.fj2.l0, 0L, "invalid value for uninitialized not flattened field"); + Asserts.assertEquals(c.fj2.l1, 0L, "invalid value for uninitialized not flattened field"); + + Value v = Value.create(); + Asserts.assertEquals(v.nfp, null, "invalid uninitialized not flattenable"); + Asserts.assertEquals(v.fp.x, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(v.fp.y, 0, "invalid value for uninitialized flattened field"); + Asserts.assertEquals(v.fj.l0, 0L, "invalid value for uninitialized not flattened field"); + Asserts.assertEquals(v.fj.l1, 0L, "invalid value for uninitialized not flattened field"); + } + + static void testPutfieldAndGetField() { + Point p1 = Point.createPoint(16, 47); + Point p2 = Point.createPoint(32, 64); + + JumboValue j1 = JumboValue.createJumboValue().update(4, 5); + JumboValue j2 = JumboValue.createJumboValue().update(7, 9); + + Parent p = new Parent(); + // executing each setter twice to test quickened bytecodes + p.setNfp(p1); + p.setNfp(p2); + p.setFp(p2); + p.setFp(p1); + 
p.setFj(j1); + p.setFj(j2); + + Asserts.assertTrue(p.nfp.equals(p2), "invalid updated not flattenable field"); + Asserts.assertEquals(p.fp.x, 16, "invalid value for updated flattened field"); + Asserts.assertEquals(p.fp.y, 47, "invalid value for updated flattened field"); + Asserts.assertTrue(p.fj.equals(j2), "invalid value for updated not flattened field"); + + Child c = new Child(); + c.setNfp(p1); + c.setNfp(p2); + c.setFp(p2); + c.setFp(p1); + c.setFj(j1); + c.setFj(j2); + c.setNfp2(p2); + c.setNfp2(p1); + c.setFp2(p1); + c.setFp2(p2); + c.setFj2(j2); + c.setFj2(j1); + + Asserts.assertTrue(c.nfp.equals(p2), "invalid updated not flattenable field"); + Asserts.assertEquals(c.fp.x, 16, "invalid value for updated flattened field"); + Asserts.assertEquals(c.fp.y, 47, "invalid value for updated flattened field"); + Asserts.assertTrue(c.fj.equals(j2), "invalid value for updated not flattened field"); + + Asserts.assertTrue(c.nfp2.equals(p1), "invalid updated not flattenable field"); + Asserts.assertEquals(c.fp2.x, 32, "invalid value for updated flattened field"); + Asserts.assertEquals(c.fp2.y, 64, "invalid value for updated flattened field"); + Asserts.assertTrue(c.fj2.equals(j1), "invalid value for updated not flattened field"); + } + + public static void main(String[] args) { + testUninitializedFields(); + testUninitializedFields(); // run twice to test quickened bytecodes + testPutfieldAndGetField(); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/Test8186715.java 2019-03-11 14:27:47.862353749 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +/* + * @test Test8186715 + * @summary test return of buffered value passed in argument by caller + * @library /test/lib + * @compile -XDemitQtypes -XDallowWithFieldOperator -XDenableValueTypes Test8186715.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.Test8186715 + * @run main/othervm -XX:+EnableValhalla runtime.valhalla.valuetypes.Test8186715 + */ + +public class Test8186715 { + + public static void main(String[] args) { + MyValueType v = MyValueType.testDefault(); + + for (int i = 0; i < 1000000; i++) { + MyValueType.testBranchArg1(false, v); + } + } +} + +value final class MyValueType { + final int i; + final int j; + + private MyValueType() { + i = 0; + j = 0; + } + + static MyValueType testDefault() { + return MyValueType.default; + } + + static MyValueType testBranchArg1(boolean flag, MyValueType v1) { + if (flag) { + v1 = __WithField(v1.i, 3); + v1 = __WithField(v1.j, 4); + } + return v1; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestFieldNullability.java 2019-03-11 14:27:48.318353743 +0100 @@ -0,0 +1,113 @@ +/** + * @test TestFieldNullability + * @library /test/lib + * @compile -XDemitQtypes 
-XDenableValueTypes -XDallowWithFieldOperator TestFieldNullability.java + * @run main/othervm -Xint -Xmx128m -XX:+EnableValhalla -XX:-ShowMessageBoxOnError -XX:ValueFieldMaxFlatSize=32 + * runtime.valhalla.valuetypes.TestFieldNullability + */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +public class TestFieldNullability { + static value class MyValue { + int x; + + public MyValue() { + x = 314; + } + } + + static value class MyBigValue { + long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9; + long l10, l11, l12, l13, l14, l15, l16, l17, l18, l19; + + public MyBigValue() { + l0 = l1 = l2 = l3 = l4 = l5 = l6 = l7 = l8 = l9 = 271; + l10 = l11 = l12 = l13 = l14 = l15 = l16 = l17 = l18 = l19 = 271; + } + } + + static value class TestValue { + final MyValue.box nullableField; + final MyValue.val nullfreeField; // flattened + final MyValue.box nullField; // src of null + final MyBigValue.val nullfreeBigField; // not flattened + final MyBigValue.box nullBigField; // src of null + + public void test() { + Asserts.assertNull(nullField, "Invalid non null value for for unitialized non flattenable field"); + Asserts.assertNull(nullBigField, "Invalid non null value for for unitialized non flattenable field"); + boolean NPE = false; + try { + TestValue tv = __WithField(this.nullableField, nullField); + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertFalse(NPE, "Invalid NPE when assigning null to a non flattenable field"); + try { + TestValue tv = __WithField(this.nullfreeField, nullField); + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertTrue(NPE, "Missing NPE when assigning null to a flattened field"); + try { + TestValue tv = __WithField(this.nullfreeBigField, nullBigField); + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertTrue(NPE, "Missing NPE when assigning null to a flattenable field"); + } + + public TestValue() { + nullableField = MyValue.default; + nullfreeField = MyValue.default; + 
nullField = MyValue.default; // fake assignment + nullfreeBigField = MyBigValue.default; + nullBigField = MyBigValue.default; // fake assignment + + } + } + + static class TestClass { + MyValue.box nullableField; + MyValue.val nullfreeField; // flattened + MyValue.box nullField; + MyBigValue.val nullfreeBigField; // not flattened + MyBigValue.box nullBigField; + + public void test() { + Asserts.assertNull(nullField, "Invalid non null value for for unitialized non flattenable field"); + Asserts.assertNull(nullBigField, "Invalid non null value for for unitialized non flattenable field"); + boolean NPE = false; + try { + nullableField = nullField; + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertFalse(NPE, "Invalid NPE when assigning null to a non flattenable field"); + try { + this.nullfreeField = nullField; + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertTrue(NPE, "Missing NPE when assigning null to a flattened field"); + try { + this.nullfreeBigField = nullBigField; + } catch(NullPointerException e) { + NPE = true; + } + Asserts.assertTrue(NPE, "Missing NPE when assigning null to a flattenable field"); + } + } + + public static void main(String[] args) { + TestClass tc = new TestClass(); + tc.test(); + TestValue tv = + TestValue.default; + tv.test(); + } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestInheritedValueTypeFields.java 2019-03-11 14:27:48.794353736 +0100 @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test TestInheritedValueTypeFields + * @summary Test if value field klasses are correctly retrieved for inherited fields + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator -XDallowFlattenabilityModifiers Point.java TestInheritedValueTypeFields.java + * @run main/othervm -XX:+EnableValhalla runtime.valhalla.valuetypes.TestInheritedValueTypeFields + */ + +class A { + Point.val p; +} + +class B extends A { + +} + +class C extends B { + int i; +} + +class D { + long l; +} + +class E extends D { + Point.val p1; +} + +class F extends E { + +} + +class G extends F { + Point.val p2; +} + +public class TestInheritedValueTypeFields { + + public static void main(String[] args) { + for (int i = 0; i < 100000; i++) { + run(); + } + } + + public static void run() { + B b = new B(); + Asserts.assertEquals(b.p.x, 0); + Asserts.assertEquals(b.p.y, 0); + b.p = Point.createPoint(1,2); + Asserts.assertEquals(b.p.x, 1); + Asserts.assertEquals(b.p.y, 2); + + G g = new G(); + Asserts.assertEquals(g.p1.x, 0); + Asserts.assertEquals(g.p1.y, 0); + Asserts.assertEquals(g.p2.x, 0); + Asserts.assertEquals(g.p2.y, 0); + g.p1 = Point.createPoint(1,2); + 
g.p2 = Point.createPoint(3,4); + Asserts.assertEquals(g.p1.x, 1); + Asserts.assertEquals(g.p1.y, 2); + Asserts.assertEquals(g.p2.x, 3); + Asserts.assertEquals(g.p2.y, 4); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestValue1.java 2019-03-11 14:27:49.298353729 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +final class ContainerValue1 { + static TestValue1.box staticValueField; + TestValue1.val nonStaticValueField; + TestValue1[] valueArray; +} + +public value final class TestValue1 { + + static TestValue1.box staticValue = getInstance(); + + final int i; + final String name; + + private TestValue1() { + i = (int)System.nanoTime(); + name = Integer.valueOf(i).toString(); + } + + public static TestValue1 create(int i) { + TestValue1 v = TestValue1.default; + v = __WithField(v.i, i); + v = __WithField(v.name, Integer.valueOf(i).toString()); + return v; + } + + public static TestValue1 create() { + TestValue1 v = TestValue1.default; + v = __WithField(v.i, (int)System.nanoTime()); + v = __WithField(v.name, Integer.valueOf(v.i).toString()); + return v; + } + + public static TestValue1 getInstance() { + return create(); + } + + public static TestValue1 getNonBufferedInstance() { + return staticValue; + } + + public boolean verify() { + if (name == null) return i == 0; + return Integer.valueOf(i).toString().compareTo(name) == 0; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestValue2.java 2019-03-11 14:27:49.774353722 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes; + +final class ContainerValue2 { + static TestValue2.box staticValueField; + TestValue2.val nonStaticValueField; + TestValue2[] valueArray; +} + +public value final class TestValue2 { + static TestValue2.box staticValue = getInstance(); + + final long l; + final double d; + final String s; + + private TestValue2() { + l = System.nanoTime(); + s = Long.valueOf(l).toString(); + d = Double.parseDouble(s); + } + + public static TestValue2 create(long l) { + TestValue2 v = TestValue2.default; + v = __WithField(v.l, l); + v = __WithField(v.s, Long.valueOf(l).toString()); + v = __WithField(v.d, Double.parseDouble(v.s)); + return v; + } + + public static TestValue2 create() { + TestValue2 v = TestValue2.default; + v = __WithField(v.l, System.nanoTime()); + v = __WithField(v.s, Long.valueOf(v.l).toString()); + v = __WithField(v.d, Double.parseDouble(v.s)); + return v; + } + + public static TestValue2 getInstance() { + return create(); + } + + public static TestValue2 getNonBufferedInstance() { + return staticValue; + } + + public boolean verify() { + if (s == null) { + return d == 0 && l == 0; + } + return Long.valueOf(l).toString().compareTo(s) == 0 + && Double.parseDouble(s) == d; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestValue3.java 2019-03-11 14:27:50.222353716 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes; + +final class ContainerValue3 { + static TestValue3.box staticValueField; + TestValue3.val nonStaticValueField; + TestValue3[] valueArray; +} + +public value final class TestValue3 { + + static TestValue3.box staticValue = getInstance(); + + final byte b; + + private TestValue3() { + b = 123; + } + + public static TestValue3 create(byte b) { + TestValue3 v = TestValue3.default; + v = __WithField(v.b, b); + return v; + } + + public static TestValue3 create() { + TestValue3 v = TestValue3.default; + v = __WithField(v.b, 123); + return v; + } + + public static TestValue3 getInstance() { + return create(); + } + + public static TestValue3 getNonBufferedInstance() { + return staticValue; + } + + public boolean verify() { + return b == 0 || b == 123; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/TestValue4.java 2019-03-11 14:27:50.678353710 +0100 @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes; + +import java.nio.ByteBuffer; + +final class ContainerValue4 { + static TestValue4.box staticValueField; + TestValue4.val nonStaticValueField; + TestValue4[] valueArray; +} + +public value final class TestValue4 { + + static TestValue4.box staticValue = getInstance(); + + final byte b1; + final byte b2; + final byte b3; + final byte b4; + final short s1; + final short s2; + final int i; + final long l; + final String val; + + private TestValue4() { + i = (int)System.nanoTime(); + val = Integer.valueOf(i).toString(); + l = ((long)i) << Integer.SIZE | i; + s1 = (short)(i & ~Short.MIN_VALUE); + s2 = (short)(i >> Short.SIZE); + b1 = (byte)(i & ~Byte.MIN_VALUE); + b2 = (byte)((i >> Byte.SIZE) & ~Byte.MIN_VALUE); + b3 = (byte)((i >> (2 * Byte.SIZE)) & ~Byte.MIN_VALUE); + b4 = (byte)((i >> (3 * Byte.SIZE)) & ~Byte.MIN_VALUE); + } + + public static TestValue4 create(int i) { + TestValue4 v = TestValue4.default; + v = __WithField(v.i, i); + v = 
__WithField(v.val, Integer.valueOf(i).toString()); + ByteBuffer bf = ByteBuffer.allocate(8); + bf.putInt(0, i); + bf.putInt(4, i); + v = __WithField(v.l, bf.getLong(0)); + v = __WithField(v.s1, bf.getShort(2)); + v = __WithField(v.s2, bf.getShort(0)); + v = __WithField(v.b1, bf.get(3)); + v = __WithField(v.b2, bf.get(2)); + v = __WithField(v.b3, bf.get(1)); + v = __WithField(v.b4, bf.get(0)); + return v; + } + + public static TestValue4 create() { + return create((int)System.nanoTime()); + } + + public static TestValue4 getInstance() { + return create(); + } + + public static TestValue4 getNonBufferedInstance() { + return staticValue; + } + + public boolean verify() { + if (val == null) { + return i == 0 && l == 0 && b1 == 0 && b2 == 0 && b3 == 0 && b4 == 0 + && s1 == 0 && s2 == 0; + } + ByteBuffer bf = ByteBuffer.allocate(8); + bf.putInt(0, i); + bf.putInt(4, i); + long nl = bf.getLong(0); + bf.clear(); + bf.putShort(0, s2); + bf.putShort(2, s1); + int from_s = bf.getInt(0); + bf.clear(); + bf.put(0, b4); + bf.put(1, b3); + bf.put(2, b2); + bf.put(3, b1); + int from_b = bf.getInt(0); + return l == nl && Integer.valueOf(i).toString().compareTo(val) == 0 + && from_s == i && from_b == i; + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/UninitializedValueFieldsTest.java 2019-03-11 14:27:51.134353704 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Uninitialized value fields test + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator -XDallowFlattenabilityModifiers Point.java JumboValue.java UninitializedValueFieldsTest.java + * @run main/othervm -Xint -XX:ValueFieldMaxFlatSize=64 -XX:+EnableValhalla runtime.valhalla.valuetypes.UninitializedValueFieldsTest + * @run main/othervm -Xcomp -XX:+EnableValhalla -XX:ValueFieldMaxFlatSize=64 runtime.valhalla.valuetypes.UninitializedValueFieldsTest + */ +public class UninitializedValueFieldsTest { + static Point.box nonFlattenableStaticPoint; + static Point.val staticPoint; + + Point instancePoint; + + static JumboValue.box sj1; + static JumboValue.val sj2; + + JumboValue.box j1; + JumboValue.val j2; + + static Object getNull() { + return null; + } + + UninitializedValueFieldsTest() { } + + public static void main(String[] args) { + checkUninitializedPoint(UninitializedValueFieldsTest.staticPoint, 0, 0); + Asserts.assertEquals(nonFlattenableStaticPoint, null, "invalid non flattenable static value field"); + UninitializedValueFieldsTest.staticPoint = Point.createPoint(456, 678); + checkUninitializedPoint(UninitializedValueFieldsTest.staticPoint, 456, 678); + UninitializedValueFieldsTest test = new UninitializedValueFieldsTest(); + checkUninitializedPoint(test.instancePoint, 0, 0); + test.instancePoint = 
Point.createPoint(123, 345); + checkUninitializedPoint(test.instancePoint, 123, 345); + + Asserts.assertEquals(test.j1, null, "invalid non flattenable instance value field"); + Asserts.assertEquals(test.j2.l0, 0L, "invalid flattenable instance value field"); + } + + static void checkUninitializedPoint(Point p, int x, int y) { + Asserts.assertEquals(p.x, x, "invalid x value"); + Asserts.assertEquals(p.y, y, "invalid y value"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/UnsafeTest.java 2019-03-11 14:27:51.590353697 +0100 @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +/* + * @test UnsafeTest + * @summary unsafe get/put/with value + * @modules java.base/jdk.internal.misc + * @library /test/lib + * @compile -XDallowWithFieldOperator Point.java UnsafeTest.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.UnsafeTest + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.UnsafeTest + */ + +import jdk.internal.misc.Unsafe; + +import java.lang.reflect.*; +import java.util.List; +import static jdk.test.lib.Asserts.*; + +public class UnsafeTest { + static final Unsafe U = Unsafe.getUnsafe(); + + static value class Value1 { + Point point; + Point[] array; + Value1() { + this.point = Point.createPoint(1, 1); + this.array = new Point[0]; + } + + static Value1 create(Point p, Point... points) { + Value1 o = Value1.default; + o = __WithField(o.point, p); + o = __WithField(o.array, points); + return o; + } + } + + static value class Value2 { + int i; + Value1 v; + + Value2() { + this.i = 0; + this.v = Value1.create(Point.createPoint(0,0), new Point[0]); + } + + static Value2 create(Value1 v, int i) { + Value2 o = Value2.default; + o = __WithField(o.v, v); + o = __WithField(o.i, i); + return o; + } + } + + static value class Value3 { + Object o; + Value2 v; + + Value3() { + this.v = Value2.create(Value1.create(Point.createPoint(0,0), new Point[0]), 0); + this.o = new Object(); + } + + static Value3 create(Value2 v, Object ref) { + Value3 o = Value3.default; + o = __WithField(o.v, v); + o = __WithField(o.o, ref); + return o; + } + } + + + public static void main(String[] args) throws Throwable { + printValueClass(Value3.class, 0); + + Value1 v1 = Value1.create(Point.createPoint(10,10), Point.createPoint(20,20), Point.createPoint(30,30)); + Value2 v2 = Value2.create(v1, 20); + Value3 v3 = Value3.create(v2, List.of("Value3")); + long off_o = U.objectFieldOffset(Value3.class, "o"); + long off_v = U.objectFieldOffset(Value3.class, "v"); + long off_i 
= U.objectFieldOffset(Value2.class, "i"); + long off_v2 = U.objectFieldOffset(Value2.class, "v"); + + long off_point = U.objectFieldOffset(Value1.class, "point"); + + /* + * Layout of Value3 + * + * | valueheader | o | i | x | y | array | + * ^-------^ + * Point + * ^---------------^ + * Value1 + * + * ^-------------------^ + * Value2 + */ + Value3 v = v3; + try { + v = U.makePrivateBuffer(v); + // patch v3.o + U.putObject(v, off_o, List.of("Value1", "Value2", "Value3")); + // patch v3.v.i; + U.putInt(v, off_v + off_i - U.valueHeaderSize(Value2.class), 999); + // patch v3.v.v.point + U.putValue(v, off_v + off_v2 - U.valueHeaderSize(Value2.class) + off_point - U.valueHeaderSize(Value1.class), + Point.class, Point.createPoint(100, 100)); + } finally { + v = U.finishPrivateBuffer(v); + } + + assertEquals(v.v.v.point, Point.createPoint(100, 100)); + assertEquals(v.v.i, 999); + assertEquals(v.o, List.of("Value1", "Value2", "Value3")); + assertEquals(v.v.v.array, v1.array); + + Value1 nv1 = Value1.create(Point.createPoint(70,70), Point.createPoint(80,80), Point.createPoint(90,90)); + Value2 nv2 = Value2.create(nv1, 100); + Value3 nv3 = Value3.create(nv2, List.of("Value1", "Value2", "Value3")); + + try { + v = U.makePrivateBuffer(v); + // patch v3.v + U.putValue(v, off_v2, Value2.class, nv2); + } finally { + v = U.finishPrivateBuffer(v); + } + assertEquals(v, nv3); + } + + static void printValueClass(Class vc, int level) { + String indent = ""; + for (int i=0; i < level; i++) { + indent += " "; + } + System.out.format("%s%s header size %d%n", indent, vc, U.valueHeaderSize(vc)); + for (Field f : vc.getDeclaredFields()) { + System.out.format("%s%s: %s%s offset %d%n", indent, f.getName(), + U.isFlattened(f) ? 
"flattened " : "", f.getType(), + U.objectFieldOffset(vc, f.getName())); + if (U.isFlattened(f)) { + printValueClass(f.getType(), level+1); + } + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/VDefaultTest.java 2019-03-11 14:27:52.050353691 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test VDefaultTest + * @summary vdefault bytecode test + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator -XDallowFlattenabilityModifiers VDefaultTest.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.VDefaultTest + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.VDefaultTest + */ + +public class VDefaultTest { + + static value final class Point { + final int x; + final int y; + + static Point make() { + Point p = Point.default; + return p; + } + + Point() { + x = 0; + y = 0; + } + } + + static value final class Value { + final char c; + final byte b; + final short s; + final int i; + final long l; + final float f; + final double d; + final Point.val p; + + static Value make() { + Value p = Value.default; + return p; + } + + Value () { + c = 0; + b = 0; + s = 0; + i = 0; + l = 0; + f = 0; + d = 0; + p = Point.make(); + } + } + + public static void main(String[] args) { + creationTest(); + creationTest(); + } + + static void creationTest() { + Value v = Value.make(); + Asserts.assertEquals(v.c, (char)0, "invalid char default value"); + Asserts.assertEquals(v.b, (byte)0, "invalid char default value"); + Asserts.assertEquals(v.s, (short)0, "invalid short default value"); + Asserts.assertEquals(v.i, 0, "invalid int default value"); + Asserts.assertEquals(v.l, 0L, "invalid long default value"); + Asserts.assertEquals(v.f, 0.0F, "invalid float default value"); + Asserts.assertEquals(v.d, 0.0D, "invalid double default value"); + Asserts.assertEquals(v.p.x, 0, "invalid embedded value type value"); + Asserts.assertEquals(v.p.y, 0, "invalid embedded value type value"); + } +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/VWithFieldTest.java 
2019-03-11 14:27:52.506353685 +0100 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test VWithFieldTest + * @summary vwithfield bytecode test + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator VWithFieldTest.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.VWithFieldTest + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.VWithFieldTest + */ + +public class VWithFieldTest { + + static value final class Point { + final private int x; + final private int y; + + static Point make(int x, int y) { + Point p = Point.default; + Asserts.assertEquals(p.x, 0, "invalid x default value"); + Asserts.assertEquals(p.y, 0, "invalid y default value"); + p = __WithField(p.x, x); + Asserts.assertEquals(p.x, x, "invalid x value"); + Asserts.assertEquals(p.y, 0, "invalid y value"); + p = __WithField(p.y, y); + Asserts.assertEquals(p.x, x, "invalid x value"); + Asserts.assertEquals(p.y, y, "invalid y value"); + return p; + } + + Point () { + x = 0; + y = 0; + } + + public int getX() { + return x; + } + + static Point setX(Point p, int x) { + p = __WithField(p.x, x); + return p; + } + + public int getY() { + return y; + } + + static Point setY(Point p, int y) { + p = __WithField(p.y, y); + return p; + } + } + + public static void main(String[] args) { + creationTest(); + creationTest(); + witherTest(); + witherTest(); + } + + static void creationTest() { + Point p = Point.make(10,20); + Asserts.assertEquals(p.x, 10, "invalid x value"); + Asserts.assertEquals(p.y, 20, "invalid y value"); + } + + static void witherTest() { + Point p1 = Point.make(2,12); + Asserts.assertEquals(p1.x, 2, "invalid x value"); + Asserts.assertEquals(p1.y, 12, "invalid y value"); + Point p2 = Point.setX(p1,3); + Asserts.assertEquals(p2.x, 3, "invalid x value"); + Asserts.assertEquals(p2.y, 12, "invalid y value"); + Point p3 = 
Point.setY(p2, 14); + Asserts.assertEquals(p3.x, 3, "invalid x value"); + Asserts.assertEquals(p3.y, 14, "invalid y value"); + } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueCapableClass.java 2019-03-11 14:27:53.010353678 +0100 @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +@jdk.incubator.mvt.ValueCapableClass +public final class ValueCapableClass { + + public static final int DEFAULT_X = 11; + public static final short DEFAULT_Y = 13; + public static final short DEFAULT_Z = 15; + public static final String STATIC_FIELD = "Should be left alone"; + + public final int x; + public final short y; + public final short z; + + private ValueCapableClass() { + this(DEFAULT_X, DEFAULT_Y, DEFAULT_Z); + } + + private ValueCapableClass(int x, short y, short z) { + this.x = x; + this.y = y; + this.z = z; + } + + public int getX() { + return x; + } + + public short getY() { + return y; + } + + public short getZ() { + return z; + } + + public String toString() { + int ax = getX(); + short ay = getY(); + short az = getZ(); + return "ValueCapableClass x=" + ax + " y=" + ay + " z=" + az; + } + + public static ValueCapableClass create(int x, short y, short z) { + return new ValueCapableClass(x, y, z); + } + + public static ValueCapableClass create() { + return new ValueCapableClass(); + } + + public static void test() { + ValueCapableClass value = create(4711, (short)7, (short)11); + String s = value.toString(); + if ((value.getX() != 4711) || (value.getY() != 7) || value.getZ() != 11) { + throw new IllegalStateException("Bad value: " + s); + } + System.out.println(s); + ValueCapableClass defaultValue = create(); + s = defaultValue.toString(); + if ((defaultValue.getX() != DEFAULT_X) || + (defaultValue.getY() != DEFAULT_Y) || + (defaultValue.getZ() != DEFAULT_Z)) { + throw new IllegalStateException("Bad value: " + s); + } + + if (!STATIC_FIELD.equals("Should be left alone")) { + throw new IllegalStateException("Bad static field: " + STATIC_FIELD); + } + } + + public static void main(String[] args) { + test(); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueOops.java 2019-03-11 14:27:53.506353671 +0100 @@ -0,0 +1,664 @@ +/* + * Copyright (c) 
2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package runtime.valhalla.valuetypes; + +import java.lang.invoke.*; +import java.lang.ref.*; +import java.util.concurrent.*; + +import static jdk.test.lib.Asserts.*; +import jdk.test.lib.Utils; +import sun.hotspot.WhiteBox; + +import jdk.experimental.value.MethodHandleBuilder; + +/** + * @test ValueOops + * @requires vm.gc == null + * @requires vm.opt.ExplicitGCInvokesConcurrent != true + * @summary Test embedding oops into Value types + * @modules java.base/jdk.experimental.bytecode + * java.base/jdk.experimental.value + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Person.java + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator ValueOops.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xint -XX:+UseSerialGC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops + * @run main/othervm -Xint -XX:+UseG1GC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops 100 + * @run main/othervm -Xint -XX:+UseParallelGC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops + * @run main/othervm -Xcomp -XX:+UseSerialGC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops + * @run main/othervm -Xcomp -XX:+UseG1GC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops 100 + * @run main/othervm -Xcomp -XX:+UseParallelGC -Xmx128m -XX:+EnableValhalla + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * runtime.valhalla.valuetypes.ValueOops + */ +public class ValueOops { + + // Extra debug: -XX:+VerifyOops -XX:+VerifyStack -XX:+VerifyLastFrame -XX:+VerifyBeforeGC -XX:+VerifyAfterGC -XX:+VerifyDuringGC -XX:VerifySubSet=threads,heap + // Even more debugging: -XX:+TraceNewOopMapGeneration -Xlog:gc*=info + + static final int NOF_PEOPLE = 1000; // Exercise arrays of this size + + static int MIN_ACTIVE_GC_COUNT = 10; // Run active workload for this number of GC passes + + static int MED_ACTIVE_GC_COUNT = 4; // Medium life span in terms of GC passes + + static final String TEST_STRING1 = "Test String 1"; + static final String TEST_STRING2 = "Test String 2"; + + static boolean USE_COMPILER = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompiler"); + + static MethodHandles.Lookup LOOKUP = MethodHandles.lookup(); + + public static void main(String[] args) { + if (args.length > 0) { + MIN_ACTIVE_GC_COUNT = Integer.parseInt(args[0]); + } + testClassLoad(); + testValues(); + + if (!USE_COMPILER) { 
+ testOopMaps(); + } + + // Check we survive GC... + testOverGc(); // Exercise root scan / oopMap + testActiveGc(); // Brute force + } + + /** + * Test ClassFileParser can load values with reference fields + */ + public static void testClassLoad() { + String s = Person.class.toString(); + new Bar(); + new BarWithValue(); + s = BarValue.class.toString(); + s = ObjectWithObjectValue.class.toString(); + s = ObjectWithObjectValues.class.toString(); + } + + + static class Couple { + public Person.val onePerson; + public Person.val otherPerson; + } + + static final value class Composition { + public final Person.val onePerson; + public final Person.val otherPerson; + + private Composition() { + onePerson = Person.create(0, null, null); + otherPerson = Person.create(0, null, null); + } + + public static Composition create(Person onePerson, Person otherPerson) { + Composition comp = Composition.default; + comp = __WithField(comp.onePerson, onePerson); + comp = __WithField(comp.otherPerson, otherPerson); + return comp; + } + } + + /** + * Check value type operations with "Valhalla Value Types" (VVT) + */ + public static void testValues() { + // Exercise creation, getfield, vreturn with null refs + validateDefaultPerson(createDefaultPerson()); + + // anewarray, aaload, aastore + int index = 7; + Person[] array = new Person[NOF_PEOPLE]; + validateDefaultPerson(array[index]); + + // Now with refs... 
+ validateIndexedPerson(createIndexedPerson(index), index); + array[index] = createIndexedPerson(index); + validateIndexedPerson(array[index], index); + + // Check the neighbours + validateDefaultPerson(array[index - 1]); + validateDefaultPerson(array[index + 1]); + + // getfield/putfield + Couple couple = new Couple(); + validateDefaultPerson(couple.onePerson); + validateDefaultPerson(couple.otherPerson); + + couple.onePerson = createIndexedPerson(index); + validateIndexedPerson(couple.onePerson, index); + + Composition composition = Composition.create(couple.onePerson, couple.onePerson); + validateIndexedPerson(composition.onePerson, index); + validateIndexedPerson(composition.otherPerson, index); + } + + /** + * Check oop map generation for klass layout and frame... + */ + public static void testOopMaps() { + Object[] objects = WhiteBox.getWhiteBox().getObjectsViaKlassOopMaps(new Couple()); + assertTrue(objects.length == 4, "Expected 4 oops"); + for (int i = 0; i < objects.length; i++) { + assertTrue(objects[i] == null, "not-null"); + } + + String fn1 = "Sam"; + String ln1 = "Smith"; + String fn2 = "Jane"; + String ln2 = "Jones"; + Couple couple = new Couple(); + couple.onePerson = Person.create(0, fn1, ln1); + couple.otherPerson = Person.create(1, fn2, ln2); + objects = WhiteBox.getWhiteBox().getObjectsViaKlassOopMaps(couple); + assertTrue(objects.length == 4, "Expected 4 oops"); + assertTrue(objects[0] == fn1, "Bad oop fn1"); + assertTrue(objects[1] == ln1, "Bad oop ln1"); + assertTrue(objects[2] == fn2, "Bad oop fn2"); + assertTrue(objects[3] == ln2, "Bad oop ln2"); + + objects = WhiteBox.getWhiteBox().getObjectsViaOopIterator(couple); + assertTrue(objects.length == 4, "Expected 4 oops"); + assertTrue(objects[0] == fn1, "Bad oop fn1"); + assertTrue(objects[1] == ln1, "Bad oop ln1"); + assertTrue(objects[2] == fn2, "Bad oop fn2"); + assertTrue(objects[3] == ln2, "Bad oop ln2"); + + // Array.. 
+ objects = WhiteBox.getWhiteBox().getObjectsViaOopIterator(createPeople()); + assertTrue(objects.length == NOF_PEOPLE * 2, "Unexpected length: " + objects.length); + int o = 0; + for (int i = 0; i < NOF_PEOPLE; i++) { + assertTrue(objects[o++].equals(firstName(i)), "Bad firstName"); + assertTrue(objects[o++].equals(lastName(i)), "Bad lastName"); + } + + // Sanity check, FixMe need more test cases + objects = testFrameOops(couple); + //assertTrue(objects.length == 5, "Number of frame oops incorrect = " + objects.length); + //assertTrue(objects[0] == couple, "Bad oop 0"); + //assertTrue(objects[1] == fn1, "Bad oop 1"); + //assertTrue(objects[2] == ln1, "Bad oop 2"); + //assertTrue(objects[3] == TEST_STRING1, "Bad oop 3"); + //assertTrue(objects[4] == TEST_STRING2, "Bad oop 4"); + + //testFrameOopsVBytecodes(); + } + + static final String GET_OOP_MAP_NAME = "getOopMap"; + static final String GET_OOP_MAP_DESC = "()[Ljava/lang/Object;"; + + public static Object[] getOopMap() { + Object[] oopMap = WhiteBox.getWhiteBox().getObjectsViaFrameOopIterator(2); + /* Remove this frame (class mirror for this method), and above class mirror */ + Object[] trimmedOopMap = new Object[oopMap.length - 2]; + System.arraycopy(oopMap, 2, trimmedOopMap, 0, trimmedOopMap.length); + return trimmedOopMap; + } + + // Expecting Couple couple, Person couple.onePerson, and Person (created here) + public static Object[] testFrameOops(Couple couple) { + int someId = 89898; + Person person = couple.onePerson; + assertTrue(person.getId() == 0, "Bad Person"); + Person anotherPerson = Person.create(someId, TEST_STRING1, TEST_STRING2); + assertTrue(anotherPerson.getId() == someId, "Bad Person"); + return getOopMap(); + } + + // Debug... 
+ static void dumpOopMap(Object[] oopMap) { + System.out.println("Oop Map len: " + oopMap.length); + for (int i = 0; i < oopMap.length; i++) { + System.out.println("[" + i + "] = " + oopMap[i]); + } + } + + /** + * Just some sanity checks with defaultvalue, withfield, astore and aload + * + * Changes to javac slot usage may well break this test + */ + public static void testFrameOopsVBytecodes() { + int nofOopMaps = 4; + Object[][] oopMaps = new Object[nofOopMaps][]; + String[] inputArgs = new String[] { "aName", "aDescription", "someNotes" }; + + FooValue.testFrameOopsDefault(oopMaps); + + // Test-D0 Slots=R Stack=Q(RRR)RV + assertTrue(oopMaps[0].length == 5 && + oopMaps[0][1] == null && + oopMaps[0][2] == null && + oopMaps[0][3] == null, "Test-D0 incorrect"); + + // Test-D1 Slots=R Stack=RV + assertTrue(oopMaps[1].length == 2, "Test-D1 incorrect"); + + // Test-D2 Slots=RQ(RRR) Stack=RV + assertTrue(oopMaps[2].length == 5 && + oopMaps[2][1] == null && + oopMaps[2][2] == null && + oopMaps[2][3] == null, "Test-D2 incorrect"); + + // Test-D3 Slots=R Stack=Q(RRR)RV + assertTrue(oopMaps[3].length == 6 && + oopMaps[3][1] == null && + oopMaps[3][2] == null && + oopMaps[3][3] == null && + oopMaps[3][4] == null, "Test-D3 incorrect"); + + // With ref fields...
+ String name = "TestName"; + String desc = "TestDesc"; + String note = "TestNotes"; + FooValue.testFrameOopsRefs(name, desc, note, oopMaps); + + // Test-R0 Slots=RR Stack=Q(RRR)RV + assertTrue(oopMaps[0].length == 6 && + oopMaps[0][2] == name && + oopMaps[0][3] == desc && + oopMaps[0][4] == note, "Test-R0 incorrect"); + + /** + * TODO: vwithfield from method handle cooked from anonymous class within the value class + * even with "MethodHandles.privateLookupIn()" will fail final putfield rules + */ + } + + /** + * Check forcing GC for combination of VT on stack/LVT etc works + */ + public static void testOverGc() { + try { + Class vtClass = Person.class.asValueType(); + + System.out.println("vtClass="+vtClass); + + doGc(); + + // VT on stack and lvt, null refs, see if GC flies + MethodHandle moveValueThroughStackAndLvt = MethodHandleBuilder.loadCode( + LOOKUP, + "gcOverPerson", + MethodType.methodType(vtClass, vtClass), + CODE->{ + CODE + .aload(0) + .invokestatic(ValueOops.class, "doGc", "()V", false) // Stack + .astore(0) + .invokestatic(ValueOops.class, "doGc", "()V", false) // LVT + .aload(0) + .astore(1024) // LVT wide index + .aload(1024) + .iconst_1() // push a little further down + .invokestatic(ValueOops.class, "doGc", "()V", false) // Stack,LVT + .pop() + .areturn(); + }); + Person person = (Person) moveValueThroughStackAndLvt.invokeExact(createDefaultPerson()); + validateDefaultPerson(person); + doGc(); + + int index = 4711; + person = (Person) moveValueThroughStackAndLvt.invokeExact(createIndexedPerson(index)); + validateIndexedPerson(person, index); + doGc(); + person = createDefaultPerson(); + doGc(); + } + catch (Throwable t) { fail("testOverGc", t); } + } + + static void submitNewWork(ForkJoinPool fjPool, int size) { + for (int i = 0; i < size; i++) { + for (int j = 0; j < 100; j++) { + fjPool.execute(ValueOops::testValues); + } + } + } + + static void sleepNoThrow(long ms) { + try { + Thread.sleep(ms); + } + catch (Throwable t) {} + } + + /** + *
Run some workloads with different object/value life times... + */ + public static void testActiveGc() { + try { + int nofThreads = 7; + int workSize = nofThreads * 10; + + Object longLivedObjects = createLongLived(); + Object longLivedPeople = createPeople(); + + Object medLivedObjects = createLongLived(); + Object medLivedPeople = createPeople(); + + doGc(); + + ForkJoinPool fjPool = new ForkJoinPool(nofThreads, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true); + + // submit work until we see some GC + Reference ref = createRef(); + submitNewWork(fjPool, workSize); + while (ref.get() != null) { + if (fjPool.hasQueuedSubmissions()) { + sleepNoThrow(1L); + } + else { + workSize *= 2; // Grow the submission size + submitNewWork(fjPool, workSize); + } + } + + // Keep working and actively GC, until MIN_ACTIVE_GC_COUNT + int nofActiveGc = 1; + ref = createRef(); + while (nofActiveGc < MIN_ACTIVE_GC_COUNT) { + if (ref.get() == null) { + nofActiveGc++; + ref = createRef(); + if (nofActiveGc % MED_ACTIVE_GC_COUNT == 0) { + validateLongLived(medLivedObjects); + validatePeople(medLivedPeople); + + medLivedObjects = createLongLived(); + medLivedPeople = createPeople(); + } + } + else if (fjPool.hasQueuedSubmissions()) { + sleepNoThrow((long) Utils.getRandomInstance().nextInt(1000)); + doGc(); + } + else { + submitNewWork(fjPool, workSize); + } + } + fjPool.shutdown(); + + validateLongLived(medLivedObjects); + validatePeople(medLivedPeople); + medLivedObjects = null; + medLivedPeople = null; + + validateLongLived(longLivedObjects); + validatePeople(longLivedPeople); + + longLivedObjects = null; + longLivedPeople = null; + + doGc(); + } + catch (Throwable t) { fail("testActiveGc", t); } + } + + static final ReferenceQueue REFQ = new ReferenceQueue<>(); + + public static void doGc() { + // Create Reference, wait until it clears... 
+ Reference ref = createRef(); + while (ref.get() != null) { + System.gc(); + } + } + + static Reference createRef() { + return new WeakReference(new Object(), REFQ); + } + + static void validatePerson(Person person, int id, String fn, String ln, boolean equals) { + assertTrue(person.id == id); + if (equals) { + assertTrue(fn.equals(person.getFirstName()), "Invalid field firstName"); + assertTrue(ln.equals(person.getLastName()), "Invalid field lastName"); + } + else { + assertTrue(person.getFirstName() == fn, "Invalid field firstName"); + assertTrue(person.getLastName() == ln, "Invalid field lastName"); + } + } + + static Person createIndexedPerson(int i) { + return Person.create(i, firstName(i), lastName(i)); + } + + static void validateIndexedPerson(Person person, int i) { + validatePerson(person, i, firstName(i), lastName(i), true); + } + + static Person createDefaultPerson() { + return Person.create(0, null, null); + } + + static void validateDefaultPerson(Person person) { + validatePerson(person, 0, null, null, false); + } + + static String firstName(int i) { + return "FirstName-" + i; + } + + static String lastName(int i) { + return "LastName-" + i; + } + + static Object createLongLived() throws Throwable { + Object[] population = new Object[1]; + population[0] = createPeople(); + return population; + } + + static void validateLongLived(Object pop) throws Throwable { + Object[] population = (Object[]) pop; + validatePeople(population[0]); + } + + static Object createPeople() { + int arrayLength = NOF_PEOPLE; + Person[] people = new Person[arrayLength]; + for (int i = 0; i < arrayLength; i++) { + people[i] = createIndexedPerson(i); + } + return people; + } + + static void validatePeople(Object array) { + Person[] people = (Person[]) array; + int arrayLength = people.length; + assertTrue(arrayLength == NOF_PEOPLE); + for (int i = 0; i < arrayLength; i++) { + validateIndexedPerson(people[i], i); + } + } + + // Various field layouts...sanity testing, see MVTCombo 
testing for full-set + + static final value class ObjectValue { + final Object object; + + private ObjectValue(Object obj) { + object = obj; + } + } + + static class ObjectWithObjectValue { + ObjectValue value1; + Object ref1; + } + + static class ObjectWithObjectValues { + ObjectValue value1; + ObjectValue value2; + Object ref1; + } + + static class Foo { + int id; + String name; + String description; + long timestamp; + String notes; + } + + static class Bar extends Foo { + long extendedId; + String moreNotes; + int count; + String otherStuff; + } + + public static final value class FooValue { + public final int id; + public final String name; + public final String description; + public final long timestamp; + public final String notes; + + private FooValue() { + id = 0; + name = null; + description = null; + timestamp = 0L; + notes = null; + } + + public static FooValue create(int id, String name, String description, long timestamp, String notes) { + FooValue f = FooValue.default; + f = __WithField(f.id, id); + f = __WithField(f.name, name); + f = __WithField(f.description, description); + f = __WithField(f.timestamp, timestamp); + f = __WithField(f.notes, notes); + return f; + } + + public static void testFrameOopsDefault(Object[][] oopMaps) { + MethodType mt = MethodType.methodType(Void.TYPE, oopMaps.getClass()); + int oopMapsSlot = 0; + int vtSlot = 1; + + // Slots 1=oopMaps + // OopMap Q=RRR (.name .description .someNotes) + try { + MethodHandleBuilder.loadCode( + LOOKUP, "exerciseVBytecodeExprStackWithDefault", mt, + CODE->{ + CODE + .defaultvalue(FooValue.class) + .aload(oopMapsSlot) + .iconst_0() // Test-D0 Slots=R Stack=Q(RRR)RV + .invokestatic(ValueOops.class, GET_OOP_MAP_NAME, GET_OOP_MAP_DESC, false) + .aastore() + .pop() + .aload(oopMapsSlot) + .iconst_1() // Test-D1 Slots=R Stack=RV + .invokestatic(ValueOops.class, GET_OOP_MAP_NAME, GET_OOP_MAP_DESC, false) + .aastore() + .defaultvalue(FooValue.class) + .astore(vtSlot) + .aload(oopMapsSlot) + 
.iconst_2() // Test-D2 Slots=RQ(RRR) Stack=RV + .invokestatic(ValueOops.class, GET_OOP_MAP_NAME, GET_OOP_MAP_DESC, false) + .aastore() + .aload(vtSlot) + .aconst_null() + .astore(vtSlot) // Storing null over the Q slot won't remove the ref, but should be single null ref + .aload(oopMapsSlot) + .iconst_3() // Test-D3 Slots=R Stack=Q(RRR)RV + .invokestatic(ValueOops.class, GET_OOP_MAP_NAME, GET_OOP_MAP_DESC, false) + .aastore() + .pop() + .return_(); + }).invoke(oopMaps); + } catch (Throwable t) { fail("exerciseVBytecodeExprStackWithDefault", t); } + } + + public static void testFrameOopsRefs(String name, String description, String notes, Object[][] oopMaps) { + FooValue f = create(4711, name, description, 9876543231L, notes); + FooValue[] fa = new FooValue[] { f }; + MethodType mt = MethodType.methodType(Void.TYPE, fa.getClass(), oopMaps.getClass()); + int fooArraySlot = 0; + int oopMapsSlot = 1; + try { + MethodHandleBuilder.loadCode(LOOKUP, "exerciseVBytecodeExprStackWithRefs", mt, + CODE->{ + CODE + .aload(fooArraySlot) + .iconst_0() + .aaload() + .aload(oopMapsSlot) + .iconst_0() // Test-R0 Slots=RR Stack=Q(RRR)RV + .invokestatic(ValueOops.class, GET_OOP_MAP_NAME, GET_OOP_MAP_DESC, false) + .aastore() + .pop() + .return_(); + }).invoke(fa, oopMaps); + } catch (Throwable t) { fail("exerciseVBytecodeExprStackWithRefs", t); } + } + } + + static class BarWithValue { + FooValue.val foo; + long extendedId; + String moreNotes; + int count; + String otherStuff; + } + + static final value class BarValue { + final FooValue.val foo; + final long extendedId; + final String moreNotes; + final int count; + final String otherStuff; + + private BarValue(FooValue f, long extId, String mNotes, int c, String other) { + foo = f; + extendedId = extId; + moreNotes = mNotes; + count = c; + otherStuff = other; + } + } + +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeArray.java 2019-03-11 14:27:54.022353664 +0100 @@ 
-0,0 +1,473 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import java.lang.reflect.Array; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; + +import static jdk.test.lib.Asserts.*; + +/* + * @test ValueTypeArray + * @summary Plain array test for Value Types + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator -XDallowFlattenabilityModifiers -XDallowGenericsOverValues ValueTypeArray.java Point.java Long8Value.java Person.java + * @run main/othervm -Xint -XX:+ValueArrayFlatten -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeArray + * @run main/othervm -Xint -XX:-ValueArrayFlatten -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeArray + * @run main/othervm -Xcomp -XX:+ValueArrayFlatten -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeArray + * @run main/othervm -Xcomp -XX:-ValueArrayFlatten -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeArray + */ +public class ValueTypeArray { + public static void main(String[] args) { + ValueTypeArray valueTypeArray = new ValueTypeArray(); + valueTypeArray.run(); + } + + public void run() { + testClassForName(); + testSimplePointArray(); + testLong8Array(); + testMixedPersonArray(); + testMultiDimPointArray(); + testComposition(); + + testSanityCheckcasts(); + testObjectArrayOfValues(); + + testReflectArray(); + // testUtilArrays(); + } + + void testClassForName() { + String arrayClsName = "[Lruntime.valhalla.valuetypes.Point;"; + try { + Class arrayCls = Class.forName(arrayClsName); + assertTrue(arrayCls.isArray(), "Expected an array class"); + // array-of-L-type not supported yet + // the component type of a flattened value array is of the value type + // the component type of a non-flattened array is of the box type + assertTrue(arrayCls.getComponentType().asBoxType() == Point.class, + "Expected component type of Point.class got: " + arrayCls.getComponentType()); + + arrayClsName = "[" + arrayClsName; + Class mulArrayCls = 
Class.forName(arrayClsName); + assertTrue(mulArrayCls.isArray()); + assertTrue(mulArrayCls.getComponentType() == arrayCls); + } + catch (ClassNotFoundException cnfe) { + fail("Class.forName(" + arrayClsName + ") failed", cnfe); + } + } + + void testSimplePointArray() { + Point[] defaultPoint = new Point[1]; + Point p = defaultPoint[0]; + assertEquals(p.x, 0, "invalid default loaded from array"); + assertEquals(p.y, 0, "invalid default loaded from array"); + boolean gotNpe = false; + try { + defaultPoint[0] = (Point) getNull(); + } catch (NullPointerException npe) { + gotNpe = true; + } + assertTrue(gotNpe, "Expected NullPointerException"); + + Point[] points = createSimplePointArray(); + checkSimplePointArray(points); + System.gc(); // check that VTs survive GC + + assertTrue(points instanceof Point[], "Instance of"); + + Point[] pointsCopy = new Point[points.length]; + System.arraycopy(points, 0, pointsCopy, 0, points.length); + checkSimplePointArray(pointsCopy); + } + + static Point[] createSimplePointArray() { + Point[] ps = new Point[2]; + assertEquals(ps.length, 2, "Length"); + ps.toString(); + ps[0] = Point.createPoint(1, 2); + ps[1] = Point.createPoint(3, 4); + boolean sawOob = false; + try { + ps[2] = Point.createPoint(0, 0); + } catch (ArrayIndexOutOfBoundsException aioobe) { sawOob = true; } + assertTrue(sawOob, "Didn't see AIOOBE"); + System.gc(); // check that VTs survive GC + return ps; + } + + static void checkSimplePointArray(Point[] points) { + assertEquals(points[0].x, 1, "invalid 0 point x value"); + assertEquals(points[0].y, 2, "invalid 0 point y value"); + assertEquals(points[1].x, 3, "invalid 1 point x value"); + assertEquals(points[1].y, 4, "invalid 1 point y value"); + } + + void testLong8Array() { + Long8Value[] values = new Long8Value[3]; + assertEquals(values.length, 3, "length"); + values.toString(); + Long8Value value = values[1]; + long zl = 0; + Long8Value.check(value, zl, zl, zl, zl, zl, zl, zl, zl); + values[1] = Long8Value.create(1, 
2, 3, 4, 5, 6, 7, 8); + value = values[1]; + Long8Value.check(value, 1, 2, 3, 4, 5, 6, 7, 8); + + Long8Value[] copy = new Long8Value[values.length]; + System.arraycopy(values, 0, copy, 0, values.length); + value = copy[1]; + Long8Value.check(value, 1, 2, 3, 4, 5, 6, 7, 8); + } + + void testMixedPersonArray() { + Person[] people = new Person[3]; + + people[0] = Person.create(1, "First", "Last"); + assertEquals(people[0].getId(), 1, "Invalid Id person"); + assertEquals(people[0].getFirstName(), "First", "Invalid First Name"); + assertEquals(people[0].getLastName(), "Last", "Invalid Last Name"); + + people[1] = Person.create(2, "Jane", "Wayne"); + people[2] = Person.create(3, "Bob", "Dobalina"); + + Person[] peopleCopy = new Person[people.length]; + System.arraycopy(people, 0, peopleCopy, 0, people.length); + assertEquals(peopleCopy[2].getId(), 3, "Invalid Id"); + assertEquals(peopleCopy[2].getFirstName(), "Bob", "Invalid First Name"); + assertEquals(peopleCopy[2].getLastName(), "Dobalina", "Invalid Last Name"); + } + + void testMultiDimPointArray() { + Point[][][] multiPoints = new Point[2][3][4]; + assertEquals(multiPoints.length, 2, "1st dim length"); + assertEquals(multiPoints[0].length, 3, "2st dim length"); + assertEquals(multiPoints[0][0].length, 4, "3rd dim length"); + + Point defaultPoint = multiPoints[1][2][3]; + assertEquals(defaultPoint.x, 0, "invalid point x value"); + assertEquals(defaultPoint.y, 0, "invalid point x value"); + } + + void testReflectArray() { + // Check the java.lang.reflect.Array.newInstance methods... 
+ Class cls = (Class) Point[].class; + Point[][] array = (Point[][]) Array.newInstance(cls, 1); + assertEquals(array.length, 1, "Incorrect length"); + assertTrue(array[0] == null, "Expected NULL"); + + Point[][][] array3 = (Point[][][]) Array.newInstance(cls, 1, 2); + assertEquals(array3.length, 1, "Incorrect length"); + assertEquals(array3[0].length, 2, "Incorrect length"); + assertTrue(array3[0][0] == null, "Expected NULL"); + + // Now create ObjArrays of ValueArray... + cls = (Class) Point.class; + array = (Point[][]) Array.newInstance(cls, 1, 2); + assertEquals(array.length, 1, "Incorrect length"); + assertEquals(array[0].length, 2, "Incorrect length"); + Point p = array[0][1]; + int x = p.x; + assertEquals(x, 0, "Bad Point Value"); + } + + static final value class MyInt implements Comparable { + final int value; + + private MyInt() { value = 0; } + public int getValue() { return value; } + public String toString() { return "MyInt: " + getValue(); } + public int compareTo(MyInt that) { return Integer.compare(this.getValue(), that.getValue()); } + public boolean equals(Object o) { + if (o instanceof MyInt) { + return this.getValue() == ((MyInt) o).getValue(); + } + return false; + } + + public static MyInt create(int v) { + MyInt mi = MyInt.default; + mi = __WithField(mi.value, v); + return mi; + } + + public static final MyInt.box MIN = MyInt.create(Integer.MIN_VALUE); + public static final MyInt.box ZERO = MyInt.create(0); + public static final MyInt.box MAX = MyInt.create(Integer.MAX_VALUE); + } + + static interface SomeSecondaryType { + default String hi() { return "Hi"; } + } + + static final value class MyOtherInt implements SomeSecondaryType { + final int value; + private MyOtherInt() { value = 0; } + } + + void testSanityCheckcasts() { +// TODO Re-enable if value type arrays become covariant with object arrays +/* + + MyInt[] myInts = new MyInt[1]; + assertTrue(myInts instanceof Object[]); + assertTrue(myInts instanceof Comparable[]); + + Object arrObj = 
Array.newInstance(MyInt.class, 1); + assertTrue(arrObj instanceof Object[], "Not Object array"); + assertTrue(arrObj instanceof Comparable[], "Not Comparable array"); + assertTrue(arrObj instanceof MyInt[], "Not MyInt array"); + + Object[] arr = (Object[]) arrObj; + assertTrue(arr instanceof Comparable[], "Not Comparable array"); + assertTrue(arr instanceof MyInt[], "Not MyInt array"); + Comparable[] comparables = (Comparable[])arr; + MyInt[] myIntArr = (MyInt[]) arr; + + // multi-dim, check secondary array types are setup... + MyOtherInt[][] matrix = new MyOtherInt[1][1]; + assertTrue(matrix[0] instanceof MyOtherInt[]); + assertTrue(matrix[0] instanceof SomeSecondaryType[]); +*/ + } + +/* + * Comment out this test because value type arrays are not assignable to the array + * parameter types used by the methods in class java.util.Arrays. + * + void testUtilArrays() { + // Sanity check j.u.Arrays + MyInt[] myInts = new MyInt[] { MyInt.MAX, MyInt.MIN }; + // Sanity sort another copy + MyInt[] copyMyInts = Arrays.copyOf(myInts, myInts.length + 1); + checkArrayElementsEqual(copyMyInts, new MyInt[] { myInts[0], myInts[1], MyInt.ZERO}); + + Arrays.sort(copyMyInts); + checkArrayElementsEqual(copyMyInts, new MyInt[] { MyInt.MIN, MyInt.ZERO, MyInt.MAX }); + + List myIntList = Arrays.asList(copyMyInts); + checkArrayElementsEqual(copyMyInts, myIntList.toArray(new MyInt[copyMyInts.length])); + // This next line needs testMixedLayoutArrays to work + checkArrayElementsEqual(copyMyInts, myIntList.toArray()); + + // Sanity check j.u.ArrayList + ArrayList aList = new ArrayList(Arrays.asList(copyMyInts)); + assertTrue(aList.indexOf(MyInt.MIN) == 0, "Bad Index"); + assertTrue(aList.indexOf(MyInt.ZERO) == 1, "Bad Index"); + assertTrue(aList.indexOf(MyInt.MAX) == 2, "Bad Index"); + + aList.remove(2); + aList.add(MyInt.create(5)); + + // Interesting: + //aList.add((MyInt)getNull()); + + // javac currently generating "java/util/Objects.requireNonNull + // should checkcast treat null 
against Value class as CCE ? + // Then in the end, ArrayList.elementData is Object[], (that's why remove works) + // why can't I write add(null) then ? + } +*/ + + void testObjectArrayOfValues() { + testSanityObjectArrays(); + testMixedLayoutArrays(); + } + + void testSanityObjectArrays() { + Object[] objects = new Object[2]; + assertTrue(objects[0] == null && objects[1] == null, "Not null ?"); + + objects[0] = MyInt.create(1); + objects[1] = Integer.valueOf(2); + assertTrue(objects[0].equals(MyInt.create(1)), "Bad Value"); + assertTrue(objects[1].equals(Integer.valueOf(2)), "Bad Object"); + + Comparable[] copyComparables = new Comparable[objects.length]; + System.arraycopy(objects, 0, copyComparables, 0, objects.length); + checkArrayElementsEqual(objects, copyComparables); + + objects[0] = null; + objects[1] = null; + assertTrue(objects[0] == null && objects[1] == null, "Not null ?"); + + Comparable[] comparables = new Comparable[2]; + assertTrue(comparables[0] == null && comparables[1] == null, "Not null ?"); + comparables[0] = MyInt.create(3); + comparables[1] = Integer.valueOf(4); + assertTrue(comparables[0].equals(MyInt.create(3)), "Bad Value"); + assertTrue(comparables[1].equals(Integer.valueOf(4)), "Bad Object"); + + Object[] copyObjects = new Object[2]; + System.arraycopy(comparables, 0, copyObjects, 0, comparables.length); + checkArrayElementsEqual(comparables, copyObjects); + + comparables[0] = null; + comparables[1] = null; + assertTrue(comparables[0] == null && comparables[1] == null, "Not null ?"); + } + + void testMixedLayoutArrays() { + Object[] objArray = new Object[3]; + Comparable[] compArray = new Comparable[3]; + MyInt[] valArray = new MyInt[] { MyInt.MIN, MyInt.ZERO, MyInt.MAX }; + + arrayCopy(valArray, 0, objArray, 0, 3); + checkArrayElementsEqual(valArray, objArray); + arrayCopy(valArray, 0, objArray, 0, 3); + + objArray = new Object[3]; + System.arraycopy(valArray, 0, objArray, 0, 3); + checkArrayElementsEqual(valArray, objArray); + + 
System.arraycopy(valArray, 0, compArray, 0, 3); + checkArrayElementsEqual(valArray, compArray); + + valArray = new MyInt[] { MyInt.ZERO, MyInt.ZERO, MyInt.ZERO }; + System.arraycopy(compArray, 0, valArray, 0, 3); + checkArrayElementsEqual(valArray, compArray); + + valArray = new MyInt[] { MyInt.ZERO, MyInt.ZERO, MyInt.ZERO }; + System.arraycopy(objArray, 0, valArray, 0, 3); + checkArrayElementsEqual(valArray, objArray); + + // Sanity check dst == src + System.arraycopy(valArray, 0, valArray, 0, 3); + checkArrayElementsEqual(valArray, objArray); + + objArray[0] = "Not a value object"; + try { + System.arraycopy(objArray, 0, valArray, 0, 3); + throw new RuntimeException("Expected ArrayStoreException"); + } catch (ArrayStoreException ase) {} + } + + static final value class MyPoint { + final MyInt.val x; + final MyInt y; + + private MyPoint() { + x = MyInt.ZERO; + y = x; + } + public boolean equals(Object that) { + if (that instanceof MyPoint) { + MyPoint thatPoint = (MyPoint) that; + return x.equals(thatPoint.x) && java.util.Objects.equals(y, thatPoint.y); + } + return false; + } + static MyPoint create(int x) { + MyPoint mp = MyPoint.default; + mp = __WithField(mp.x, MyInt.create(x)); + return mp; + } + static MyPoint create(int x, int y) { + MyPoint mp = MyPoint.default; + mp = __WithField(mp.x, MyInt.create(x)); + mp = __WithField(mp.y, MyInt.create(y)); + return mp; + } + static final MyPoint.box ORIGIN = create(0); + } + + void testComposition() { + // Test array operations with composition of values, check element payload is correct...
+ MyPoint a = MyPoint.create(1, 2); + MyPoint b = MyPoint.create(7, 21); + MyPoint c = MyPoint.create(Integer.MAX_VALUE, Integer.MIN_VALUE); + + MyPoint[] pts = new MyPoint[3]; + if (!pts[0].equals(MyPoint.ORIGIN)) { + throw new RuntimeException("Equals failed: " + pts[0] + " vs " + MyPoint.ORIGIN); + } + pts = new MyPoint[] { a, b, c }; + checkArrayElementsEqual(pts, new Object[] { a, b, c}); + Object[] oarr = new Object[3]; + + arrayCopy(pts, 0, oarr, 0, 3); + checkArrayElementsEqual(pts, oarr); + + oarr = new Object[3]; + System.arraycopy(pts, 0, oarr, 0, 3); + checkArrayElementsEqual(pts, oarr); + + System.arraycopy(oarr, 0, pts, 0, 3); + checkArrayElementsEqual(pts, oarr); + + oarr = new Object[3]; + try { + System.arraycopy(oarr, 0, pts, 0, 3); + throw new RuntimeException("Expected NPE"); + } + catch (NullPointerException npe) {} + + oarr = new Object[3]; + oarr[0] = new Object(); + try { + System.arraycopy(oarr, 0, pts, 0, 3); + throw new RuntimeException("Expected ASE"); + } + catch (ArrayStoreException ase) {} + } + + void checkArrayElementsEqual(MyInt[] arr1, Object[] arr2) { + assertTrue(arr1.length == arr2.length, "Bad length"); + for (int i = 0; i < arr1.length; i++) { + assertTrue(java.util.Objects.equals(arr1[i], arr2[i]), "Element " + i + " not equal"); + } + } + + void checkArrayElementsEqual(MyPoint[] arr1, Object[] arr2) { + assertTrue(arr1.length == arr2.length, "Bad length"); + for (int i = 0; i < arr1.length; i++) { + assertTrue(java.util.Objects.equals(arr1[i], arr2[i]), "Element " + i + " not equal"); + } + } + + void checkArrayElementsEqual(Object[] arr1, Object[] arr2) { + assertTrue(arr1.length == arr2.length, "Bad length"); + for (int i = 0; i < arr1.length; i++) { + assertTrue(java.util.Objects.equals(arr1[i], arr2[i]), "Element " + i + " not equal"); + } + } + + void arrayCopy(MyInt[] src, int srcPos, Object[] dst, int dstPos, int length) { + for (int i = 0; i < length ; i++) { + dst[dstPos++] = src[srcPos++]; + } + } + void 
arrayCopy(MyPoint[] src, int srcPos, Object[] dst, int dstPos, int length) { + for (int i = 0; i < length ; i++) { + dst[dstPos++] = src[srcPos++]; + } + } + + Object getNull() { return null; } + +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeCreation.java 2019-03-11 14:27:54.514353657 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test ValueTypeCreation + * @summary Value Type creation test + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator -XDallowFlattenabilityModifiers ValueTypeCreation.java Point.java Long8Value.java Person.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeCreation + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeCreation + */ +public class ValueTypeCreation { + public static void main(String[] args) { + ValueTypeCreation valueTypeCreation = new ValueTypeCreation(); + valueTypeCreation.run(); + } + + public void run() { + testPoint(); + testLong8(); + testPerson(); + StaticSelf.test(); + } + + void testPoint() { + Point p = Point.createPoint(1, 2); + Asserts.assertEquals(p.x, 1, "invalid point x value"); + Asserts.assertEquals(p.y, 2, "invalid point y value"); + Point p2 = clonePoint(p); + Asserts.assertEquals(p2.x, 1, "invalid point clone x value"); + Asserts.assertEquals(p2.y, 2, "invalid point clone y value"); + } + + static Point clonePoint(Point p) { + Point q = p; + return q; + } + + void testLong8() { + Long8Value long8Value = Long8Value.create(1, 2, 3, 4, 5, 6, 7, 8); + Asserts.assertEquals(long8Value.getLongField1(), 1L, "Field 1 incorrect"); + Asserts.assertEquals(long8Value.getLongField8(), 8L, "Field 8 incorrect"); + Long8Value.check(long8Value, 1, 2, 3, 4, 5, 6, 7, 8); + } + + void testPerson() { + Person person = Person.create(1, "John", "Smith"); + Asserts.assertEquals(person.getId(), 1, "Id field incorrect"); + Asserts.assertEquals(person.getFirstName(), "John", "First name incorrect"); + Asserts.assertEquals(person.getLastName(), "Smith", "Last name incorrect"); + } + + static final value class StaticSelf { + + static final StaticSelf.box DEFAULT = create(0); + final int f1; + + private StaticSelf() { f1 = 0; } + public String toString() { return 
"StaticSelf f1=" + f1; } + + static StaticSelf create(int f1) { + StaticSelf s = StaticSelf.default; + s = __WithField(s.f1, f1); + return s; + } + + public static void test() { + String s = DEFAULT.toString(); + } + + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeDensity.java 2019-03-11 14:27:55.030353650 +0100 @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +import java.lang.management.MemoryPoolMXBean; + +import sun.hotspot.WhiteBox; +import jdk.test.lib.Asserts; + +/** + * @test ValueTypeDensity + * @summary Heap density test for ValueTypes + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator ValueTypeDensity.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xint -XX:+EnableValhalla -XX:+ValueArrayFlatten + * -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI ValueTypeDensity + * @run main/othervm -Xcomp -XX:+EnableValhalla -XX:+ValueArrayFlatten + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI ValueTypeDensity + */ + +public class ValueTypeDensity { + + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + + public ValueTypeDensity() { + if (!WHITE_BOX.getBooleanVMFlag("ValueArrayFlatten")) { + throw new IllegalStateException("ValueArrayFlatten false"); + } + } + + interface LocalDate { + public int getYear(); + public short getMonth(); + public short getDay(); + } + + interface LocalTime { + public byte getHour(); + public byte getMinute(); + public byte getSecond(); + public int getNano(); + } + + interface LocalDateTime extends LocalDate, LocalTime {} + + static final value class LocalDateValue implements LocalDate { + final int year; + final short month; + final short day; + + LocalDateValue() { + year = 0; + month = 0; + day = 0; + } + + public int getYear() { return year; } + public short getMonth() { return month; } + public short getDay() { return day; } + + public static LocalDateValue create(int year, short month, short day) { + LocalDateValue localDate = LocalDateValue.default; + localDate = __WithField(localDate.year, year); + localDate = __WithField(localDate.month, month); + localDate = __WithField(localDate.day, day); + return localDate; + } + } + + static final value class LocalTimeValue implements LocalTime { + final byte hour; + final byte minute; + final byte second; + final int nano; + + LocalTimeValue() { + hour = 0; + minute = 0; + second = 0; + nano = 0; + } + + public byte getHour() { return hour; } + public byte getMinute() { return minute; } + public byte getSecond() { return second; } + public int getNano() { return nano; } + + public static LocalTimeValue create(byte hour, byte minute, byte second, int nano) { + LocalTimeValue localTime = LocalTimeValue.default; + localTime = 
__WithField(localTime.hour, hour); + localTime = __WithField(localTime.minute, minute); + localTime = __WithField(localTime.second, second); + localTime = __WithField(localTime.nano, nano); + return localTime; + } + } + + static final value class LocalDateTimeValue implements LocalDateTime { + final LocalDateValue date; + final LocalTimeValue time; + + LocalDateTimeValue() { + // Well this is a little weird... + date = LocalDateValue.create(0, (short)0, (short)0); + time = LocalTimeValue.create((byte)0, (byte)0, (byte)0, 0); + } + + public int getYear() { return date.year; } + public short getMonth() { return date.month; } + public short getDay() { return date.day; } + + public byte getHour() { return time.hour; } + public byte getMinute() { return time.minute; } + public byte getSecond() { return time.second; } + public int getNano() { return time.nano; } + + public static LocalDateTimeValue create(LocalDateValue date, LocalTimeValue time) { + LocalDateTimeValue localDateTime = LocalDateTimeValue.default; + localDateTime = __WithField(localDateTime.date, date); + localDateTime = __WithField(localDateTime.time, time); + return localDateTime; + } + } + + static final class LocalDateClass implements LocalDate { + final int year; + final short month; + final short day; + + LocalDateClass(int year, short month, short day) { + this.year = year; + this.month = month; + this.day = day; + } + + public int getYear() { return year; } + public short getMonth() { return month; } + public short getDay() { return day; } + } + + static final class LocalTimeClass implements LocalTime { + final byte hour; + final byte minute; + final byte second; + final int nano; + + LocalTimeClass(byte hour, byte minute, byte second, int nano) { + this.hour = hour; + this.minute = minute; + this.second = second; + this.nano = nano; + } + + public byte getHour() { return hour; } + public byte getMinute() { return minute; } + public byte getSecond() { return second; } + public int getNano() { 
return nano; } + } + + static final class LocalDateTimeClass implements LocalDateTime { + final LocalDateClass date; + final LocalTimeClass time; + + LocalDateTimeClass(LocalDateClass date, LocalTimeClass time) { + this.date = date; + this.time = time; + } + + public LocalDateClass getDate() { return date; } + public LocalTimeClass getTime() { return time; } + + public int getYear() { return date.year; } + public short getMonth() { return date.month; } + public short getDay() { return date.day; } + + public byte getHour() { return time.hour; } + public byte getMinute() { return time.minute; } + public byte getSecond() { return time.second; } + public int getNano() { return time.nano; } + } + + public void ensureArraySizeWin() { + int arrayLength = 1000; + System.out.println("ensureArraySizeWin for length " + arrayLength); + LocalDateTimeClass[] objectArray = new LocalDateTimeClass[arrayLength]; + for (int i = 0; i < arrayLength; i++) { + objectArray[i] = new LocalDateTimeClass(new LocalDateClass(0, (short)0, (short)0), + new LocalTimeClass((byte)0, (byte)0, (byte)0, 0)); + } + + long objectArraySize = WHITE_BOX.getObjectSize(objectArray); + System.out.println("Empty object array size: " + objectArraySize); + objectArraySize += (arrayLength * + (WHITE_BOX.getObjectSize(objectArray[0]) + + WHITE_BOX.getObjectSize(objectArray[0].getDate()) + + WHITE_BOX.getObjectSize(objectArray[0].getTime()))); + + LocalDateTimeValue[] valueArray = new LocalDateTimeValue[arrayLength]; + // CMH: add "isFlatValueArray" to WhiteBox API, to ensure we are correctly account size + + long valueArraySize = WHITE_BOX.getObjectSize(valueArray); + System.out.println("Object array and elements: " + objectArraySize + " versus Value Array: " + valueArraySize); + Asserts.assertLessThan(valueArraySize, objectArraySize, "Value array accounts for more heap than object array + elements !"); + } + + public void test() { + ensureArraySizeWin(); + } + + public static void main(String[] args) { + new 
ValueTypeDensity().test(); + } + +} + --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeGetField.java 2019-03-11 14:27:55.522353643 +0100 @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package runtime.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test ValueTypeGetField + * @summary Value Type get field test + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java ValueTypeGetField.java + * @run main/othervm -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeGetField + * @run main/othervm -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueTypeGetField + */ +public class ValueTypeGetField { + + static Point staticPoint0; + static Point staticPoint1; + Point instancePoint0; + Point instancePoint1; + + static { + staticPoint0 = Point.createPoint(358, 406); + staticPoint1 = Point.createPoint(101, 2653); + } + + ValueTypeGetField() { + instancePoint0 = Point.createPoint(1890, 1918); + instancePoint1 = Point.createPoint(91, 102); + } + + public static void main(String[] args) { + ValueTypeGetField valueTypeGetField = new ValueTypeGetField(); + System.gc(); // check that VTs survive GC + valueTypeGetField.run(); + } + + public void run() { + // testing initial configuration + checkPoint(staticPoint0, 358, 406); + checkPoint(staticPoint1, 101, 2653); + checkPoint(instancePoint0, 1890, 1918); + checkPoint(instancePoint1, 91, 102); + // swapping static fields + Point p = staticPoint1; + staticPoint1 = staticPoint0; + staticPoint0 = p; + System.gc(); + checkPoint(staticPoint0, 101, 2653); + checkPoint(staticPoint1, 358, 406); + //swapping instance fields + p = instancePoint1; + instancePoint1 = instancePoint0; + instancePoint0 = p; + System.gc(); + checkPoint(instancePoint0, 91, 102); + checkPoint(instancePoint1, 1890, 1918); + // instance to static + staticPoint0 = instancePoint0; + System.gc(); + checkPoint(staticPoint0, 91, 102); + // static to instance + instancePoint1 = staticPoint1; + System.gc(); + checkPoint(instancePoint1, 358, 406); + } + + static void checkPoint(Point p , int x, int y) { + Asserts.assertEquals(p.x, x, "invalid x value"); + 
Asserts.assertEquals(p.y, y, "invalid y value"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypesTest.java 2019-03-11 14:27:56.022353636 +0100 @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.lang.invoke.*; +import java.lang.ref.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.*; + +import static jdk.test.lib.Asserts.*; + +import jdk.experimental.bytecode.MacroCodeBuilder; +import jdk.experimental.bytecode.MacroCodeBuilder.CondKind; +import jdk.experimental.bytecode.TypeTag; +import jdk.test.lib.Platform; +import jdk.test.lib.Utils; + +import jdk.experimental.value.MethodHandleBuilder; + +import javax.tools.*; + +/** + * @test ValueTypesTest + * @summary Test data movement with value types + * @modules java.base/jdk.experimental.bytecode + * java.base/jdk.experimental.value + * @library /test/lib + * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator TestValue1.java TestValue2.java TestValue3.java TestValue4.java ValueTypesTest.java + * @run main/othervm -Xint -Xmx128m -XX:+EnableValhalla -XX:-ShowMessageBoxOnError + * -XX:+ExplicitGCInvokesConcurrent + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=false + * runtime.valhalla.valuetypes.ValueTypesTest + * @run main/othervm -Xcomp -Xmx128m -XX:+EnableValhalla -XX:-ShowMessageBoxOnError + * -XX:+ExplicitGCInvokesConcurrent + * -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=false + * runtime.valhalla.valuetypes.ValueTypesTest + */ +public class ValueTypesTest { + + public static void main(String[] args) { + Class valueClass = runtime.valhalla.valuetypes.TestValue1.class; + Class testClasses[] = { + runtime.valhalla.valuetypes.TestValue1.class, + runtime.valhalla.valuetypes.TestValue2.class, + runtime.valhalla.valuetypes.TestValue3.class, + runtime.valhalla.valuetypes.TestValue4.class + }; + Class containerClasses[] = { + runtime.valhalla.valuetypes.ContainerValue1.class, + runtime.valhalla.valuetypes.ContainerValue2.class, + runtime.valhalla.valuetypes.ContainerValue3.class, + runtime.valhalla.valuetypes.ContainerValue4.class + }; + + for (int i = 0; i < testClasses.length; i++) { + try { + testExecutionStackToLocalVariable(testClasses[i]); + testExecutionStackToFields(testClasses[i], containerClasses[i]); + // testExecutionStackToValueArray(testClasses[i], containerClasses[i]); + } catch (Throwable t) { + t.printStackTrace(); + throw new RuntimeException(t); + } + } + } + + static MethodHandles.Lookup LOOKUP = MethodHandles.lookup(); + + static void testExecutionStackToLocalVariable(Class valueClass) throws Throwable { + String sig = "()Q" + valueClass.getName() + ";"; + final String signature = sig.replace('.', '/'); + MethodHandle fromExecStackToLocalVar = MethodHandleBuilder.loadCode( + LOOKUP, + "execStackToLocalVar", + MethodType.methodType(boolean.class), + CODE -> { + CODE.invokestatic(System.class, "gc", "()V", false); + int n = -1; + while (n < 1024) { + n++; + CODE + .invokestatic(valueClass, "getInstance", signature, false) + .astore(n); + n++; + CODE + .invokestatic(valueClass, "getNonBufferedInstance", signature, false) + .astore(n); + } + CODE.invokestatic(System.class, "gc", "()V", false); + while (n > 0) { + CODE + .aload(n) + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "end"); + 
n--; + } + CODE + .iconst_1() + .return_(TypeTag.Z) + .label("end") + .iconst_0() + .return_(TypeTag.Z); + }); + boolean result = (boolean) fromExecStackToLocalVar.invokeExact(); + System.out.println(result); + assertTrue(result, "Invariant"); + } + + static void testExecutionStackToFields(Class valueClass, Class containerClass) throws Throwable { + final int ITERATIONS = Platform.isDebugBuild() ? 3 : 512; + String sig = "()Q" + valueClass.getName() + ";"; + final String methodSignature = sig.replace('.', '/'); + final String fieldQSignature = "Q" + valueClass.getName().replace('.', '/') + ";"; + final String fieldLSignature = "L" + valueClass.getName().replace('.', '/') + ";"; + System.out.println(methodSignature); + MethodHandle fromExecStackToFields = MethodHandleBuilder.loadCode( + LOOKUP, + "execStackToFields", + MethodType.methodType(boolean.class), + CODE -> { + CODE + .invokestatic(System.class, "gc", "()V", false) + .new_(containerClass) + .dup() + .invoke(MacroCodeBuilder.InvocationKind.INVOKESPECIAL, containerClass, "", "()V", false) + .astore_1() + .iconst_m1() + .istore_2() + .label("loop") + .iload_2() + .ldc(ITERATIONS) + .ifcmp(TypeTag.I, CondKind.EQ, "end") + .aload_1() + .invokestatic(valueClass, "getInstance", methodSignature, false) + .putfield(containerClass, "nonStaticValueField", fieldQSignature) + .invokestatic(System.class, "gc", "()V", false) + .aload_1() + .getfield(containerClass, "nonStaticValueField", fieldQSignature) + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "failed") + .aload_1() + .invokestatic(valueClass, "getNonBufferedInstance", methodSignature, false) + .putfield(containerClass, "nonStaticValueField", fieldQSignature) + .invokestatic(System.class, "gc", "()V", false) + .aload_1() + .getfield(containerClass, "nonStaticValueField", fieldQSignature) + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "failed") + 
.invokestatic(valueClass, "getInstance", methodSignature, false) + .putstatic(containerClass, "staticValueField", fieldLSignature) + .invokestatic(System.class, "gc", "()V", false) + .getstatic(containerClass, "staticValueField", fieldLSignature) + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "failed") + .invokestatic(valueClass, "getNonBufferedInstance", methodSignature, false) + .putstatic(containerClass, "staticValueField", fieldLSignature) + .invokestatic(System.class, "gc", "()V", false) + .getstatic(containerClass, "staticValueField", fieldLSignature) + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "failed") + .iinc(2, 1) + .goto_("loop") + .label("end") + .iconst_1() + .return_(TypeTag.Z) + .label("failed") + .iconst_0() + .return_(TypeTag.Z); + }); + boolean result = (boolean) fromExecStackToFields.invokeExact(); + System.out.println(result); + assertTrue(result, "Invariant"); + } + + static void testExecutionStackToValueArray(Class valueClass, Class containerClass) throws Throwable { + final int ITERATIONS = Platform.isDebugBuild() ? 
3 : 100; + String sig = "()Q" + valueClass.getName() + ";"; + final String signature = sig.replace('.', '/'); + final String arraySignature = "[L" + valueClass.getName().replace('.', '/') + ";"; + System.out.println(arraySignature); + MethodHandle fromExecStackToValueArray = MethodHandleBuilder.loadCode( + LOOKUP, + "execStackToValueArray", + MethodType.methodType(boolean.class), + CODE -> { + CODE + .invokestatic(System.class, "gc", "()V", false) + .new_(containerClass) + .dup() + .invoke(MacroCodeBuilder.InvocationKind.INVOKESPECIAL, containerClass, "", "()V", false) + .astore_1() + .ldc(ITERATIONS * 3) + .anewarray(valueClass) + .astore_2() + .aload_2() + .aload_1() + .swap() + .putfield(containerClass, "valueArray", arraySignature) + .iconst_0() + .istore_3() + .label("loop1") + .iload_3() + .ldc(ITERATIONS) + .ifcmp(TypeTag.I, CondKind.GE, "end1") + .aload_2() + .iload_3() + .invokestatic(valueClass, "getInstance", signature, false) + .aastore() + .iinc(3, 1) + .aload_2() + .iload_3() + .invokestatic(valueClass, "getNonBufferedInstance", signature, false) + .aastore() + .iinc(3, 1) + .aload_2() + .iload_3() + .defaultvalue(valueClass) + .aastore() + .iinc(3, 1) + .goto_("loop1") + .label("end1") + .invokestatic(System.class, "gc", "()V", false) + .iconst_0() + .istore_3() + .label("loop2") + .iload_3() + .ldc(ITERATIONS * 3) + .ifcmp(TypeTag.I, CondKind.GE, "end2") + .aload_2() + .iload_3() + .aaload() + .invokevirtual(valueClass, "verify", "()Z", false) + .iconst_1() + .ifcmp(TypeTag.I, CondKind.NE, "failed") + .iinc(3, 1) + .goto_("loop2") + .label("end2") + .iconst_1() + .return_(TypeTag.Z) + .label("failed") + .iconst_0() + .return_(TypeTag.Z); + }); + boolean result = (boolean) fromExecStackToValueArray.invokeExact(); + System.out.println(result); + assertTrue(result, "Invariant"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueWithJni.java 2019-03-11 14:27:56.526353629 +0100 @@ -0,0 
+1,81 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.valuetypes; + +/* @test + * @summary test JNI functions with values + * @compile -XDemitQtypes -XDallowWithFieldOperator ValueWithJni.java + * @run main/othervm/native -Xint -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueWithJni + * @run main/othervm/native -Xcomp -XX:+EnableValhalla runtime.valhalla.valuetypes.ValueWithJni + */ +public value final class ValueWithJni { + + static { + System.loadLibrary("ValueWithJni"); + } + + public static void main(String[] args) { + testJniMonitorOps(); + } + + final int x; + private ValueWithJni() { x = 0; } + + public native void doJniMonitorEnter(); + public native void doJniMonitorExit(); + + public static ValueWithJni createValueWithJni(int x) { + ValueWithJni v = ValueWithJni.default; + v = __WithField(v.x, x); + return v; + } + + public static void testJniMonitorOps() { + boolean sawImse = false; + try { + createValueWithJni(0).doJniMonitorEnter(); + } catch (Throwable t) { + sawImse = checkImse(t); + } + if (!sawImse) { + throw new RuntimeException("JNI MonitorEnter did not fail"); + } + sawImse = false; + try { + createValueWithJni(0).doJniMonitorExit(); + } catch (Throwable t) { + sawImse = checkImse(t); + } + if (!sawImse) { + throw new RuntimeException("JNI MonitorExit did not fail"); + } + } + + static boolean checkImse(Throwable t) { + if (t instanceof IllegalMonitorStateException) { + return true; + } + throw new RuntimeException(t); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/WithFieldAccessorTest.java 2019-03-11 14:27:57.050353622 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8210351 + * @summary test nestmate access to a value type's public, protected and private final fields. + * @compile -XDemitQtypes -XDallowWithFieldOperator WithFieldAccessorTest.java + * @run main/othervm -XX:+EnableValhalla WithFieldAccessorTest + */ + +// This test is similar to javac's WithFieldAccessorTest but tests nestmate +// access to public, protected, and private final fields in a value type. +public class WithFieldAccessorTest { + + public static final value class V { + public final char c; + protected final long l; + private final int i; + V() { + this.c = '0'; + this.l = 0; + this.i = 0; + } + + public static V make(char c, long l, int i) { + V v = V.default; + v = __WithField(v.c, c); + v = __WithField(v.l, l); + v = __WithField(v.i, i); + return v; + } + } + + public static void main(String... args) throws Throwable { + V v = __WithField(V.make('a', 5, 10).c, 'b'); + if (!v.toString().equals("[WithFieldAccessorTest$V c=b l=5 i=10]")) { + throw new AssertionError("Withfield of 'c' didn't work!" + v.toString()); + } + v = __WithField(V.make('a', 5, 10).l, 25); + if (!v.toString().equals("[WithFieldAccessorTest$V c=a l=25 i=10]")) { + throw new AssertionError("Withfield of 'l' didn't work!" 
+ v.toString()); + } + v = __WithField(V.make('a', 5, 10).i, 20); + if (!v.toString().equals("[WithFieldAccessorTest$V c=a l=5 i=20]")) { + throw new AssertionError("Withfield of 'i' didn't work!" + v.toString()); + } + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/WithFieldNoAccessTest.jcod 2019-03-11 14:27:57.526353615 +0100 @@ -0,0 +1,647 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test + * @bug 8210351 + * @summary Check that IllegalAccessError exceptions get thrown if a class that + * is not a nestmate of a value type tries to write to the value type's + * final fields. + * @compile -XDemitQtypes WithFieldNoAccessTest.jcod + * @run main/othervm -XX:+EnableValhalla WithFieldNoAccessTest + */ + +// This test is based on the below Java program. The only difference is that +// the nestmate attributes have been removed. 
So, value type WithFieldNoAccessTest +// and WithFieldNoAccessTest$V are no longer nestmates. This should cause +// IllegalAccessError exceptions when WithFieldNoAccessTest tries to write, using +// withfield, to value type WithFieldNoAccessTest$V's final fields. +// +// public class WithFieldNoAccessTest { +// +// public static final value class V { +// public final char c; +// protected final long l; +// private final int i; +// V() { +// this.c = '0'; +// this.l = 0; +// this.i = 0; +// } +// +// public static V make(char c, long l, int i) { +// V v = V.default; +// v = __WithField(v.c, c); +// v = __WithField(v.l, l); +// v = __WithField(v.i, i); +// return v; +// } +// } +// +// public static void main(String... args) throws Throwable { +// try { +// V v = __WithField(V.make('a', 5, 10).c, 'b'); +// throw new RuntimeException("Failed to throw IllegalAccessError exception for final public field"); +// } catch (java.lang.IllegalAccessError e) { +// if (!e.toString().contains("Update to non-static final field WithFieldNoAccessTest$V.c attempted")) { +// throw new RuntimeException("Unexpected IllegalAccessError: " + e.toString()); +// } +// } +// +// try { +// V v = __WithField(V.make('a', 5, 10).l, 25); +// throw new RuntimeException("Failed to throw IllegalAccessError exception for final protected field"); +// } catch (java.lang.IllegalAccessError e) { +// if (!e.toString().contains("Update to non-static final field WithFieldNoAccessTest$V.l attempted")) { +// throw new RuntimeException("Unexpected IllegalAccessError: " + e.toString()); +// } +// } +// +// try { +// V v = __WithField(V.make('a', 5, 10).i, 20); +// throw new RuntimeException("Failed to throw IllegalAccessError exception for final private field"); +// } catch (java.lang.IllegalAccessError e) { +// if (!e.toString().contains("WithFieldNoAccessTest tried to access private field WithFieldNoAccessTest$V.i")) { +// throw new RuntimeException("Unexpected IllegalAccessError: " + e.toString()); +// } +// 
} +// } +// } +// + +class WithFieldNoAccessTest$V { + 0xCAFEBABE; + 0; // minor version + 57; // version + [68] { // Constant Pool + ; // first element is empty + Method #10 #40; // #1 at 0x0A + class #41; // #2 at 0x0F + Field #2 #42; // #3 at 0x12 + Field #2 #43; // #4 at 0x17 + Field #2 #44; // #5 at 0x1C + InvokeDynamic 0s #47; // #6 at 0x21 + InvokeDynamic 0s #48; // #7 at 0x26 + InvokeDynamic 0s #49; // #8 at 0x2B + InvokeDynamic 0s #50; // #9 at 0x30 + class #51; // #10 at 0x35 + Utf8 "c"; // #11 at 0x38 + Utf8 "C"; // #12 at 0x3C + Utf8 "l"; // #13 at 0x40 + Utf8 "J"; // #14 at 0x44 + Utf8 "i"; // #15 at 0x48 + Utf8 "I"; // #16 at 0x4C + Utf8 ""; // #17 at 0x50 + Utf8 "()V"; // #18 at 0x59 + Utf8 "Code"; // #19 at 0x5F + Utf8 "LineNumberTable"; // #20 at 0x66 + Utf8 "make"; // #21 at 0x78 + Utf8 "V"; // #22 at 0x7F + Utf8 "InnerClasses"; // #23 at 0x83 + Utf8 "ValueTypes"; // #24 at 0x92 + Utf8 "(CJI)QWithFieldNoAccessTest$V;"; // #25 at 0x9F + Utf8 "hashCode"; // #26 at 0xC0 + Utf8 "()I"; // #27 at 0xCB + Utf8 "equals"; // #28 at 0xD1 + Utf8 "(Ljava/lang/Object;)Z"; // #29 at 0xDA + Utf8 "toString"; // #30 at 0xF2 + Utf8 "()Ljava/lang/String;"; // #31 at 0xFD + Utf8 "longHashCode"; // #32 at 0x0114 + Utf8 "()J"; // #33 at 0x0123 + Utf8 "$makeValue$"; // #34 at 0x0129 + Utf8 "()QWithFieldNoAccessTest$V;"; // #35 at 0x0137 + Utf8 "SourceFile"; // #36 at 0x0155 + Utf8 "WithFieldNoAccessTest.java"; // #37 at 0x0162 + Utf8 "NestHost"; // #38 at 0x017F + class #52; // #39 at 0x018A + NameAndType #17 #18; // #40 at 0x018D + Utf8 "WithFieldNoAccessTest$V"; // #41 at 0x0192 + NameAndType #11 #12; // #42 at 0x01AC + NameAndType #13 #14; // #43 at 0x01B1 + NameAndType #15 #16; // #44 at 0x01B6 + Utf8 "BootstrapMethods"; // #45 at 0x01BB + MethodHandle 6b #53; // #46 at 0x01CE + NameAndType #26 #54; // #47 at 0x01D2 + NameAndType #28 #55; // #48 at 0x01D7 + NameAndType #30 #56; // #49 at 0x01DC + NameAndType #32 #57; // #50 at 0x01E1 + Utf8 "java/lang/Object"; // #51 
at 0x01E6 + Utf8 "WithFieldNoAccessTest"; // #52 at 0x01F9 + Method #58 #59; // #53 at 0x0211 + Utf8 "(Ljava/lang/Object;)I"; // #54 at 0x0216 + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #55 at 0x022E + Utf8 "(Ljava/lang/Object;)Ljava/lang/String;"; // #56 at 0x0258 + Utf8 "(Ljava/lang/Object;)J"; // #57 at 0x0281 + class #60; // #58 at 0x0299 + NameAndType #61 #64; // #59 at 0x029C + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #60 at 0x02A1 + Utf8 "makeBootstrapMethod"; // #61 at 0x02CA + class #66; // #62 at 0x02E0 + Utf8 "Lookup"; // #63 at 0x02E3 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #64 at 0x02EC + class #67; // #65 at 0x0362 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #66 at 0x0365 + Utf8 "java/lang/invoke/MethodHandles"; // #67 at 0x038D + } // Constant Pool + + 0x0131; // access [ ACC_PUBLIC ACC_SUPER ACC_FINAL ] + #2;// this_cpx + #10;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x03B8 + 0x0011; // access + #11; // name_cpx + #12; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03C0 + 0x0014; // access + #13; // name_cpx + #14; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03C8 + 0x0012; // access + #15; // name_cpx + #16; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [7] { // methods + { // Member at 0x03D2 + 0x0000; // access + #17; // name_cpx + #18; // sig_cpx + [1] { // Attributes + Attr(#19, 29) { // Code at 0x03DA + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x03F1 + [1] { // LineNumberTable + 0 15; // at 0x03FD + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03FD + 0x0009; // access + 
#21; // name_cpx + #25; // sig_cpx + [1] { // Attributes + Attr(#19, 76) { // Code at 0x0405 + 4; // max_stack + 5; // max_locals + Bytes[36]{ + 0xCB00023A041A1904; + 0x5FCC00033A041F19; + 0x045B57CC00043A04; + 0x1D19045FCC00053A; + 0x041904B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 22) { // LineNumberTable at 0x043B + [5] { // LineNumberTable + 0 22; // at 0x0447 + 5 23; // at 0x044B + 14 24; // at 0x044F + 24 25; // at 0x0453 + 33 26; // at 0x0457 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0457 + 0x0011; // access + #26; // name_cpx + #27; // sig_cpx + [1] { // Attributes + Attr(#19, 31) { // Code at 0x045F + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00060000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x0478 + [1] { // LineNumberTable + 0 11; // at 0x0484 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0484 + 0x0011; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#19, 32) { // Code at 0x048C + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA00070000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x04A6 + [1] { // LineNumberTable + 0 11; // at 0x04B2 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04B2 + 0x0011; // access + #30; // name_cpx + #31; // sig_cpx + [1] { // Attributes + Attr(#19, 31) { // Code at 0x04BA + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00080000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x04D3 + [1] { // LineNumberTable + 0 11; // at 0x04DF + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 
0x04DF + 0x0011; // access + #32; // name_cpx + #33; // sig_cpx + [1] { // Attributes + Attr(#19, 31) { // Code at 0x04E7 + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00090000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x0500 + [1] { // LineNumberTable + 0 11; // at 0x050C + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x050C + 0x1008; // access + #34; // name_cpx + #35; // sig_cpx + [1] { // Attributes + Attr(#19, 69) { // Code at 0x0514 + 4; // max_stack + 1; // max_locals + Bytes[29]{ + 0xCB00024B10302A5F; + 0xCC00034B092A5B57; + 0xCC00044B032A5FCC; + 0x00054B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 22) { // LineNumberTable at 0x0543 + [5] { // LineNumberTable + 0 15; // at 0x054F + 4 16; // at 0x0553 + 12 17; // at 0x0557 + 20 18; // at 0x055B + 27 19; // at 0x055F + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#36, 2) { // SourceFile at 0x0561 + #37; + } // end SourceFile + ; + Attr(#23, 18) { // InnerClasses at 0x0571 + [2] { // InnerClasses + #2 #39 #22 281; // at 0x0581 + #62 #65 #63 25; // at 0x0589 + } + } // end InnerClasses + ; + Attr(#45, 6) { // BootstrapMethods at 0x0589 + [1] { // bootstrap_methods + { // bootstrap_method + #46; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class WithFieldNoAccessTest$V + + +class WithFieldNoAccessTest { + 0xCAFEBABE; + 0; // minor version + 57; // version + [92] { // Constant Pool + ; // first element is empty + Method #23 #40; // #1 at 0x0A + long 0x0000000000000005;; // #2 at 0x0F + Method #24 #41; // #4 at 0x18 + Field #24 #42; // #5 at 0x1D + class #43; // #6 at 0x22 + String #44; // #7 at 0x25 + Method #6 #45; // #8 at 
0x28 + class #46; // #9 at 0x2D + Method #9 #47; // #10 at 0x30 + String #48; // #11 at 0x35 + Method #49 #50; // #12 at 0x38 + InvokeDynamic 0s #54; // #13 at 0x3D + long 0x0000000000000019;; // #14 at 0x42 + Field #24 #55; // #16 at 0x4B + String #56; // #17 at 0x50 + String #57; // #18 at 0x53 + Field #24 #58; // #19 at 0x56 + String #59; // #20 at 0x5B + String #60; // #21 at 0x5E + class #61; // #22 at 0x61 + class #62; // #23 at 0x64 + class #63; // #24 at 0x67 + Utf8 "V"; // #25 at 0x6A + Utf8 "InnerClasses"; // #26 at 0x6E + Utf8 "ValueTypes"; // #27 at 0x7D + Utf8 ""; // #28 at 0x8A + Utf8 "()V"; // #29 at 0x93 + Utf8 "Code"; // #30 at 0x99 + Utf8 "LineNumberTable"; // #31 at 0xA0 + Utf8 "main"; // #32 at 0xB2 + Utf8 "([Ljava/lang/String;)V"; // #33 at 0xB9 + Utf8 "StackMapTable"; // #34 at 0xD2 + Utf8 "Exceptions"; // #35 at 0xE2 + class #64; // #36 at 0xEF + Utf8 "SourceFile"; // #37 at 0xF2 + Utf8 "WithFieldNoAccessTest.java"; // #38 at 0xFF + Utf8 "NestMembers"; // #39 at 0x011C + NameAndType #28 #29; // #40 at 0x012A + NameAndType #65 #66; // #41 at 0x012F + NameAndType #67 #68; // #42 at 0x0134 + Utf8 "java/lang/RuntimeException"; // #43 at 0x0139 + Utf8 "Failed to throw IllegalAccessError exception for final public field"; // #44 at 0x0156 + NameAndType #28 #69; // #45 at 0x019C + Utf8 "java/lang/IllegalAccessError"; // #46 at 0x01A1 + NameAndType #70 #71; // #47 at 0x01C0 + Utf8 "Update to non-static final field WithFieldNoAccessTest$V.c attempted"; // #48 at 0x01C5 + class #72; // #49 at 0x020C + NameAndType #73 #74; // #50 at 0x020F + Utf8 "BootstrapMethods"; // #51 at 0x0214 + MethodHandle 6b #75; // #52 at 0x0227 + String #76; // #53 at 0x022B + NameAndType #77 #78; // #54 at 0x022E + NameAndType #79 #80; // #55 at 0x0233 + Utf8 "Failed to throw IllegalAccessError exception for final protected field"; // #56 at 0x0238 + Utf8 "Update to non-static final field WithFieldNoAccessTest$V.l attempted"; // #57 at 0x0281 + NameAndType #81 #82; // #58 at 
0x02C8 + Utf8 "Failed to throw IllegalAccessError exception for final private field"; // #59 at 0x02CD + Utf8 "WithFieldNoAccessTest tried to access private field WithFieldNoAccessTest$V.i"; // #60 at 0x0314 + Utf8 "WithFieldNoAccessTest"; // #61 at 0x0364 + Utf8 "java/lang/Object"; // #62 at 0x037C + Utf8 "WithFieldNoAccessTest$V"; // #63 at 0x038F + Utf8 "java/lang/Throwable"; // #64 at 0x03A9 + Utf8 "make"; // #65 at 0x03BF + Utf8 "(CJI)QWithFieldNoAccessTest$V;"; // #66 at 0x03C6 + Utf8 "c"; // #67 at 0x03E7 + Utf8 "C"; // #68 at 0x03EB + Utf8 "(Ljava/lang/String;)V"; // #69 at 0x03EF + Utf8 "toString"; // #70 at 0x0407 + Utf8 "()Ljava/lang/String;"; // #71 at 0x0412 + Utf8 "java/lang/String"; // #72 at 0x0429 + Utf8 "contains"; // #73 at 0x043C + Utf8 "(Ljava/lang/CharSequence;)Z"; // #74 at 0x0447 + Method #83 #84; // #75 at 0x0465 + Utf8 "Unexpected IllegalAccessError: "; // #76 at 0x046A + Utf8 "makeConcatWithConstants"; // #77 at 0x048D + Utf8 "(Ljava/lang/String;)Ljava/lang/String;"; // #78 at 0x04A7 + Utf8 "l"; // #79 at 0x04D0 + Utf8 "J"; // #80 at 0x04D4 + Utf8 "i"; // #81 at 0x04D8 + Utf8 "I"; // #82 at 0x04DC + class #85; // #83 at 0x04E0 + NameAndType #77 #88; // #84 at 0x04E3 + Utf8 "java/lang/invoke/StringConcatFactory"; // #85 at 0x04E8 + class #90; // #86 at 0x050F + Utf8 "Lookup"; // #87 at 0x0512 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"; // #88 at 0x051B + class #91; // #89 at 0x05B6 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #90 at 0x05B9 + Utf8 "java/lang/invoke/MethodHandles"; // #91 at 0x05E1 + } // Constant Pool + + 0x0021; // access [ ACC_PUBLIC ACC_SUPER ] + #22;// this_cpx + #23;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [0] { // fields + } // fields + + [2] { // methods + { // Member at 0x060E + 0x0001; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#30, 29) { // 
Code at 0x0616 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#31, 6) { // LineNumberTable at 0x062D + [1] { // LineNumberTable + 0 9; // at 0x0639 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0639 + 0x0089; // access + #32; // name_cpx + #33; // sig_cpx + [2] { // Attributes + Attr(#30, 305) { // Code at 0x0641 + 6; // max_stack + 2; // max_locals + Bytes[174]{ + 0x1062106114000210; + 0x0AB800045FCC0005; + 0x4CBB0006591207B7; + 0x0008BF4C2BB6000A; + 0x120BB6000C9A0014; + 0xBB0006592BB6000A; + 0xBA000D0000B70008; + 0xBF14000E10611400; + 0x02100AB800045B57; + 0xCC00104CBB000659; + 0x1211B70008BF4C2B; + 0xB6000A1212B6000C; + 0x9A0014BB0006592B; + 0xB6000ABA000D0000; + 0xB70008BF10141061; + 0x140002100AB80004; + 0x5FCC00134CBB0006; + 0x591214B70008BF4C; + 0x2BB6000A1215B600; + 0x0C9A0014BB000659; + 0x2BB6000ABA000D00; + 0x00B70008BFB1; + }; + [3] { // Traps + 0 27 27 9; // at 0x0707 + 57 86 86 9; // at 0x070F + 116 143 143 9; // at 0x0717 + } // end Traps + [2] { // Attributes + Attr(#31, 66) { // LineNumberTable at 0x0719 + [16] { // LineNumberTable + 0 32; // at 0x0725 + 17 33; // at 0x0729 + 27 34; // at 0x072D + 28 35; // at 0x0731 + 40 36; // at 0x0735 + 57 41; // at 0x0739 + 76 42; // at 0x073D + 86 43; // at 0x0741 + 87 44; // at 0x0745 + 99 45; // at 0x0749 + 116 50; // at 0x074D + 133 51; // at 0x0751 + 143 52; // at 0x0755 + 144 53; // at 0x0759 + 156 54; // at 0x075D + 173 57; // at 0x0761 + } + } // end LineNumberTable + ; + Attr(#34, 17) { // StackMapTable at 0x0761 + [6] { // + 91b, [1]z{7b,9}; // same_locals_1_stack_item_frame + 29b; // same_frame + 92b, [1]z{7b,9}; // same_locals_1_stack_item_frame + 29b; // same_frame + 90b, [1]z{7b,9}; // same_locals_1_stack_item_frame + 29b; // same_frame + } + } // end StackMapTable + } // Attributes + } // end Code + ; + Attr(#35, 4) { // Exceptions at 
0x0778 + [1] { // Exceptions + #36; // at 0x0782 + } + } // end Exceptions + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#37, 2) { // SourceFile at 0x0784 + #38; + } // end SourceFile + ; + Attr(#26, 18) { // InnerClasses at 0x0796 + [2] { // InnerClasses + #24 #22 #25 281; // at 0x07A6 + #86 #89 #87 25; // at 0x07AE + } + } // end InnerClasses + ; + Attr(#51, 8) { // BootstrapMethods at 0x07AE + [1] { // bootstrap_methods + { // bootstrap_method + #52; // bootstrap_method_ref + [1] { // bootstrap_arguments + #53; // at 0x07BC + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class WithFieldNoAccessTest --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/classfileparser/BadACCValue.java 2019-03-11 14:27:57.982353609 +0100 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +/* + * @test + * @summary test that if a class file has ACC_VALUE set then it must be run + * with option -XX:+EnableValhalla. + * @compile cfpTests.jcod + * @run main/othervm -XX:-EnableValhalla BadACCValue + */ + +public class BadACCValue { + + public static void runTest(String test_name, String message) throws Exception { + System.out.println("Testing: " + test_name); + try { + Class newClass = Class.forName(test_name); + } catch (java.lang.ClassFormatError e) { + if (!e.getMessage().contains(message)) { + throw new RuntimeException( "Wrong ClassFormatError: " + e.getMessage()); + } + } + } + + public static void main(String[] args) throws Exception { + + // Test ACC_VALUE causes a CFE unless -XX:+EnableValhalla is specified. + runTest("ValueFieldNotFinal", + "Class modifier ACC_VALUE in class ValueFieldNotFinal requires option -XX:+EnableValhalla"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/classfileparser/BadValueTypes.java 2019-03-11 14:27:58.486353602 +0100 @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +/* + * @test + * @summary test that the right exceptions get thrown for bad value type + * class files. + * @compile cfpTests.jcod + * @run main/othervm -XX:+EnableValhalla BadValueTypes + */ + +public class BadValueTypes { + + public static void runTest(String test_name, String message) throws Exception { + System.out.println("Testing: " + test_name); + try { + Class newClass = Class.forName(test_name); + } catch (java.lang.ClassFormatError e) { + if (!e.getMessage().contains(message)) { + throw new RuntimeException( "Wrong ClassFormatError: " + e.getMessage()); + } + } + } + + public static void main(String[] args) throws Exception { + + // Test that ACC_VALUE with ACC_ABSTRACT is illegal. + runTest("ValueAbstract", "Illegal class modifiers in class ValueAbstract"); + + // Test that ACC_VALUE with ACC_ENUM is illegal. + runTest("ValueEnum", "Illegal class modifiers in class ValueEnum"); + + // Test that value type fields must be final. + runTest("ValueFieldNotFinal", "Illegal field modifiers in class ValueFieldNotFinal"); + + // Test that arrays cannot have ACC_FLATTENABLE set. + runTest("ValueFlatArray", "ACC_FLATTENABLE cannot be specified for an array"); + + // Test that a value type cannot have a method named init. +/* TBD: uncomment when javac stops generating () methods for value types. + runTest("ValueInitMethod", "Value Type cannot have a method named "); +*/ + + // Test that ACC_VALUE with ACC_INTERFACE is illegal. + runTest("ValueInterface", "Illegal class modifiers in class ValueInterface"); + + // Test that value type instance methods cannot be synchronized. 
+ runTest("ValueMethodSynch", "Method instanceMethod in class ValueMethodSynch has illegal modifiers"); + + runTest("ValueSuperClass", "Value type must have java.lang.Object as superclass"); + + // Test that ClassCircularityError gets detected for instance fields. + System.out.println("Testing ClassCircularityError for instance fields"); + try { + Class newClass = Class.forName("Circ"); + throw new RuntimeException( "java.lang.ClassCircularityError exception not thrown!"); + } catch (java.lang.ClassCircularityError e) { + if (!e.getMessage().contains("Circ")) { + throw new RuntimeException( "Wrong ClassCircularityError: " + e.getMessage()); + } + } + + // Test that ClassCircularityError gets detected for static fields. + System.out.println("Testing ClassCircularityError for static fields"); + try { + Class newClass = Class.forName("CircStaticB"); + throw new RuntimeException( "java.lang.ClassCircularityError exception not thrown!"); + } catch (java.lang.ClassCircularityError e) { + if (!e.getMessage().contains("CircStatic")) { + throw new RuntimeException( "Wrong ClassCircularityError: " + e.getMessage()); + } + } + + runTest("ValueCloneable", "Value Types do not support Cloneable"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/classfileparser/cfpTests.jcod 2019-03-11 14:27:58.934353596 +0100 @@ -0,0 +1,2778 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// This file contains multiple illegal value type classes that should cause +// ClassFormatError exceptions when attempted to be loaded. +// +// The .jcod classes were originally generated from this Java file and then +// modified to cause ClassFormatError or ClassCircularityError exceptions. The +// '(bad)' comments in most of the tests show where the modifications were made. +// +// final value class Value { +// static final Value VT = makeValue(0x01234567); +// final int int_v; +// Value() { +// int_v = 1; +// } +// static Value makeValue(int x) { +// Value v = Value.default; +// v = __WithField(v.int_v, x); +// return v; +// } +// } + + +// Test that class modifiers cannot have both ACC_VALUE and ACC_ABSTRACT set. 
+// +class ValueAbstract { + 0xCAFEBABE; + 0; // minor version + 57; // version + [28] { // Constant Pool + ; // first element is empty + Method #7 #23; // #1 at 0x0A + Field #3 #24; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #25; // #5 at 0x1C + Field #3 #26; // #6 at 0x21 + class #27; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueAbstract"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x36 + Utf8 "LValueAbstract;"; // #11 at 0x43 + Utf8 "int_v"; // #12 at 0x4D + Utf8 "I"; // #13 at 0x55 + Utf8 ""; // #14 at 0x59 + Utf8 "()V"; // #15 at 0x62 + Utf8 "Code"; // #16 at 0x68 + Utf8 "LineNumberTable"; // #17 at 0x6F + Utf8 "makeValueAbstract"; // #18 at 0x81 + Utf8 "(I)LValueAbstract;"; // #19 at 0x8D + Utf8 ""; // #20 at 0x9A + Utf8 "SourceFile"; // #21 at 0xA5 + Utf8 "ValueAbstract.java"; // #22 at 0xB2 + NameAndType #14 #15; // #23 at 0xBF + NameAndType #12 #13; // #24 at 0xC4 + NameAndType #18 #19; // #25 at 0xC9 + NameAndType #8 #11; // #26 at 0xCE + Utf8 "java/lang/Object"; // #27 at 0xD3 + } // Constant Pool + + 0x0530; // access [ ACC_VALUE ACC_ABSTRACT(bad) ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF0 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xF8 + 0x0010; // access + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x0102 + 0x0000; // access + #14; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 42) { // Code at 0x010A + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0126 + [3] { // LineNumberTable + 0 4; // at 0x0132 + 4 5; // at 0x0136 + 9 6; // at 0x013A + } + } // end LineNumberTable + 
} // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x013A + 0x0008; // access + #18; // name_cpx + #19; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x0142 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0160 + [3] { // LineNumberTable + 0 8; // at 0x016C + 4 9; // at 0x0170 + 10 10; // at 0x0174 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0174 + 0x0008; // access + #20; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x017C + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0197 + [1] { // LineNumberTable + 0 2; // at 0x01A3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#21, 2) { // SourceFile at 0x01A5 + #22; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x01AD + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueAbstract + +////////////////////////////////////////////////////////////////////// + +// Test that class modifiers cannot have both ACC_VALUE and ACC_ENUM set. 
+// +class ValueEnum { + 0xCAFEBABE; + 0; // minor version + 57; // version + [28] { // Constant Pool + ; // first element is empty + Method #7 #23; // #1 at 0x0A + Field #3 #24; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #25; // #5 at 0x1C + Field #3 #26; // #6 at 0x21 + class #27; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueEnum"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x36 + Utf8 "LValueEnum;"; // #11 at 0x43 + Utf8 "int_v"; // #12 at 0x4D + Utf8 "I"; // #13 at 0x55 + Utf8 ""; // #14 at 0x59 + Utf8 "()V"; // #15 at 0x62 + Utf8 "Code"; // #16 at 0x68 + Utf8 "LineNumberTable"; // #17 at 0x6F + Utf8 "makeValueEnum"; // #18 at 0x81 + Utf8 "(I)LValueEnum;"; // #19 at 0x8D + Utf8 ""; // #20 at 0x9A + Utf8 "SourceFile"; // #21 at 0xA5 + Utf8 "ValueEnum.java"; // #22 at 0xB2 + NameAndType #14 #15; // #23 at 0xBF + NameAndType #12 #13; // #24 at 0xC4 + NameAndType #18 #19; // #25 at 0xC9 + NameAndType #8 #11; // #26 at 0xCE + Utf8 "java/lang/Object"; // #27 at 0xD3 + } // Constant Pool + + 0x04130; // access [ ACC_VALUE ACC_ENUM(bad) ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF0 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xF8 + 0x0010; // access + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x0102 + 0x0000; // access + #14; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 42) { // Code at 0x010A + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0126 + [3] { // LineNumberTable + 0 4; // at 0x0132 + 4 5; // at 0x0136 + 9 6; // at 0x013A + } + } // end LineNumberTable + } // Attributes + } // end 
Code + } // Attributes + } // Member + ; + { // Member at 0x013A + 0x0008; // access + #18; // name_cpx + #19; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x0142 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0160 + [3] { // LineNumberTable + 0 8; // at 0x016C + 4 9; // at 0x0170 + 10 10; // at 0x0174 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0174 + 0x0008; // access + #20; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x017C + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0197 + [1] { // LineNumberTable + 0 2; // at 0x01A3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#21, 2) { // SourceFile at 0x01A5 + #22; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x01AD + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueEnum + +////////////////////////////////////////////////////////////////////// + +// Test that value type fields must be final. 
+// +class ValueFieldNotFinal { + 0xCAFEBABE; + 0; // minor version + 57; // version + [28] { // Constant Pool + ; // first element is empty + Method #7 #23; // #1 at 0x0A + Field #3 #24; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #25; // #5 at 0x1C + Field #3 #26; // #6 at 0x21 + class #27; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueFieldNotFinal"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x36 + Utf8 "LValueFieldNotFinal;"; // #11 at 0x43 + Utf8 "int_v"; // #12 at 0x4D + Utf8 "I"; // #13 at 0x55 + Utf8 ""; // #14 at 0x59 + Utf8 "()V"; // #15 at 0x62 + Utf8 "Code"; // #16 at 0x68 + Utf8 "LineNumberTable"; // #17 at 0x6F + Utf8 "makeValueFieldNotFinal"; // #18 at 0x81 + Utf8 "(I)LValueFieldNotFinal;"; // #19 at 0x8D + Utf8 ""; // #20 at 0x9A + Utf8 "SourceFile"; // #21 at 0xA5 + Utf8 "ValueFieldNotFinal.java"; // #22 at 0xB2 + NameAndType #14 #15; // #23 at 0xBF + NameAndType #12 #13; // #24 at 0xC4 + NameAndType #18 #19; // #25 at 0xC9 + NameAndType #8 #11; // #26 at 0xCE + Utf8 "java/lang/Object"; // #27 at 0xD3 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF0 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xF8 + 0x0000; // access [ Field not ACC_FINAL(bad) ] + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x0102 + 0x0000; // access + #14; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 42) { // Code at 0x010A + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0126 + [3] { // LineNumberTable + 0 4; // at 0x0132 + 4 5; // at 0x0136 + 9 6; // 
at 0x013A + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x013A + 0x0008; // access + #18; // name_cpx + #19; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x0142 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0160 + [3] { // LineNumberTable + 0 8; // at 0x016C + 4 9; // at 0x0170 + 10 10; // at 0x0174 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0174 + 0x0008; // access + #20; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x017C + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0197 + [1] { // LineNumberTable + 0 2; // at 0x01A3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#21, 2) { // SourceFile at 0x01A5 + #22; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x01AD + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueFieldNotFinal + +////////////////////////////////////////////////////////////////////// + +// Test that arrays cannot have ACC_FLATTENABLE set. 
+// +class ValueFlatArray { + 0xCAFEBABE; + 0; // minor version + 57; // version + [32] { // Constant Pool + ; // first element is empty + Method #8 #26; // #1 at 0x0A + Field #4 #27; // #2 at 0x0F + Field #4 #28; // #3 at 0x14 + class #10; // #4 at 0x19 + int 0x01234567; // #5 at 0x1C + Method #4 #29; // #6 at 0x21 + Field #4 #30; // #7 at 0x26 + class #31; // #8 at 0x2B + Utf8 "VT"; // #9 at 0x2E + Utf8 "ValueFlatArray"; // #10 at 0x33 + Utf8 "ValueTypes"; // #11 at 0x40 + Utf8 "LValueFlatArray;"; // #12 at 0x4D + Utf8 "int_v"; // #13 at 0x5C + Utf8 "I"; // #14 at 0x64 + Utf8 "int_a"; // #15 at 0x68 + Utf8 "[I"; // #16 at 0x70 + Utf8 ""; // #17 at 0x75 + Utf8 "()V"; // #18 at 0x7E + Utf8 "Code"; // #19 at 0x84 + Utf8 "LineNumberTable"; // #20 at 0x8B + Utf8 "makeValueFlatArray"; // #21 at 0x9D + Utf8 "(I)LValueFlatArray;"; // #22 at 0xAE + Utf8 ""; // #23 at 0xC0 + Utf8 "SourceFile"; // #24 at 0xCB + Utf8 "ValueFlatArray.java"; // #25 at 0xD8 + NameAndType #17 #18; // #26 at 0xEA + NameAndType #13 #14; // #27 at 0xEF + NameAndType #15 #16; // #28 at 0xF4 + NameAndType #21 #22; // #29 at 0xF9 + NameAndType #9 #12; // #30 at 0xFE + Utf8 "java/lang/Object"; // #31 at 0x0103 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #4;// this_cpx + #8;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x0120 + 0x0118; // access + #9; // name_cpx + #12; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0128 + 0x0010; // access + #13; // name_cpx + #14; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0130 + 0x0110; // access // access [ ACC_FINAL ACC_FLATTENABLE(bad) ] + #15; // name_cpx + #16; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x013A + 0x0000; // access + #17; // name_cpx + #18; // sig_cpx + [1] { // Attributes + Attr(#19, 51) { // Code at 0x0142 + 
2; // max_stack + 1; // max_locals + Bytes[15]{ + 0x2AB700012A04B500; + 0x022A01B50003B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 18) { // LineNumberTable at 0x0163 + [4] { // LineNumberTable + 0 5; // at 0x016F + 4 6; // at 0x0173 + 9 7; // at 0x0177 + 14 8; // at 0x017B + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x017B + 0x0008; // access + #21; // name_cpx + #22; // sig_cpx + [1] { // Attributes + Attr(#19, 44) { // Code at 0x0183 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00044C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 14) { // LineNumberTable at 0x01A1 + [3] { // LineNumberTable + 0 10; // at 0x01AD + 4 11; // at 0x01B1 + 10 12; // at 0x01B5 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x01B5 + 0x0008; // access + #23; // name_cpx + #18; // sig_cpx + [1] { // Attributes + Attr(#19, 33) { // Code at 0x01BD + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1205B80006B30007; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#20, 6) { // LineNumberTable at 0x01D8 + [1] { // LineNumberTable + 0 2; // at 0x01E4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#24, 2) { // SourceFile at 0x01E6 + #25; + } // end SourceFile + ; + Attr(#11, 4) { // ValueTypes at 0x01EE + 0x00010004; + } // end ValueTypes + } // Attributes +} // end class ValueFlatArray + +////////////////////////////////////////////////////////////////////// + +// Test that a value type cannot have a method named . 
+// +class ValueInitMethod { + 0xCAFEBABE; + 0; // minor version + 57; // version + [28] { // Constant Pool + ; // first element is empty + Method #7 #23; // #1 at 0x0A + Field #3 #24; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #25; // #5 at 0x1C + Field #3 #26; // #6 at 0x21 + class #27; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueInitMethod"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x36 + Utf8 "LValueInitMethod;"; // #11 at 0x43 + Utf8 "int_v"; // #12 at 0x4D + Utf8 "I"; // #13 at 0x55 + Utf8 ""; // #14 at 0x59 + Utf8 "()V"; // #15 at 0x62 + Utf8 "Code"; // #16 at 0x68 + Utf8 "LineNumberTable"; // #17 at 0x6F + Utf8 "makeValueInitMethod"; // #18 at 0x81 + Utf8 "(I)LValueInitMethod;"; // #19 at 0x8D + Utf8 ""; // #20 at 0x9A + Utf8 "SourceFile"; // #21 at 0xA5 + Utf8 "ValueInitMethod.java"; // #22 at 0xB2 + NameAndType #14 #15; // #23 at 0xBF + NameAndType #12 #13; // #24 at 0xC4 + NameAndType #18 #19; // #25 at 0xC9 + NameAndType #8 #11; // #26 at 0xCE + Utf8 "java/lang/Object"; // #27 at 0xD3 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF0 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xF8 + 0x0010; // access + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x0102 + 0x0000; // access + #14; // name_cpx (bad) + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 42) { // Code at 0x010A + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0126 + [3] { // LineNumberTable + 0 4; // at 0x0132 + 4 5; // at 0x0136 + 9 6; // at 0x013A + } + } // end LineNumberTable + 
} // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x013A + 0x0008; // access + #18; // name_cpx + #19; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x0142 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0160 + [3] { // LineNumberTable + 0 8; // at 0x016C + 4 9; // at 0x0170 + 10 10; // at 0x0174 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0174 + 0x0008; // access + #20; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x017C + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0197 + [1] { // LineNumberTable + 0 2; // at 0x01A3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#21, 2) { // SourceFile at 0x01A5 + #22; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x01AD + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueInitMethod + +////////////////////////////////////////////////////////////////////// + +// Test that class modifiers cannot have both ACC_VALUE and ACC_INTERFACE set. 
+// +class ValueInterface { + 0xCAFEBABE; + 0; // minor version + 57; // version + [27] { // Constant Pool + ; // first element is empty + Method #7 #21; // #1 at 0x0A + Field #3 #22; // #2 at 0x0F + class #23; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #24; // #5 at 0x1C + Field #3 #25; // #6 at 0x21 + class #26; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "LValueInterface;"; // #9 at 0x2E + Utf8 "int_v"; // #10 at 0x3C + Utf8 "I"; // #11 at 0x44 + Utf8 ""; // #12 at 0x48 + Utf8 "()V"; // #13 at 0x51 + Utf8 "Code"; // #14 at 0x57 + Utf8 "LineNumberTable"; // #15 at 0x5E + Utf8 "makeValueInterface"; // #16 at 0x70 + Utf8 "(I)LValueInterface;"; // #17 at 0x80 + Utf8 ""; // #18 at 0x91 + Utf8 "SourceFile"; // #19 at 0x9C + Utf8 "ValueInterface.java"; // #20 at 0xA9 + NameAndType #12 #13; // #21 at 0xBA + NameAndType #10 #11; // #22 at 0xBF + Utf8 "ValueInterface"; // #23 at 0xC4 + NameAndType #16 #17; // #24 at 0xD0 + NameAndType #8 #9; // #25 at 0xD5 + Utf8 "java/lang/Object"; // #26 at 0xDA + } // Constant Pool + + 0x0330; // access [ ACC_VALUE ACC_INTERFACE(bad) ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF7 + 0x0018; // access + #8; // name_cpx + #9; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xFF + 0x0010; // access + #10; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // methods + { // Member at 0x0141 + 0x0008; // access + #16; // name_cpx + #17; // sig_cpx + [1] { // Attributes + Attr(#14, 44) { // Code at 0x0149 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#15, 14) { // LineNumberTable at 0x0167 + [3] { // LineNumberTable + 0 8; // at 0x0173 + 4 9; // at 0x0177 + 10 10; // at 0x017B + } + } // end LineNumberTable + } // Attributes + } // 
end Code + } // Attributes + } // Member + } // methods + + [1] { // Attributes + Attr(#19, 2) { // SourceFile at 0x01AC + #20; + } // end SourceFile + } // Attributes +} // end class ValueInterface + +////////////////////////////////////////////////////////////////////// + +// Test that value type instance methods cannot be synchronized. +// +class ValueMethodSynch { + 0xCAFEBABE; + 0; // minor version + 57; // version + [29] { // Constant Pool + ; // first element is empty + Method #7 #24; // #1 at 0x0A + Field #3 #25; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #26; // #5 at 0x1C + Field #3 #27; // #6 at 0x21 + class #28; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueMethodSynch"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x41 + Utf8 "LValueMethodSynch;"; // #11 at 0x4E + Utf8 "int_v"; // #12 at 0x63 + Utf8 "I"; // #13 at 0x6B + Utf8 ""; // #14 at 0x6F + Utf8 "()V"; // #15 at 0x78 + Utf8 "Code"; // #16 at 0x7E + Utf8 "LineNumberTable"; // #17 at 0x85 + Utf8 "instanceMethod"; // #18 at 0x97 + Utf8 "makeValueMethodSynch"; // #19 at 0xA8 + Utf8 "(I)LValueMethodSynch;"; // #20 at 0xBF + Utf8 ""; // #21 at 0xD7 + Utf8 "SourceFile"; // #22 at 0xE2 + Utf8 "ValueMethodSynch.java"; // #23 at 0xEF + NameAndType #14 #15; // #24 at 0x0107 + NameAndType #12 #13; // #25 at 0x010C + NameAndType #19 #20; // #26 at 0x0111 + NameAndType #8 #11; // #27 at 0x0116 + Utf8 "java/lang/Object"; // #28 at 0x011B + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0x0138 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0140 + 0x0010; // access + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [4] { // methods + { // Member at 0x014A + 0x0000; // access + #14; // 
name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 42) { // Code at 0x0152 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x016E + [3] { // LineNumberTable + 0 4; // at 0x017A + 4 5; // at 0x017E + 9 6; // at 0x0182 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0182 + 0x0020; // access [ ACC_SYNCHRONIZED(bad) ] + #18; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 25) { // Code at 0x018A + 0; // max_stack + 1; // max_locals + Bytes[1]{ + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x019D + [1] { // LineNumberTable + 0 8; // at 0x01A9 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x01A9 + 0x0008; // access + #19; // name_cpx + #20; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x01B1 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x01CF + [3] { // LineNumberTable + 0 11; // at 0x01DB + 4 12; // at 0x01DF + 10 13; // at 0x01E3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x01E3 + 0x0008; // access + #21; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x01EB + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0206 + [1] { // LineNumberTable + 0 2; // at 0x0212 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#22, 2) { // 
SourceFile at 0x0214 + #23; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x021C + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueMethodSynch + +////////////////////////////////////////////////////////////////////// + +// Test that a value type's super class must be java.lang.Object. +// +class ValueSuperClass { + 0xCAFEBABE; + 0; // minor version + 57; // version + [30] { // Constant Pool + ; // first element is empty + Method #7 #23; // #1 at 0x0A + Field #3 #24; // #2 at 0x0F + class #9; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #25; // #5 at 0x1C + Field #3 #26; // #6 at 0x21 + class #27; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "ValueSuperClass"; // #9 at 0x2E + Utf8 "ValueTypes"; // #10 at 0x36 + Utf8 "LValueSuperClass;"; // #11 at 0x43 + Utf8 "int_v"; // #12 at 0x4D + Utf8 "I"; // #13 at 0x55 + Utf8 ""; // #14 at 0x59 + Utf8 "()V"; // #15 at 0x62 + Utf8 "Code"; // #16 at 0x68 + Utf8 "LineNumberTable"; // #17 at 0x6F + Utf8 "makeValueSuperClass"; // #18 at 0x81 + Utf8 "(I)LValueSuperClass;"; // #19 at 0x8D + Utf8 ""; // #20 at 0x9A + Utf8 "SourceFile"; // #21 at 0xA5 + Utf8 "ValueSuperClass.java"; // #22 at 0xB2 + NameAndType #14 #15; // #23 at 0xBF + NameAndType #12 #13; // #24 at 0xC4 + NameAndType #18 #19; // #25 at 0xC9 + NameAndType #8 #11; // #26 at 0xCE + Utf8 "java/lang/Object"; // #27 at 0xD3 + class #29; // #28 + Utf8 "java/lang/Throwable"; // #29 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #28;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xF0 + 0x0118; // access + #8; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xF8 + 0x0010; // access + #12; // name_cpx + #13; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [3] { // methods + { // Member at 0x0102 + 0x0000; // access + #14; // name_cpx + #15; // sig_cpx 
+ [1] { // Attributes + Attr(#16, 42) { // Code at 0x010A + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0126 + [3] { // LineNumberTable + 0 4; // at 0x0132 + 4 5; // at 0x0136 + 9 6; // at 0x013A + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x013A + 0x0008; // access + #18; // name_cpx + #19; // sig_cpx + [1] { // Attributes + Attr(#16, 44) { // Code at 0x0142 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 14) { // LineNumberTable at 0x0160 + [3] { // LineNumberTable + 0 8; // at 0x016C + 4 9; // at 0x0170 + 10 10; // at 0x0174 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0174 + 0x0008; // access + #20; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#16, 33) { // Code at 0x017C + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1204B80005B30006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#17, 6) { // LineNumberTable at 0x0197 + [1] { // LineNumberTable + 0 2; // at 0x01A3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#21, 2) { // SourceFile at 0x01A5 + #22; + } // end SourceFile + ; + Attr(#10, 4) { // ValueTypes at 0x01AD + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueSuperClass + +////////////////////////////////////////////////////////////////////// + +// Value types Circ and Circ2 have fields of each other's type. This should +// cause a ClassCircularityError exception when one of them is being loaded. 
+// +// The value types are based on these two Java classes: +// +// final value class Circ { +// static final Circ VT = makeCirc(0x01234567); +// final int int_v; +// final Circ2 v2; +// Circ() { +// int_v = 1; +// v2 = Circ2.default; +// } +// static Circ makeCirc(int x) { +// Circ v = Circ.default; +// v = __WithField(v.int_v, x); +// return v; +// } +// } +//---------------------------------------------------------------- +// final value class Circ2 { +// static final Circ2 VT = makeCirc2('\u0123'); +// final char char_v; +// final Circ vv; +// Circ2() { +// char_v = 'z'; +// vv = Circ.default; +// } +// static Circ2 makeCirc2(char c) { +// Circ2 v = Circ2.default; +// v = __WithField(v.char_v, c); +// return v; +// } +// } + +class Circ { + 0xCAFEBABE; + 0; // minor version + 57; // version + [69] { // Constant Pool + ; // first element is empty + Method #13 #39; // #1 at 0x0A + class #40; // #2 at 0x0F + Field #2 #41; // #3 at 0x12 + InvokeDynamic 0s #44; // #4 at 0x17 + InvokeDynamic 0s #45; // #5 at 0x1C + InvokeDynamic 0s #46; // #6 at 0x21 + InvokeDynamic 0s #47; // #7 at 0x26 + int 0x01234567; // #8 at 0x2B + Method #2 #48; // #9 at 0x30 + Field #2 #49; // #10 at 0x35 + class #50; // #11 at 0x3A + Field #2 #51; // #12 at 0x3D + class #52; // #13 at 0x42 + Utf8 "VT"; // #14 at 0x45 + Utf8 "QCirc;"; // #15 at 0x4A + Utf8 "int_v"; // #16 at 0x53 + Utf8 "I"; // #17 at 0x5B + Utf8 "v2"; // #18 at 0x5F + Utf8 "QCirc2;"; // #19 at 0x64 + Utf8 ""; // #20 at 0x6E + Utf8 "()V"; // #21 at 0x77 + Utf8 "Code"; // #22 at 0x7D + Utf8 "LineNumberTable"; // #23 at 0x84 + Utf8 "makeCirc"; // #24 at 0x96 + Utf8 "(I)QCirc;"; // #25 at 0xA1 + Utf8 "hashCode"; // #26 at 0xAD + Utf8 "()I"; // #27 at 0xB8 + Utf8 "equals"; // #28 at 0xBE + Utf8 "(Ljava/lang/Object;)Z"; // #29 at 0xC7 + Utf8 "toString"; // #30 at 0xDF + Utf8 "()Ljava/lang/String;"; // #31 at 0xEA + Utf8 "longHashCode"; // #32 at 0x0101 + Utf8 "()J"; // #33 at 0x0110 + Utf8 ""; // #34 at 0x0116 + Utf8 
"$makeValue$"; // #35 at 0x0121 + Utf8 "()QCirc;"; // #36 at 0x012F + Utf8 "SourceFile"; // #37 at 0x013A + Utf8 "Circ.java"; // #38 at 0x0147 + NameAndType #20 #21; // #39 at 0x0153 + Utf8 "Circ"; // #40 at 0x0158 + NameAndType #16 #17; // #41 at 0x015F + Utf8 "BootstrapMethods"; // #42 at 0x0164 + MethodHandle 6b #53; // #43 at 0x0177 + NameAndType #26 #54; // #44 at 0x017B + NameAndType #28 #55; // #45 at 0x0180 + NameAndType #30 #56; // #46 at 0x0185 + NameAndType #32 #57; // #47 at 0x018A + NameAndType #24 #25; // #48 at 0x018F + NameAndType #14 #15; // #49 at 0x0194 + Utf8 "Circ2"; // #50 at 0x0199 + NameAndType #18 #19; // #51 at 0x01A1 + Utf8 "java/lang/Object"; // #52 at 0x01A6 + Method #58 #59; // #53 at 0x01B9 + Utf8 "(Ljava/lang/Object;)I"; // #54 at 0x01BE + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #55 at 0x01D6 + Utf8 "(Ljava/lang/Object;)Ljava/lang/String;"; // #56 at 0x0200 + Utf8 "(Ljava/lang/Object;)J"; // #57 at 0x0229 + class #60; // #58 at 0x0241 + NameAndType #61 #65; // #59 at 0x0244 + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #60 at 0x0249 + Utf8 "makeBootstrapMethod"; // #61 at 0x0272 + class #67; // #62 at 0x0288 + Utf8 "Lookup"; // #63 at 0x028B + Utf8 "InnerClasses"; // #64 at 0x0294 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #65 at 0x02A3 + class #68; // #66 at 0x0319 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #67 at 0x031C + Utf8 "java/lang/invoke/MethodHandles"; // #68 at 0x0344 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #2;// this_cpx + #13;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x036F + 0x0018; // access + #14; // name_cpx + #15; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0377 + 0x0010; // access + #16; // name_cpx + #17; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // 
Member at 0x037F + 0x0010; // access + #18; // name_cpx + #19; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [8] { // methods + { // Member at 0x0389 + 0x0000; // access + #20; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 29) { // Code at 0x0391 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x03A8 + [1] { // LineNumberTable + 0 6; // at 0x03B4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03B4 + 0x0008; // access + #24; // name_cpx + #25; // sig_cpx + [1] { // Attributes + Attr(#22, 45) { // Code at 0x03BC + 2; // max_stack + 2; // max_locals + Bytes[13]{ + 0xCB00024C1A2B5FCC; + 0x00034C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 14) { // LineNumberTable at 0x03DB + [3] { // LineNumberTable + 0 12; // at 0x03E7 + 4 13; // at 0x03EB + 11 14; // at 0x03EF + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03EF + 0x0011; // access + #26; // name_cpx + #27; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x03F7 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00040000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0410 + [1] { // LineNumberTable + 0 1; // at 0x041C + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x041C + 0x0011; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#22, 32) { // Code at 0x0424 + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA00050000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x043E + [1] { // LineNumberTable + 0 1; // at 0x044A + } + } // end 
LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x044A + 0x0011; // access + #30; // name_cpx + #31; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x0452 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00060000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x046B + [1] { // LineNumberTable + 0 1; // at 0x0477 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0477 + 0x0011; // access + #32; // name_cpx + #33; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x047F + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00070000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0498 + [1] { // LineNumberTable + 0 1; // at 0x04A4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04A4 + 0x0008; // access + #34; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 33) { // Code at 0x04AC + 1; // max_stack + 0; // max_locals + Bytes[9]{ + 0x1208B80009B3000A; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x04C7 + [1] { // LineNumberTable + 0 2; // at 0x04D3 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04D3 + 0x1008; // access + #35; // name_cpx + #36; // sig_cpx + [1] { // Attributes + Attr(#22, 58) { // Code at 0x04DB + 2; // max_stack + 1; // max_locals + Bytes[22]{ + 0xCB00024B042A5FCC; + 0x00034BCB000B2A5F; + 0xCC000C4B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 18) { // LineNumberTable at 0x0503 + [4] { // LineNumberTable + 0 6; // at 0x050F + 4 7; // at 0x0513 + 11 8; // at 0x0517 + 20 9; // at 0x051B + } + } // end LineNumberTable + } // 
Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#37, 2) { // SourceFile at 0x051D + #38; + } // end SourceFile + ; + Attr(#64, 10) { // InnerClasses at 0x0525 + [1] { // InnerClasses + #62 #66 #63 25; // at 0x0535 + } + } // end InnerClasses + ; + Attr(#42, 6) { // BootstrapMethods at 0x0535 + [1] { // bootstrap_methods + { // bootstrap_method + #43; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class Circ + +class Circ2 { + 0xCAFEBABE; + 0; // minor version + 57; // version + [68] { // Constant Pool + ; // first element is empty + Method #12 #38; // #1 at 0x0A + class #39; // #2 at 0x0F + Field #2 #40; // #3 at 0x12 + InvokeDynamic 0s #43; // #4 at 0x17 + InvokeDynamic 0s #44; // #5 at 0x1C + InvokeDynamic 0s #45; // #6 at 0x21 + InvokeDynamic 0s #46; // #7 at 0x26 + Method #2 #47; // #8 at 0x2B + Field #2 #48; // #9 at 0x30 + class #49; // #10 at 0x35 + Field #2 #50; // #11 at 0x38 + class #51; // #12 at 0x3D + Utf8 "VT"; // #13 at 0x40 + Utf8 "QCirc2;"; // #14 at 0x45 + Utf8 "char_v"; // #15 at 0x4F + Utf8 "C"; // #16 at 0x58 + Utf8 "vv"; // #17 at 0x5C + Utf8 "QCirc;"; // #18 at 0x61 + Utf8 ""; // #19 at 0x6A + Utf8 "()V"; // #20 at 0x73 + Utf8 "Code"; // #21 at 0x79 + Utf8 "LineNumberTable"; // #22 at 0x80 + Utf8 "makeCirc2"; // #23 at 0x92 + Utf8 "(C)QCirc2;"; // #24 at 0x9E + Utf8 "hashCode"; // #25 at 0xAB + Utf8 "()I"; // #26 at 0xB6 + Utf8 "equals"; // #27 at 0xBC + Utf8 "(Ljava/lang/Object;)Z"; // #28 at 0xC5 + Utf8 "toString"; // #29 at 0xDD + Utf8 "()Ljava/lang/String;"; // #30 at 0xE8 + Utf8 "longHashCode"; // #31 at 0xFF + Utf8 "()J"; // #32 at 0x010E + Utf8 ""; // #33 at 0x0114 + Utf8 "$makeValue$"; // #34 at 0x011F + Utf8 "()QCirc2;"; // #35 at 0x012D + Utf8 "SourceFile"; // #36 at 0x0139 + Utf8 "Circ2.java"; // #37 at 0x0146 + NameAndType #19 #20; // #38 at 0x0153 + Utf8 
"Circ2"; // #39 at 0x0158 + NameAndType #15 #16; // #40 at 0x0160 + Utf8 "BootstrapMethods"; // #41 at 0x0165 + MethodHandle 6b #52; // #42 at 0x0178 + NameAndType #25 #53; // #43 at 0x017C + NameAndType #27 #54; // #44 at 0x0181 + NameAndType #29 #55; // #45 at 0x0186 + NameAndType #31 #56; // #46 at 0x018B + NameAndType #23 #24; // #47 at 0x0190 + NameAndType #13 #14; // #48 at 0x0195 + Utf8 "Circ"; // #49 at 0x019A + NameAndType #17 #18; // #50 at 0x01A1 + Utf8 "java/lang/Object"; // #51 at 0x01A6 + Method #57 #58; // #52 at 0x01B9 + Utf8 "(Ljava/lang/Object;)I"; // #53 at 0x01BE + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #54 at 0x01D6 + Utf8 "(Ljava/lang/Object;)Ljava/lang/String;"; // #55 at 0x0200 + Utf8 "(Ljava/lang/Object;)J"; // #56 at 0x0229 + class #59; // #57 at 0x0241 + NameAndType #60 #64; // #58 at 0x0244 + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #59 at 0x0249 + Utf8 "makeBootstrapMethod"; // #60 at 0x0272 + class #66; // #61 at 0x0288 + Utf8 "Lookup"; // #62 at 0x028B + Utf8 "InnerClasses"; // #63 at 0x0294 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #64 at 0x02A3 + class #67; // #65 at 0x0319 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #66 at 0x031C + Utf8 "java/lang/invoke/MethodHandles"; // #67 at 0x0344 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #2;// this_cpx + #12;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x036F + 0x0018; // access + #13; // name_cpx + #14; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0377 + 0x0010; // access + #15; // name_cpx + #16; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x037F + 0x0010; // access + #17; // name_cpx + #18; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [8] { // methods + { // Member at 0x0389 + 
0x0000; // access + #19; // name_cpx + #20; // sig_cpx + [1] { // Attributes + Attr(#21, 29) { // Code at 0x0391 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x03A8 + [1] { // LineNumberTable + 0 6; // at 0x03B4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03B4 + 0x0008; // access + #23; // name_cpx + #24; // sig_cpx + [1] { // Attributes + Attr(#21, 45) { // Code at 0x03BC + 2; // max_stack + 2; // max_locals + Bytes[13]{ + 0xCB00024C1A2B5FCC; + 0x00034C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 14) { // LineNumberTable at 0x03DB + [3] { // LineNumberTable + 0 12; // at 0x03E7 + 4 13; // at 0x03EB + 11 14; // at 0x03EF + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03EF + 0x0011; // access + #25; // name_cpx + #26; // sig_cpx + [1] { // Attributes + Attr(#21, 31) { // Code at 0x03F7 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00040000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x0410 + [1] { // LineNumberTable + 0 1; // at 0x041C + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x041C + 0x0011; // access + #27; // name_cpx + #28; // sig_cpx + [1] { // Attributes + Attr(#21, 32) { // Code at 0x0424 + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA00050000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x043E + [1] { // LineNumberTable + 0 1; // at 0x044A + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x044A + 0x0011; // access + #29; // name_cpx + #30; // sig_cpx + [1] { // Attributes + 
Attr(#21, 31) { // Code at 0x0452 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00060000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x046B + [1] { // LineNumberTable + 0 1; // at 0x0477 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0477 + 0x0011; // access + #31; // name_cpx + #32; // sig_cpx + [1] { // Attributes + Attr(#21, 31) { // Code at 0x047F + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00070000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x0498 + [1] { // LineNumberTable + 0 1; // at 0x04A4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04A4 + 0x0008; // access + #33; // name_cpx + #20; // sig_cpx + [1] { // Attributes + Attr(#21, 34) { // Code at 0x04AC + 1; // max_stack + 0; // max_locals + Bytes[10]{ + 0x110123B80008B300; + 0x09B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 6) { // LineNumberTable at 0x04C8 + [1] { // LineNumberTable + 0 2; // at 0x04D4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04D4 + 0x1008; // access + #34; // name_cpx + #35; // sig_cpx + [1] { // Attributes + Attr(#21, 59) { // Code at 0x04DC + 2; // max_stack + 1; // max_locals + Bytes[23]{ + 0xCB00024B107A2A5F; + 0xCC00034BCB000A2A; + 0x5FCC000B4B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#22, 18) { // LineNumberTable at 0x0505 + [4] { // LineNumberTable + 0 6; // at 0x0511 + 4 7; // at 0x0515 + 12 8; // at 0x0519 + 21 9; // at 0x051D + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#36, 2) { // SourceFile at 0x051F + #37; + } // end SourceFile + ; + Attr(#63, 10) { // 
InnerClasses at 0x0527 + [1] { // InnerClasses + #61 #65 #62 25; // at 0x0537 + } + } // end InnerClasses + ; + Attr(#41, 6) { // BootstrapMethods at 0x0537 + [1] { // bootstrap_methods + { // bootstrap_method + #42; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class Circ2 + + +////////////////////////////////////////////////////////////////////// + +// Value types CircStaticA and CircStaticB have static fields of each other's +// type. This should cause a ClassCircularityError exception when one of them +// is being loaded. +// +// The value types are based on these two Java classes: +// +// final value class CircStaticA { +// static final CircStaticA VT = makeCircStaticA(0x01234567); +// final int int_v; +// static final CircStaticB v2 = CircStaticB.default; +// CircStaticA() { +// int_v = 1; +// } +// static CircStaticA makeCircStaticA(int x) { +// CircStaticA v = CircStaticA.default; +// v = __WithField(v.int_v, x); +// return v; +// } +// } +//---------------------------------------------------------------- +// final value class CircStaticB { +// static final CircStaticB VT = makeCircStaticB(0x01234567); +// final int int_v; +// static final CircStaticA v2 = CircStaticA.default; +// CircStaticB() { +// int_v = 1; +// } +// static CircStaticB makeCircStaticB(int x) { +// CircStaticB v = CircStaticB.default; +// v = __WithField(v.int_v, x); +// return v; +// } +// } + +class CircStaticA { + 0xCAFEBABE; + 0; // minor version + 57; // version + [69] { // Constant Pool + ; // first element is empty + Method #13 #39; // #1 at 0x0A + class #40; // #2 at 0x0F + Field #2 #41; // #3 at 0x12 + InvokeDynamic 0s #44; // #4 at 0x17 + InvokeDynamic 0s #45; // #5 at 0x1C + InvokeDynamic 0s #46; // #6 at 0x21 + InvokeDynamic 0s #47; // #7 at 0x26 + int 0x01234567; // #8 at 0x2B + Method #2 #48; // #9 at 0x30 + Field #2 #49; // #10 at 0x35 + class #50; // #11 
at 0x3A + Field #2 #51; // #12 at 0x3D + class #52; // #13 at 0x42 + Utf8 "VT"; // #14 at 0x45 + Utf8 "QCircStaticA;"; // #15 at 0x4A + Utf8 "int_v"; // #16 at 0x5A + Utf8 "I"; // #17 at 0x62 + Utf8 "v2"; // #18 at 0x66 + Utf8 "QCircStaticB;"; // #19 at 0x6B + Utf8 ""; // #20 at 0x7B + Utf8 "()V"; // #21 at 0x84 + Utf8 "Code"; // #22 at 0x8A + Utf8 "LineNumberTable"; // #23 at 0x91 + Utf8 "makeCircStaticA"; // #24 at 0xA3 + Utf8 "(I)QCircStaticA;"; // #25 at 0xB5 + Utf8 "hashCode"; // #26 at 0xC8 + Utf8 "()I"; // #27 at 0xD3 + Utf8 "equals"; // #28 at 0xD9 + Utf8 "(Ljava/lang/Object;)Z"; // #29 at 0xE2 + Utf8 "toString"; // #30 at 0xFA + Utf8 "()Ljava/lang/String;"; // #31 at 0x0105 + Utf8 "longHashCode"; // #32 at 0x011C + Utf8 "()J"; // #33 at 0x012B + Utf8 ""; // #34 at 0x0131 + Utf8 "$makeValue$"; // #35 at 0x013C + Utf8 "()QCircStaticA;"; // #36 at 0x014A + Utf8 "SourceFile"; // #37 at 0x015C + Utf8 "CircStaticA.java"; // #38 at 0x0169 + NameAndType #20 #21; // #39 at 0x017C + Utf8 "CircStaticA"; // #40 at 0x0181 + NameAndType #16 #17; // #41 at 0x018F + Utf8 "BootstrapMethods"; // #42 at 0x0194 + MethodHandle 6b #53; // #43 at 0x01A7 + NameAndType #26 #54; // #44 at 0x01AB + NameAndType #28 #55; // #45 at 0x01B0 + NameAndType #30 #56; // #46 at 0x01B5 + NameAndType #32 #57; // #47 at 0x01BA + NameAndType #24 #25; // #48 at 0x01BF + NameAndType #14 #15; // #49 at 0x01C4 + Utf8 "CircStaticB"; // #50 at 0x01C9 + NameAndType #18 #19; // #51 at 0x01D7 + Utf8 "java/lang/Object"; // #52 at 0x01DC + Method #58 #59; // #53 at 0x01EF + Utf8 "(Ljava/lang/Object;)I"; // #54 at 0x01F4 + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #55 at 0x020C + Utf8 "(Ljava/lang/Object;)Ljava/lang/String;"; // #56 at 0x0236 + Utf8 "(Ljava/lang/Object;)J"; // #57 at 0x025F + class #60; // #58 at 0x0277 + NameAndType #61 #65; // #59 at 0x027A + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #60 at 0x027F + Utf8 "makeBootstrapMethod"; // #61 at 0x02A8 + class #67; // #62 at 0x02BE 
+ Utf8 "Lookup"; // #63 at 0x02C1 + Utf8 "InnerClasses"; // #64 at 0x02CA + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #65 at 0x02D9 + class #68; // #66 at 0x034F + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #67 at 0x0352 + Utf8 "java/lang/invoke/MethodHandles"; // #68 at 0x037A + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #2;// this_cpx + #13;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x03A5 + 0x0018; // access + #14; // name_cpx + #15; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03AD + 0x0010; // access + #16; // name_cpx + #17; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03B5 + 0x0018; // access + #18; // name_cpx + #19; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [8] { // methods + { // Member at 0x03BF + 0x0000; // access + #20; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 29) { // Code at 0x03C7 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x03DE + [1] { // LineNumberTable + 0 6; // at 0x03EA + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03EA + 0x0008; // access + #24; // name_cpx + #25; // sig_cpx + [1] { // Attributes + Attr(#22, 45) { // Code at 0x03F2 + 2; // max_stack + 2; // max_locals + Bytes[13]{ + 0xCB00024C1A2B5FCC; + 0x00034C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 14) { // LineNumberTable at 0x0411 + [3] { // LineNumberTable + 0 11; // at 0x041D + 4 12; // at 0x0421 + 11 13; // at 0x0425 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0425 + 
0x0011; // access + #26; // name_cpx + #27; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x042D + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00040000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0446 + [1] { // LineNumberTable + 0 1; // at 0x0452 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0452 + 0x0011; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#22, 32) { // Code at 0x045A + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA00050000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0474 + [1] { // LineNumberTable + 0 1; // at 0x0480 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0480 + 0x0011; // access + #30; // name_cpx + #31; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x0488 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00060000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x04A1 + [1] { // LineNumberTable + 0 1; // at 0x04AD + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04AD + 0x0011; // access + #32; // name_cpx + #33; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x04B5 + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00070000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x04CE + [1] { // LineNumberTable + 0 1; // at 0x04DA + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04DA + 0x0008; // access + #34; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 43) { // Code at 0x04E2 + 1; // max_stack + 0; // 
max_locals + Bytes[15]{ + 0x1208B80009B3000A; + 0xCB000BB3000CB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 10) { // LineNumberTable at 0x0503 + [2] { // LineNumberTable + 0 2; // at 0x050F + 8 4; // at 0x0513 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0513 + 0x1008; // access + #35; // name_cpx + #36; // sig_cpx + [1] { // Attributes + Attr(#22, 45) { // Code at 0x051B + 2; // max_stack + 1; // max_locals + Bytes[13]{ + 0xCB00024B042A5FCC; + 0x00034B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 14) { // LineNumberTable at 0x053A + [3] { // LineNumberTable + 0 6; // at 0x0546 + 4 7; // at 0x054A + 11 8; // at 0x054E + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#37, 2) { // SourceFile at 0x0550 + #38; + } // end SourceFile + ; + Attr(#64, 10) { // InnerClasses at 0x0558 + [1] { // InnerClasses + #62 #66 #63 25; // at 0x0568 + } + } // end InnerClasses + ; + Attr(#42, 6) { // BootstrapMethods at 0x0568 + [1] { // bootstrap_methods + { // bootstrap_method + #43; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class CircStaticA + + +class CircStaticB { + 0xCAFEBABE; + 0; // minor version + 57; // version + [69] { // Constant Pool + ; // first element is empty + Method #13 #39; // #1 at 0x0A + class #40; // #2 at 0x0F + Field #2 #41; // #3 at 0x12 + InvokeDynamic 0s #44; // #4 at 0x17 + InvokeDynamic 0s #45; // #5 at 0x1C + InvokeDynamic 0s #46; // #6 at 0x21 + InvokeDynamic 0s #47; // #7 at 0x26 + int 0x01234567; // #8 at 0x2B + Method #2 #48; // #9 at 0x30 + Field #2 #49; // #10 at 0x35 + class #50; // #11 at 0x3A + Field #2 #51; // #12 at 0x3D + class #52; // #13 at 0x42 + Utf8 "VT"; // #14 at 0x45 + Utf8 
"QCircStaticB;"; // #15 at 0x4A + Utf8 "int_v"; // #16 at 0x5A + Utf8 "I"; // #17 at 0x62 + Utf8 "v2"; // #18 at 0x66 + Utf8 "QCircStaticA;"; // #19 at 0x6B + Utf8 ""; // #20 at 0x7B + Utf8 "()V"; // #21 at 0x84 + Utf8 "Code"; // #22 at 0x8A + Utf8 "LineNumberTable"; // #23 at 0x91 + Utf8 "makeCircStaticB"; // #24 at 0xA3 + Utf8 "(I)QCircStaticB;"; // #25 at 0xB5 + Utf8 "hashCode"; // #26 at 0xC8 + Utf8 "()I"; // #27 at 0xD3 + Utf8 "equals"; // #28 at 0xD9 + Utf8 "(Ljava/lang/Object;)Z"; // #29 at 0xE2 + Utf8 "toString"; // #30 at 0xFA + Utf8 "()Ljava/lang/String;"; // #31 at 0x0105 + Utf8 "longHashCode"; // #32 at 0x011C + Utf8 "()J"; // #33 at 0x012B + Utf8 ""; // #34 at 0x0131 + Utf8 "$makeValue$"; // #35 at 0x013C + Utf8 "()QCircStaticB;"; // #36 at 0x014A + Utf8 "SourceFile"; // #37 at 0x015C + Utf8 "CircStaticB.java"; // #38 at 0x0169 + NameAndType #20 #21; // #39 at 0x017C + Utf8 "CircStaticB"; // #40 at 0x0181 + NameAndType #16 #17; // #41 at 0x018F + Utf8 "BootstrapMethods"; // #42 at 0x0194 + MethodHandle 6b #53; // #43 at 0x01A7 + NameAndType #26 #54; // #44 at 0x01AB + NameAndType #28 #55; // #45 at 0x01B0 + NameAndType #30 #56; // #46 at 0x01B5 + NameAndType #32 #57; // #47 at 0x01BA + NameAndType #24 #25; // #48 at 0x01BF + NameAndType #14 #15; // #49 at 0x01C4 + Utf8 "CircStaticA"; // #50 at 0x01C9 + NameAndType #18 #19; // #51 at 0x01D7 + Utf8 "java/lang/Object"; // #52 at 0x01DC + Method #58 #59; // #53 at 0x01EF + Utf8 "(Ljava/lang/Object;)I"; // #54 at 0x01F4 + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #55 at 0x020C + Utf8 "(Ljava/lang/Object;)Ljava/lang/String;"; // #56 at 0x0236 + Utf8 "(Ljava/lang/Object;)J"; // #57 at 0x025F + class #60; // #58 at 0x0277 + NameAndType #61 #65; // #59 at 0x027A + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #60 at 0x027F + Utf8 "makeBootstrapMethod"; // #61 at 0x02A8 + class #67; // #62 at 0x02BE + Utf8 "Lookup"; // #63 at 0x02C1 + Utf8 "InnerClasses"; // #64 at 0x02CA + Utf8 
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #65 at 0x02D9 + class #68; // #66 at 0x034F + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #67 at 0x0352 + Utf8 "java/lang/invoke/MethodHandles"; // #68 at 0x037A + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #2;// this_cpx + #13;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [3] { // fields + { // Member at 0x03A5 + 0x0018; // access + #14; // name_cpx + #15; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03AD + 0x0010; // access + #16; // name_cpx + #17; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x03B5 + 0x0018; // access + #18; // name_cpx + #19; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [8] { // methods + { // Member at 0x03BF + 0x0000; // access + #20; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 29) { // Code at 0x03C7 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x03DE + [1] { // LineNumberTable + 0 6; // at 0x03EA + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x03EA + 0x0008; // access + #24; // name_cpx + #25; // sig_cpx + [1] { // Attributes + Attr(#22, 45) { // Code at 0x03F2 + 2; // max_stack + 2; // max_locals + Bytes[13]{ + 0xCB00024C1A2B5FCC; + 0x00034C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 14) { // LineNumberTable at 0x0411 + [3] { // LineNumberTable + 0 11; // at 0x041D + 4 12; // at 0x0421 + 11 13; // at 0x0425 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0425 + 0x0011; // access + #26; // name_cpx + #27; // sig_cpx + [1] { // Attributes + 
Attr(#22, 31) { // Code at 0x042D + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00040000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0446 + [1] { // LineNumberTable + 0 1; // at 0x0452 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0452 + 0x0011; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#22, 32) { // Code at 0x045A + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA00050000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x0474 + [1] { // LineNumberTable + 0 1; // at 0x0480 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0480 + 0x0011; // access + #30; // name_cpx + #31; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x0488 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00060000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x04A1 + [1] { // LineNumberTable + 0 1; // at 0x04AD + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04AD + 0x0011; // access + #32; // name_cpx + #33; // sig_cpx + [1] { // Attributes + Attr(#22, 31) { // Code at 0x04B5 + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00070000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 6) { // LineNumberTable at 0x04CE + [1] { // LineNumberTable + 0 1; // at 0x04DA + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x04DA + 0x0008; // access + #34; // name_cpx + #21; // sig_cpx + [1] { // Attributes + Attr(#22, 43) { // Code at 0x04E2 + 1; // max_stack + 0; // max_locals + Bytes[15]{ + 0x1208B80009B3000A; + 0xCB000BB3000CB1; + }; + [0] { // 
Traps + } // end Traps + [1] { // Attributes + Attr(#23, 10) { // LineNumberTable at 0x0503 + [2] { // LineNumberTable + 0 2; // at 0x050F + 8 4; // at 0x0513 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0513 + 0x1008; // access + #35; // name_cpx + #36; // sig_cpx + [1] { // Attributes + Attr(#22, 45) { // Code at 0x051B + 2; // max_stack + 1; // max_locals + Bytes[13]{ + 0xCB00024B042A5FCC; + 0x00034B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#23, 14) { // LineNumberTable at 0x053A + [3] { // LineNumberTable + 0 6; // at 0x0546 + 4 7; // at 0x054A + 11 8; // at 0x054E + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [3] { // Attributes + Attr(#37, 2) { // SourceFile at 0x0550 + #38; + } // end SourceFile + ; + Attr(#64, 10) { // InnerClasses at 0x0558 + [1] { // InnerClasses + #62 #66 #63 25; // at 0x0568 + } + } // end InnerClasses + ; + Attr(#42, 6) { // BootstrapMethods at 0x0568 + [1] { // bootstrap_methods + { // bootstrap_method + #43; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + } // Attributes +} // end class CircStaticB + + +////////////////////////////////////////////////////////////////////// + +// Test that a value type cannot be Cloneable. 
+// +// final value class ValueCloneable implements Cloneable { +// final int field; +// private ValueCloneable() { field = 0; } +//} + +class ValueCloneable { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #14; // #1 at 0x0A + Field #3 #15; // #2 at 0x0F + class #16; // #3 at 0x14 + class #18; // #4 at 0x17 + class #19; // #5 at 0x1A + Utf8 "field"; // #6 at 0x1D + Utf8 "I"; // #7 at 0x25 + Utf8 ""; // #8 at 0x29 + Utf8 "()V"; // #9 at 0x32 + Utf8 "Code"; // #10 at 0x38 + Utf8 "LineNumberTable"; // #11 at 0x3F + Utf8 "SourceFile"; // #12 at 0x51 + Utf8 "ValueCloneable.java"; // #13 at 0x5E + NameAndType #8 #9; // #14 at 0x74 + NameAndType #6 #7; // #15 at 0x79 + Utf8 "ValueCloneable"; // #16 at 0x7E + Utf8 "ValueTypes"; // #17 at 0x8F + Utf8 "java/lang/Object"; // #18 at 0x9C + Utf8 "java/lang/Cloneable"; // #19 at 0xAF + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [1] { // Interfaces + #5; + } // Interfaces + + [1] { // fields + { // Member at 0xD1 + 0x0010; // access + #6; // name_cpx + #7; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // methods + { // Member at 0xDB + 0x0002; // access + #8; // name_cpx + #9; // sig_cpx + [1] { // Attributes + Attr(#10, 34) { // Code at 0xE3 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A03B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#11, 6) { // LineNumberTable at 0xFF + [1] { // LineNumberTable + 0 3; // at 0x010B + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#12, 2) { // SourceFile at 0x010D + #13; + } // end SourceFile + ; + Attr(#17, 4) { // ValueTypes at 0x0115 + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class ValueCloneable --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ 
new/test/hotspot/jtreg/runtime/valhalla/valuetypes/libValueWithJni.c 2019-03-11 14:27:59.494353588 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include + +JNIEXPORT void JNICALL +Java_runtime_valhalla_valuetypes_ValueWithJni_doJniMonitorEnter(JNIEnv *env, jobject obj) { + (*env)->MonitorEnter(env, obj); +} + +JNIEXPORT void JNICALL +Java_runtime_valhalla_valuetypes_ValueWithJni_doJniMonitorExit(JNIEnv *env, jobject obj) { + (*env)->MonitorExit(env, obj); +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/NoArrayCov.jcod 2019-03-11 14:27:59.958353582 +0100 @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Test that a VerifyError exception is not thrown when trying to pass a value +// type array when the formal parameter is an array of java.lang.Object. +// +// // Java program emulating the jcod contents. +// public value final class NoArrayCov { +// final int x; +// final int y; +// +// private NoArrayCov() { +// x = 0; +// y = 0; +// } +// +// public int getX() { return x; } +// public int getY() { return y; } +// +// public String toString() { +// return "NoArrayCov: x=" + getX() + " y=" + getY(); +// } +// +// public void objArray(Object[] oArr) { +// System.out.println("In objArray"); +// } +// +// public static NoArrayCov createNoArrayCov(int x, int y) { +// NoArrayCov p = NoArrayCov.default; +// p = __WithField(p.x, x); +// p = __WithField(p.y, y); +// return p; +// } +// +// public static void main(String[] args) { +// NoArrayCov a = createNoArrayCov(3, 4); +// NoArrayCov b = createNoArrayCov(2, 4); +// NoArrayCov pa[] = new NoArrayCov[2]; +// pa[0] = a; +// pa[1] = b; +// a.objArray(pa); // Should not cause VerifyError exception !!! 
+// } +// } + +class NoArrayCov { + 0xCAFEBABE; + 0; // minor version + 57; // version + [98] { // Constant Pool + ; // first element is empty + Method #17 #47; // #1 at 0x0A + Field #10 #48; // #2 at 0x0F + Field #10 #49; // #3 at 0x14 + Method #10 #50; // #4 at 0x19 + Method #10 #51; // #5 at 0x1E + InvokeDynamic 0s #55; // #6 at 0x23 + Field #56 #57; // #7 at 0x28 + String #58; // #8 at 0x2D + Method #59 #60; // #9 at 0x30 + class #33; // #10 at 0x35 + Method #10 #61; // #11 at 0x38 + class #62; // #12 at 0x3D + Method #10 #63; // #13 at 0x40 + InvokeDynamic 1s #65; // #14 at 0x45 + InvokeDynamic 1s #66; // #15 at 0x4A + InvokeDynamic 1s #67; // #16 at 0x4F + class #68; // #17 at 0x54 + Utf8 "x"; // #18 at 0x57 + Utf8 "I"; // #19 at 0x5B + Utf8 "y"; // #20 at 0x5F + Utf8 ""; // #21 at 0x63 + Utf8 "()V"; // #22 at 0x6C + Utf8 "Code"; // #23 at 0x72 + Utf8 "LineNumberTable"; // #24 at 0x79 + Utf8 "getX"; // #25 at 0x8B + Utf8 "()I"; // #26 at 0x92 + Utf8 "getY"; // #27 at 0x98 + Utf8 "toString"; // #28 at 0x9F + Utf8 "()Ljava/lang/String;"; // #29 at 0xAA + Utf8 "objArray"; // #30 at 0xC1 + Utf8 "([Ljava/lang/Object;)V"; // #31 at 0xCC + Utf8 "createNoArrayCov"; // #32 at 0xE5 + Utf8 "NoArrayCov"; // #33 at 0xF8 + Utf8 "ValueTypes"; // #34 at 0x0105 + Utf8 "(II)QNoArrayCov;"; // #35 at 0x0112 + Utf8 "main"; // #36 at 0x0125 + Utf8 "([Ljava/lang/String;)V"; // #37 at 0x012C + Utf8 "hashCode"; // #38 at 0x0145 + Utf8 "equals"; // #39 at 0x0150 + Utf8 "(Ljava/lang/Object;)Z"; // #40 at 0x0159 + Utf8 "longHashCode"; // #41 at 0x0171 + Utf8 "()J"; // #42 at 0x0180 + Utf8 "$makeValue$"; // #43 at 0x0186 + Utf8 "()QNoArrayCov;"; // #44 at 0x0194 + Utf8 "SourceFile"; // #45 at 0x01A5 + Utf8 "NoArrayCov.java"; // #46 at 0x01B2 + NameAndType #21 #22; // #47 at 0x01C4 + NameAndType #18 #19; // #48 at 0x01C9 + NameAndType #20 #19; // #49 at 0x01CE + NameAndType #25 #26; // #50 at 0x01D3 + NameAndType #27 #26; // #51 at 0x01D8 + Utf8 "BootstrapMethods"; // #52 at 0x01DD + 
MethodHandle 6b #69; // #53 at 0x01F0 + String #70; // #54 at 0x01F4 + NameAndType #71 #72; // #55 at 0x01F7 + class #73; // #56 at 0x01FC + NameAndType #74 #75; // #57 at 0x01FF + Utf8 "In objArray"; // #58 at 0x0204 + class #76; // #59 at 0x0212 + NameAndType #77 #78; // #60 at 0x0215 + NameAndType #32 #35; // #61 at 0x021A + Utf8 "QNoArrayCov;"; // #62 at 0x021F + NameAndType #30 #31; // #63 at 0x022E + MethodHandle 6b #79; // #64 at 0x0233 + NameAndType #38 #80; // #65 at 0x0237 + NameAndType #39 #81; // #66 at 0x023C + NameAndType #41 #82; // #67 at 0x0241 + Utf8 "java/lang/Object"; // #68 at 0x0246 + Method #83 #84; // #69 at 0x0259 + Utf8 "NoArrayCov: x= y="; // #70 at 0x025E + Utf8 "makeConcatWithConstants"; // #71 at 0x0274 + Utf8 "(II)Ljava/lang/String;"; // #72 at 0x028E + Utf8 "java/lang/System"; // #73 at 0x02A7 + Utf8 "out"; // #74 at 0x02BA + Utf8 "Ljava/io/PrintStream;"; // #75 at 0x02C0 + Utf8 "java/io/PrintStream"; // #76 at 0x02D8 + Utf8 "println"; // #77 at 0x02EE + Utf8 "(Ljava/lang/String;)V"; // #78 at 0x02F8 + Method #85 #86; // #79 at 0x0310 + Utf8 "(Ljava/lang/Object;)I"; // #80 at 0x0315 + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #81 at 0x032D + Utf8 "(Ljava/lang/Object;)J"; // #82 at 0x0357 + class #87; // #83 at 0x036F + NameAndType #71 #91; // #84 at 0x0372 + class #92; // #85 at 0x0377 + NameAndType #93 #94; // #86 at 0x037A + Utf8 "java/lang/invoke/StringConcatFactory"; // #87 at 0x037F + class #96; // #88 at 0x03A6 + Utf8 "Lookup"; // #89 at 0x03A9 + Utf8 "InnerClasses"; // #90 at 0x03B2 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"; // #91 at 0x03C1 + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #92 at 0x045C + Utf8 "makeBootstrapMethod"; // #93 at 0x0485 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #94 at 0x049B + class 
#97; // #95 at 0x0511 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #96 at 0x0514 + Utf8 "java/lang/invoke/MethodHandles"; // #97 at 0x053C + } // Constant Pool + + 0x0131; // access [ ACC_PUBLIC ACC_SUPER ACC_FINAL ] + #10;// this_cpx + #17;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0x0567 + 0x0010; // access + #18; // name_cpx + #19; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x056F + 0x0010; // access + #20; // name_cpx + #19; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [11] { // methods + { // Member at 0x0579 + 0x0002; // access + #21; // name_cpx + #22; // sig_cpx + [1] { // Attributes + Attr(#23, 29) { // Code at 0x0581 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x0598 + [1] { // LineNumberTable + 0 28; // at 0x05A4 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x05A4 + 0x0001; // access + #25; // name_cpx + #26; // sig_cpx + [1] { // Attributes + Attr(#23, 29) { // Code at 0x05AC + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB40002AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x05C3 + [1] { // LineNumberTable + 0 33; // at 0x05CF + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x05CF + 0x0001; // access + #27; // name_cpx + #26; // sig_cpx + [1] { // Attributes + Attr(#23, 29) { // Code at 0x05D7 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB40003AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x05EE + [1] { // LineNumberTable + 0 34; // at 0x05FA + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } 
// Member + ; + { // Member at 0x05FA + 0x0001; // access + #28; // name_cpx + #29; // sig_cpx + [1] { // Attributes + Attr(#23, 38) { // Code at 0x0602 + 2; // max_stack + 1; // max_locals + Bytes[14]{ + 0x2AB600042AB60005; + 0xBA00060000B0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x0622 + [1] { // LineNumberTable + 0 37; // at 0x062E + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x062E + 0x0001; // access + #30; // name_cpx + #31; // sig_cpx + [1] { // Attributes + Attr(#23, 37) { // Code at 0x0636 + 2; // max_stack + 2; // max_locals + Bytes[9]{ + 0xB200071208B60009; + 0xB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 10) { // LineNumberTable at 0x0651 + [2] { // LineNumberTable + 0 41; // at 0x065D + 8 42; // at 0x0661 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0661 + 0x0009; // access + #32; // name_cpx + #35; // sig_cpx + [1] { // Attributes + Attr(#23, 56) { // Code at 0x0669 + 2; // max_stack + 3; // max_locals + Bytes[20]{ + 0xCB000A4D1A2C5FCC; + 0x00024D1B2C5FCC00; + 0x034D2CB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 18) { // LineNumberTable at 0x068F + [4] { // LineNumberTable + 0 45; // at 0x069B + 4 46; // at 0x069F + 11 47; // at 0x06A3 + 18 48; // at 0x06A7 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x06A7 + 0x0009; // access + #36; // name_cpx + #37; // sig_cpx + [1] { // Attributes + Attr(#23, 79) { // Code at 0x06AF + 3; // max_stack + 4; // max_locals + Bytes[31]{ + 0x0607B8000B4C0507; + 0xB8000B4D05BD000C; + 0x4E2D032B532D042C; + 0x532B2DB6000DB1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 30) { // LineNumberTable at 0x06E0 + [7] { // LineNumberTable + 0 52; // at 0x06EC + 6 
53; // at 0x06F0 + 12 54; // at 0x06F4 + 17 55; // at 0x06F8 + 21 56; // at 0x06FC + 25 57; // at 0x0700 + 30 58; // at 0x0704 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0704 + 0x0011; // access + #38; // name_cpx + #26; // sig_cpx + [1] { // Attributes + Attr(#23, 31) { // Code at 0x070C + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA000E0000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x0725 + [1] { // LineNumberTable + 0 24; // at 0x0731 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0731 + 0x0011; // access + #39; // name_cpx + #40; // sig_cpx + [1] { // Attributes + Attr(#23, 32) { // Code at 0x0739 + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA000F0000AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x0753 + [1] { // LineNumberTable + 0 24; // at 0x075F + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x075F + 0x0011; // access + #41; // name_cpx + #42; // sig_cpx + [1] { // Attributes + Attr(#23, 31) { // Code at 0x0767 + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00100000AD; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 6) { // LineNumberTable at 0x0780 + [1] { // LineNumberTable + 0 24; // at 0x078C + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x078C + 0x100A; // access + #43; // name_cpx + #44; // sig_cpx + [1] { // Attributes + Attr(#23, 56) { // Code at 0x0794 + 2; // max_stack + 1; // max_locals + Bytes[20]{ + 0xCB000A4B032A5FCC; + 0x00024B032A5FCC00; + 0x034B2AB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#24, 18) { // LineNumberTable at 0x07BA + [4] { // 
LineNumberTable + 0 28; // at 0x07C6 + 4 29; // at 0x07CA + 11 30; // at 0x07CE + 18 31; // at 0x07D2 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [4] { // Attributes + Attr(#45, 2) { // SourceFile at 0x07D4 + #46; + } // end SourceFile + ; + Attr(#90, 10) { // InnerClasses at 0x07DC + [1] { // InnerClasses + #88 #95 #89 25; // at 0x07EC + } + } // end InnerClasses + ; + Attr(#52, 12) { // BootstrapMethods at 0x07EC + [2] { // bootstrap_methods + { // bootstrap_method + #53; // bootstrap_method_ref + [1] { // bootstrap_arguments + #54; // at 0x07FA + } // bootstrap_arguments + } // bootstrap_method + ; + { // bootstrap_method + #64; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + ; + Attr(#34, 4) { // ValueTypes at 0x07FE + 0x0001000A; + } // end ValueTypes + } // Attributes +} // end class NoArrayCov --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/NoArrayCovIntf.jcod 2019-03-11 14:28:00.514353574 +0100 @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Test that a VerifyError exception is not thrown when trying to pass a value +// type array when the formal parameter is an array of an interface type. +// +// // Java program emulating the jcod contents. +// interface II { } +// +// public value final class NoArrayCovIntf { +// final int x; +// final int y; +// +// private NoArrayCovIntf() { +// x = 0; +// y = 0; +// } +// +// public int getX() { return x; } +// public int getY() { return y; } +// +// public String toString() { +// return "NoArrayCovIntf: x=" + getX() + " y=" + getY(); +// } +// +// public void iiArray(ii[] oArr) { +// System.out.println("In iiArray"); +// } +// +// public static NoArrayCovIntf createNoArrayCovIntf(int x, int y) { +// NoArrayCovIntf p = NoArrayCovIntf.default; +// p = __WithField(p.x, x); +// p = __WithField(p.y, y); +// return p; +// } +// +// public static void main(String[] args) { +// NoArrayCovIntf a = createNoArrayCovIntf(3, 4); +// NoArrayCovIntf b = createNoArrayCovIntf(2, 4); +// NoArrayCovIntf pa[] = new NoArrayCovIntf[2]; +// pa[0] = a; +// pa[1] = b; +// a.iiArray(pa); // Should not throw VerifyError. 
+// } +// } + + +class II { + 0xCAFEBABE; + 0; // minor version + 57; // version + [7] { // Constant Pool + ; // first element is empty + class #5; // #1 at 0x0A + class #6; // #2 at 0x0D + Utf8 "SourceFile"; // #3 at 0x10 + Utf8 "II.java"; // #4 at 0x1D + Utf8 "II"; // #5 at 0x33 + Utf8 "java/lang/Object"; // #6 at 0x37 + } // Constant Pool + + 0x0600; // access [ ACC_INTERFACE ] + #1;// this_cpx + #2;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [0] { // fields + } // fields + + [0] { // methods + } // methods + + [1] { // Attributes + Attr(#3, 2) { // SourceFile at 0x58 + #4; + } // end SourceFile + } // Attributes +} // end class II + + +class NoArrayCovIntf { + 0xCAFEBABE; + 0; // minor version + 57; // version + [99] { // Constant Pool + ; // first element is empty + String #58; // #1 at 0x0A + Method #66 #19; // #2 at 0x0D + InvokeDynamic 3s #63; // #3 at 0x12 + class #38; // #4 at 0x17 + class #39; // #5 at 0x1A + Method #24 #50; // #6 at 0x1D + Method #4 #85; // #7 at 0x22 + InvokeDynamic 1s #36; // #8 at 0x27 + Method #4 #73; // #9 at 0x2C + Field #65 #89; // #10 at 0x31 + InvokeDynamic 2s #30; // #11 at 0x36 + Method #4 #57; // #12 at 0x3B + Field #4 #34; // #13 at 0x40 + Field #4 #56; // #14 at 0x45 + InvokeDynamic 0s #93; // #15 at 0x4A + Method #4 #32; // #16 at 0x4F + Utf8 "java/io/PrintStream"; // #17 at 0x54 + Utf8 "NoArrayCovIntf.jasm"; // #18 at 0x6A + NameAndType #88 #62; // #19 at 0x80 + Utf8 "([LII;)V"; // #20 at 0x85 + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #21 at 0x8F + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #22 at 0xB8 + MethodHandle 6b #43; // #23 at 0xE0 + class #17; // #24 at 0xE4 + Utf8 "SourceFile"; // #25 at 0xE7 + Utf8 "iiArray"; // #26 at 0xF4 + class #21; // #27 at 0xFF + Utf8 "Lookup"; // #28 at 0x0102 + class #22; // #29 at 0x010B + NameAndType #60 #40; // #30 at 0x010E + Utf8 "hashCode"; // #31 at 0x0113 + NameAndType #77 #75; // #32 at 0x011E + Utf8 
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"; // #33 at 0x0123 + NameAndType #95 #35; // #34 at 0x01BE + Utf8 "I"; // #35 at 0x01C3 + NameAndType #31 #68; // #36 at 0x01C7 + Utf8 "Code"; // #37 at 0x01CC + Utf8 "NoArrayCovIntf"; // #38 at 0x01D3 + Utf8 "QNoArrayCovIntf;"; // #39 at 0x01E4 + Utf8 "(Ljava/lang/Object;Ljava/lang/Object;)Z"; // #40 at 0x01F7 + NameAndType #91 #33; // #41 at 0x0221 + Utf8 "createNoArrayCovIntf"; // #42 at 0x0226 + Method #96 #41; // #43 at 0x023D + Utf8 "([Ljava/lang/String;)V"; // #44 at 0x0242 + Utf8 "out"; // #45 at 0x025B + Utf8 "$makeValue$"; // #46 at 0x0261 + Utf8 "BootstrapMethods"; // #47 at 0x026F + String #67; // #48 at 0x0282 + Utf8 "toString"; // #49 at 0x0285 + NameAndType #53 #69; // #50 at 0x0290 + Utf8 "java/lang/invoke/MethodHandles"; // #51 at 0x0295 + Utf8 "(Ljava/lang/Object;)Z"; // #52 at 0x02B6 + Utf8 "println"; // #53 at 0x02CE + Utf8 "java/lang/Object"; // #54 at 0x02D8 + Utf8 "java/lang/System"; // #55 at 0x02EB + NameAndType #92 #35; // #56 at 0x02FE + NameAndType #26 #20; // #57 at 0x0303 + Utf8 "In iiArray"; // #58 at 0x0308 + class #51; // #59 at 0x0316 + Utf8 "equals"; // #60 at 0x0319 + NameAndType #72 #79; // #61 at 0x0322 + Utf8 "()V"; // #62 at 0x0327 + NameAndType #97 #64; // #63 at 0x032D + Utf8 "(Ljava/lang/Object;)J"; // #64 at 0x0332 + class #55; // #65 at 0x034A + class #54; // #66 at 0x034D + Utf8 "NoArrayCovIntf: x= y="; // #67 at 0x0350 + Utf8 "(Ljava/lang/Object;)I"; // #68 at 0x036A + Utf8 "(Ljava/lang/String;)V"; // #69 at 0x0382 + Utf8 "main"; // #70 at 0x039A + Method #27 #61; // #71 at 0x03A1 + Utf8 "makeBootstrapMethod"; // #72 at 0x03A6 + NameAndType #42 #78; // #73 at 0x03BC + Utf8 "()J"; // #74 at 0x03C1 + Utf8 "()I"; // #75 at 0x03C7 + Utf8 "getY"; // #76 at 0x03CD + Utf8 "getX"; // #77 at 0x03D4 + Utf8 "(II)QNoArrayCovIntf;"; // #78 at 0x03DB + Utf8 
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #79 at 0x03F2 + Utf8 "InnerClasses"; // #80 at 0x0468 + Utf8 "()QNoArrayCovIntf;"; // #81 at 0x0477 + MethodHandle 6b #71; // #82 at 0x048C + MethodHandle 6b #71; // #83 at 0x0490 + MethodHandle 6b #71; // #84 at 0x0494 + NameAndType #76 #75; // #85 at 0x0498 + Utf8 "Ljava/io/PrintStream;"; // #86 at 0x049D + Utf8 "java/lang/invoke/StringConcatFactory"; // #87 at 0x04B5 + Utf8 ""; // #88 at 0x04DC + NameAndType #45 #86; // #89 at 0x04E5 + Utf8 "()Ljava/lang/String;"; // #90 at 0x04EA + Utf8 "makeConcatWithConstants"; // #91 at 0x0501 + Utf8 "y"; // #92 at 0x051B + NameAndType #91 #98; // #93 at 0x051F + Utf8 "ValueTypes"; // #94 at 0x0524 + Utf8 "x"; // #95 at 0x0531 + class #87; // #96 at 0x0535 + Utf8 "longHashCode"; // #97 at 0x0538 + Utf8 "(I)Ljava/lang/String;"; // #98 at 0x0547 + } // Constant Pool + + 0x0131; // access [ ACC_PUBLIC ACC_SUPER ACC_FINAL ] + #4;// this_cpx + #66;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0x056A + 0x0010; // access + #95; // name_cpx + #35; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x0572 + 0x0010; // access + #92; // name_cpx + #35; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [11] { // methods + { // Member at 0x057C + 0x0002; // access + #88; // name_cpx + #62; // sig_cpx + [1] { // Attributes + Attr(#37, 17) { // Code at 0x0584 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70002B1; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x059B + 0x0001; // access + #77; // name_cpx + #75; // sig_cpx + [1] { // Attributes + Attr(#37, 17) { // Code at 0x05A3 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB4000DAC; + }; + [0] { // Traps + } // end Traps + [0] { // 
Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x05BA + 0x0001; // access + #76; // name_cpx + #75; // sig_cpx + [1] { // Attributes + Attr(#37, 17) { // Code at 0x05C2 + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB4000EAC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x05D9 + 0x0001; // access + #49; // name_cpx + #90; // sig_cpx + [1] { // Attributes + Attr(#37, 26) { // Code at 0x05E1 + 2; // max_stack + 1; // max_locals + Bytes[14]{ + 0x2AB600102AB60007; + 0xBA000F0000B0; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0601 + 0x0001; // access + #26; // name_cpx + #20; // sig_cpx + [1] { // Attributes + Attr(#37, 21) { // Code at 0x0609 + 2; // max_stack + 2; // max_locals + Bytes[9]{ + 0xB2000A1201B60006; + 0xB1; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0624 + 0x0009; // access + #42; // name_cpx + #78; // sig_cpx + [1] { // Attributes + Attr(#37, 32) { // Code at 0x062C + 2; // max_stack + 3; // max_locals + Bytes[20]{ + 0xCB00044D1A2C5FCC; + 0x000D4D1B2C5FCC00; + 0x0E4D2CB0; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0652 + 0x0009; // access + #70; // name_cpx + #44; // sig_cpx + [1] { // Attributes + Attr(#37, 43) { // Code at 0x065A + 3; // max_stack + 4; // max_locals + Bytes[31]{ + 0x0607B800094C0507; + 0xB800094D05BD0005; + 0x4E2D032B532D042C; + 0x532B2DB6000CB1; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x068B + 0x0011; // access + #31; // name_cpx + #75; // sig_cpx + [1] { 
// Attributes + Attr(#37, 19) { // Code at 0x0693 + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00080000AC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x06AC + 0x0011; // access + #60; // name_cpx + #52; // sig_cpx + [1] { // Attributes + Attr(#37, 20) { // Code at 0x06B4 + 2; // max_stack + 2; // max_locals + Bytes[8]{ + 0x2A2BBA000B0000AC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x06CE + 0x0011; // access + #97; // name_cpx + #74; // sig_cpx + [1] { // Attributes + Attr(#37, 19) { // Code at 0x06D6 + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00030000AD; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x06EF + 0x100A; // access + #46; // name_cpx + #81; // sig_cpx + [1] { // Attributes + Attr(#37, 32) { // Code at 0x06F7 + 2; // max_stack + 1; // max_locals + Bytes[20]{ + 0xCB00044B032A5FCC; + 0x000D4B032A5FCC00; + 0x0E4B2AB0; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [4] { // Attributes + Attr(#25, 2) { // SourceFile at 0x071F + #18; + } // end SourceFile + ; + Attr(#80, 10) { // InnerClasses at 0x0727 + [1] { // InnerClasses + #29 #59 #28 25; // at 0x0737 + } + } // end InnerClasses + ; + Attr(#47, 20) { // BootstrapMethods at 0x0737 + [4] { // bootstrap_methods + { // bootstrap_method + #23; // bootstrap_method_ref + [1] { // bootstrap_arguments + #48; // at 0x0745 + } // bootstrap_arguments + } // bootstrap_method + ; + { // bootstrap_method + #84; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + ; + { // bootstrap_method + #83; // bootstrap_method_ref + [0] { // 
bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + ; + { // bootstrap_method + #82; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + ; + Attr(#94, 4) { // ValueTypes at 0x0751 + 0x00010004; + } // end ValueTypes + } // Attributes +} // end class NoArrayCovIntf --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/NoNullVT.jcod 2019-03-11 14:28:00.970353568 +0100 @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Test that a VerifyError exception is thrown when trying to pass a null +// when the formal parameter is a value type. +// +// // Java program emulating the jcod contents. 
+// public value final class NoNullVT { +// final int x; +// final int y; +// +// private NoNullVT() { +// x = 0; +// y = 0; +// } +// +// public int getX() { return x; } +// public int getY() { return y; } +// +// public boolean isSameNoNullVT(NoNullVT that) { +// return this.getX() == that.getX() && this.getY() == that.getY(); +// } +// +// public boolean equals(Object o) { +// if(o instanceof NoNullVT) { +// return ((NoNullVT)o).x == x && ((NoNullVT)o).y == y; +// } else { +// return false; +// } +// } +// +// public static NoNullVT createNoNullVT(int x, int y) { +// NoNullVT p = NoNullVT.default; +// p = __WithField(p.x, x); +// p = __WithField(p.y, y); +// return p; +// } +// +// public static void main(String[] args) { +// String str = null; +// NoNullVT a = createNoNullVT(3, 4); +// NoNullVT b = createNoNullVT(2, 4); +// boolean res = a.isSameNoNullVT(null); // Should throw VerifyError +// } +// } + + +class NoNullVT { + 0xCAFEBABE; + 0; // minor version + 57; // version + [86] { // Constant Pool + ; // first element is empty + Method #54 #13; // #1 at 0x0A + Field #10 #47; // #2 at 0x0F + InvokeDynamic 2s #52; // #3 at 0x14 + InvokeDynamic 1s #29; // #4 at 0x19 + Method #10 #24; // #5 at 0x1E + Method #10 #72; // #6 at 0x23 + Field #10 #26; // #7 at 0x28 + Method #63 #40; // #8 at 0x2D + InvokeDynamic 0s #80; // #9 at 0x32 + class #68; // #10 at 0x37 + Method #10 #27; // #11 at 0x3A + Method #10 #73; // #12 at 0x3F + NameAndType #75 #51; // #13 at 0x44 + Utf8 "java/lang/invoke/ValueBootstrapMethods"; // #14 at 0x49 + Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #15 at 0x72 + Utf8 "createNoNullVT"; // #16 at 0x9A + MethodHandle 6b #36; // #17 at 0xAC + Utf8 "SourceFile"; // #18 at 0xB0 + class #14; // #19 at 0xBD + Utf8 "Lookup"; // #20 at 0xC0 + class #15; // #21 at 0xC9 + Utf8 "hashCode"; // #22 at 0xCC + Utf8 "(Ljava/lang/Object;)Ljava/lang/Object;"; // #23 at 0xD7 + NameAndType #65 #62; // #24 at 0x0100 + Utf8 
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"; // #25 at 0x0105 + NameAndType #82 #28; // #26 at 0x01A0 + NameAndType #16 #61; // #27 at 0x01A5 + Utf8 "I"; // #28 at 0x01AA + NameAndType #22 #55; // #29 at 0x01AE + Utf8 "Code"; // #30 at 0x01B3 + Utf8 "requireNonNull"; // #31 at 0x01BA + String #45; // #32 at 0x01CB + Utf8 "StackMapTable"; // #33 at 0x01CE + NameAndType #77 #25; // #34 at 0x01DE + Utf8 "NoNullVT.jasm"; // #35 at 0x01E3 + Method #83 #34; // #36 at 0x01F4 + Utf8 "([Ljava/lang/String;)V"; // #37 at 0x01F9 + Utf8 "$makeValue$"; // #38 at 0x0212 + Utf8 "BootstrapMethods"; // #39 at 0x0220 + NameAndType #31 #23; // #40 at 0x0233 + Utf8 "toString"; // #41 at 0x0238 + Utf8 "isSameNoNullVT"; // #42 at 0x0243 + Utf8 "java/lang/invoke/MethodHandles"; // #43 at 0x0255 + Utf8 "(Ljava/lang/Object;)Z"; // #44 at 0x0276 + Utf8 "NoNullVT: x= y="; // #45 at 0x028E + Utf8 "java/lang/Object"; // #46 at 0x02A3 + NameAndType #79 #28; // #47 at 0x02B6 + class #43; // #48 at 0x02BB + NameAndType #59 #66; // #49 at 0x02BE + Utf8 "equals"; // #50 at 0x02C3 + Utf8 "()V"; // #51 at 0x02CC + NameAndType #84 #53; // #52 at 0x02D2 + Utf8 "(Ljava/lang/Object;)J"; // #53 at 0x02D7 + class #46; // #54 at 0x02EF + Utf8 "(Ljava/lang/Object;)I"; // #55 at 0x02F2 + Utf8 "java/util/Objects"; // #56 at 0x030A + Utf8 "main"; // #57 at 0x031E + Method #19 #49; // #58 at 0x0325 + Utf8 "makeBootstrapMethod"; // #59 at 0x032A + Utf8 "()J"; // #60 at 0x0340 + Utf8 "(II)QNoNullVT;"; // #61 at 0x0346 + Utf8 "()I"; // #62 at 0x0358 + class #56; // #63 at 0x035E + Utf8 "getY"; // #64 at 0x0361 + Utf8 "getX"; // #65 at 0x0368 + Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #66 at 0x036F + Utf8 "InnerClasses"; // #67 at 0x03E5 + Utf8 "NoNullVT"; // #68 at 0x03F4 + Utf8 "(QNoNullVT;)Z"; // #69 at 0x0400 + 
MethodHandle 6b #58; // #70 at 0x0411 + MethodHandle 6b #58; // #71 at 0x0415 + NameAndType #64 #62; // #72 at 0x0419 + NameAndType #42 #69; // #73 at 0x041E + Utf8 "java/lang/invoke/StringConcatFactory"; // #74 at 0x0423 + Utf8 ""; // #75 at 0x044A + Utf8 "()Ljava/lang/String;"; // #76 at 0x0453 + Utf8 "makeConcatWithConstants"; // #77 at 0x046A + Utf8 "()QNoNullVT;"; // #78 at 0x0484 + Utf8 "y"; // #79 at 0x0494 + NameAndType #77 #85; // #80 at 0x0498 + Utf8 "ValueTypes"; // #81 at 0x049D + Utf8 "x"; // #82 at 0x04AA + class #74; // #83 at 0x04AE + Utf8 "longHashCode"; // #84 at 0x04B1 + Utf8 "(II)Ljava/lang/String;"; // #85 at 0x04C0 + } // Constant Pool + + 0x0131; // access [ ACC_PUBLIC ACC_SUPER ACC_FINAL ] + #10;// this_cpx + #54;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0x04E3 + 0x0010; // access + #82; // name_cpx + #28; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0x04EB + 0x0010; // access + #79; // name_cpx + #28; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [11] { // methods + { // Member at 0x04F5 + 0x0002; // access + #75; // name_cpx + #51; // sig_cpx + [1] { // Attributes + Attr(#30, 17) { // Code at 0x04FD + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB70001B1; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0514 + 0x0001; // access + #65; // name_cpx + #62; // sig_cpx + [1] { // Attributes + Attr(#30, 17) { // Code at 0x051C + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 0x2AB40007AC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0533 + 0x0001; // access + #64; // name_cpx + #62; // sig_cpx + [1] { // Attributes + Attr(#30, 17) { // Code at 0x053B + 1; // max_stack + 1; // max_locals + Bytes[5]{ + 
0x2AB40002AC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0552 + 0x0001; // access + #42; // name_cpx + #69; // sig_cpx + [1] { // Attributes + Attr(#30, 51) { // Code at 0x055A + 2; // max_stack + 2; // max_locals + Bytes[28]{ + 0x2AB600052BB60005; + 0xA000122AB600062B; + 0xB60006A0000704A7; + 0x000403AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#33, 5) { // StackMapTable at 0x0588 + [2] { // + 26b; // same_frame + 64b, [1]z{1b}; // same_locals_1_stack_item_frame + } + } // end StackMapTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0593 + 0x0001; // access + #41; // name_cpx + #76; // sig_cpx + [1] { // Attributes + Attr(#30, 26) { // Code at 0x059B + 2; // max_stack + 1; // max_locals + Bytes[14]{ + 0x2AB600052AB60006; + 0xBA00090000B0; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x05BB + 0x0001; // access + #50; // name_cpx + #44; // sig_cpx + [1] { // Attributes + Attr(#30, 77) { // Code at 0x05C3 + 2; // max_stack + 2; // max_locals + Bytes[53]{ + 0x2BC1000A99002F2B; + 0x59B8000857C0000A; + 0xB400072AB40007A0; + 0x001A2B59B8000857; + 0xC0000AB400022AB4; + 0x0002A0000704A700; + 0x0403AC03AC; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#33, 6) { // StackMapTable at 0x060A + [3] { // + 49b; // same_frame + 64b, [1]z{1b}; // same_locals_1_stack_item_frame + 0b; // same_frame + } + } // end StackMapTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0616 + 0x0009; // access + #16; // name_cpx + #61; // sig_cpx + [1] { // Attributes + Attr(#30, 32) { // Code at 0x061E + 2; // max_stack + 3; // max_locals + Bytes[20]{ + 0xCB000A4D1A2C5FCC; + 0x00074D1B2C5FCC00; + 0x024D2CB0; + }; + [0] { // Traps + } // end Traps + [0] { 
// Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0644 + 0x0009; // access + #57; // name_cpx + #37; // sig_cpx + [1] { // Attributes + Attr(#30, 34) { // Code at 0x064C + 2; // max_stack + 5; // max_locals + Bytes[22]{ + 0x014C0607B8000B4D; + 0x0507B8000B4E2C2B; + 0xB6000C3604B1; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0674 + 0x0011; // access + #22; // name_cpx + #62; // sig_cpx + [1] { // Attributes + Attr(#30, 19) { // Code at 0x067C + 1; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00040000AC; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0695 + 0x0011; // access + #84; // name_cpx + #60; // sig_cpx + [1] { // Attributes + Attr(#30, 19) { // Code at 0x069D + 2; // max_stack + 1; // max_locals + Bytes[7]{ + 0x2ABA00030000AD; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x06B6 + 0x100A; // access + #38; // name_cpx + #78; // sig_cpx + [1] { // Attributes + Attr(#30, 32) { // Code at 0x06BE + 2; // max_stack + 1; // max_locals + Bytes[20]{ + 0xCB000A4B032A5FCC; + 0x00074B032A5FCC00; + 0x024B2AB0; + }; + [0] { // Traps + } // end Traps + [0] { // Attributes + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [4] { // Attributes + Attr(#18, 2) { // SourceFile at 0x06E6 + #35; + } // end SourceFile + ; + Attr(#67, 10) { // InnerClasses at 0x06EE + [1] { // InnerClasses + #21 #48 #20 25; // at 0x06FE + } + } // end InnerClasses + ; + Attr(#39, 16) { // BootstrapMethods at 0x06FE + [3] { // bootstrap_methods + { // bootstrap_method + #17; // bootstrap_method_ref + [1] { // bootstrap_arguments + #32; // at 0x070C + } // bootstrap_arguments + } // bootstrap_method + ; 
+ { // bootstrap_method + #71; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + ; + { // bootstrap_method + #70; // bootstrap_method_ref + [0] { // bootstrap_arguments + } // bootstrap_arguments + } // bootstrap_method + } + } // end BootstrapMethods + ; + Attr(#81, 4) { // ValueTypes at 0x0714 + 0x0001000A; + } // end ValueTypes + } // Attributes +} // end class NoNullVT --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/VTAssignability.java 2019-03-11 14:28:01.434353561 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test basic verifier assignability of value types. 
+ * @compile -XDallowWithFieldOperator -XDemitQtypes VTAssignability.java + * @run main/othervm -Xverify:remote -XX:+EnableValhalla VTAssignability + */ + +// Test that a value type is assignable to itself, to java.lang.Object, +// and to an interface, +// +interface II { } + +public value final class VTAssignability implements II { + final int x; + final int y; + + private VTAssignability() { + x = 0; + y = 0; + } + + public int getX() { return x; } + public int getY() { return y; } + + public boolean isSameVTAssignability(VTAssignability that) { + return this.getX() == that.getX() && this.getY() == that.getY(); + } + + public boolean equals(Object o) { + if(o instanceof VTAssignability) { + return ((VTAssignability)o).x == x && ((VTAssignability)o).y == y; + } else { + return false; + } + } + + public void takesInterface(II i) { + System.out.println("Test passes!!"); + } + + public static VTAssignability createVTAssignability(int x, int y) { + VTAssignability p = VTAssignability.default; + p = __WithField(p.x, x); + p = __WithField(p.y, y); + return p; + } + + public static void main(String[] args) { + VTAssignability a = createVTAssignability(3, 4); + VTAssignability b = createVTAssignability(2, 4); + + // Test assignability of a value type to itself. + boolean res = a.isSameVTAssignability(b); + + // Test assignability of a value type to java.lang.Object. + res = b.equals(a); + + // Test assignability of a value type to an interface. + a.takesInterface(b); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/ValueCapableClass.java 2019-03-11 14:28:01.886353555 +0100 @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package runtime.valhalla.valuetypes.verifier; + +@jdk.incubator.mvt.ValueCapableClass +public final class ValueCapableClass { + + public static final int DEFAULT_X = 11; + public static final short DEFAULT_Y = 13; + public static final short DEFAULT_Z = 15; + public static final String STATIC_FIELD = "Should be left alone"; + + public final int x; + public final short y; + public final short z; + + private ValueCapableClass() { + this(DEFAULT_X, DEFAULT_Y, DEFAULT_Z); + } + + private ValueCapableClass(int x, short y, short z) { + this.x = x; + this.y = y; + this.z = z; + } + + public int getX() { + return x; + } + + public short getY() { + return y; + } + + public short getZ() { + return z; + } + + public String toString() { + int ax = getX(); + short ay = getY(); + short az = getZ(); + return "ValueCapableClass x=" + ax + " y=" + ay + " z=" + az; + } + + public static ValueCapableClass create(int x, short y, short z) { + return new ValueCapableClass(x, y, z); + } + + public static ValueCapableClass create() { + return new ValueCapableClass(); + } + + 
public static void test() { + ValueCapableClass value = create(4711, (short)7, (short)11); + String s = value.toString(); + if ((value.getX() != 4711) || (value.getY() != 7) || value.getZ() != 11) { + throw new IllegalStateException("Bad value: " + s); + } + System.out.println(s); + ValueCapableClass defaultValue = create(); + s = defaultValue.toString(); + if ((defaultValue.getX() != DEFAULT_X) || + (defaultValue.getY() != DEFAULT_Y) || + (defaultValue.getZ() != DEFAULT_Z)) { + throw new IllegalStateException("Bad value: " + s); + } + + if (!STATIC_FIELD.equals("Should be left alone")) { + throw new IllegalStateException("Bad static field: " + STATIC_FIELD); + } + } + + public static void main(String[] args) { + test(); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/VerifierValueTypes.java 2019-03-11 14:28:02.350353548 +0100 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +/* + * @test + * @summary test that the right exceptions get thrown for bad value type + * class files. + * @compile verifierTests.jcod NoArrayCov.jcod NoArrayCovIntf.jcod NoNullVT.jcod + * @run main/othervm -verify -XX:+EnableValhalla VerifierValueTypes + */ + +public class VerifierValueTypes { + + public static void runTestVerifyError(String test_name, String message) throws Exception { + System.out.println("Testing: " + test_name); + try { + Class newClass = Class.forName(test_name); + throw new RuntimeException("Expected VerifyError exception not thrown"); + } catch (java.lang.VerifyError e) { + if (!e.getMessage().contains(message)) { + throw new RuntimeException("Wrong VerifyError: " + e.getMessage()); + } + } + } + + public static void runTestFormatError(String test_name, String message) throws Exception { + System.out.println("Testing: " + test_name); + try { + Class newClass = Class.forName(test_name); + throw new RuntimeException("Expected ClassFormatError exception not thrown"); + } catch (java.lang.ClassFormatError e) { + if (!e.getMessage().contains(message)) { + throw new RuntimeException("Wrong ClassFormatError: " + e.getMessage()); + } + } + } + + public static void runTestNoError(String test_name) throws Exception { + System.out.println("Testing: " + test_name); + Class newClass = Class.forName(test_name); + } + + public static void main(String[] args) throws Exception { + + // Test that a defaultvalue opcode with an out of bounds cp index causes a VerifyError. + runTestVerifyError("defValBadCP", "Illegal constant pool index"); + + // Test that ClassFormatError is thrown for a class file, with major version 54, that + // contains a defaultvalue opcode. 
+ runTestFormatError("defValBadMajorVersion", "defaultvalue not supported by this class file version"); + + // Test VerifyError is thrown if a defaultvalue's cp entry is not a class. + runTestVerifyError("defValWrongCPType", "Illegal type at constant pool entry"); + + // Test that a withfield opcode with an out of bounds cp index causes a VerifyError. + runTestVerifyError("wthFldBadCP", "Illegal constant pool index"); + + // Test that VerifyError is thrown if the first operand on the stack is not assignable + // to withfield's field. + runTestVerifyError("wthFldBadFldVal", "Bad type on operand stack"); + + // Test that VerifyError is thrown if the second operand on the stack is a primitive. + runTestVerifyError("wthFldBadFldRef", "Bad type on operand stack"); + + // Test that ClassFormatError is thrown for a class file, with major version 54, that + // contains a withfield opcode. + runTestFormatError("wthFldBadMajorVersion", "withfield not supported by this class file version"); + + // Test VerifyError is thrown if a withfields's cp entry is not a field. + runTestVerifyError("wthFldWrongCPType", "Illegal type at constant pool entry"); + + // Test that VerifyError is thrown if the class for a withfields's cp fieldref + // entry is java.lang.Object and the reference on the stack is a value type. + runTestVerifyError("wthFldObject", "must be identical value types"); + + // Test VerifyError is thrown if a monitorenter's cp entry is a value type. + runTestVerifyError("monEnterVT", "Bad type on operand stack"); + + // Test VerifyError is thrown if a defaultvalue's cp entry is a value type. + // TBD!!! + runTestVerifyError("defValueObj", "Invalid type on operand stack in withfield instruction"); + + // Test VerifyError is thrown if a withfield's class operand is not a value type. + runTestVerifyError("withfieldObj", "Bad type on operand stack"); + + // Test that an array of value types is assignable to [Ljava/lang/Object; (Covariance). 
+ runTestNoError("NoArrayCov"); + + // Test that an array of value types is assignable to an array of interfaces (Covariance). + runTestNoError("NoArrayCovIntf"); + + // Test that null is not assignable to a value type. + runTestVerifyError("NoNullVT", + "Type null (current frame, stack[1]) is not assignable to 'QNoNullVT;'"); + } +} --- /dev/null 2019-03-11 09:22:42.048915961 +0100 +++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/verifier/verifierTests.jcod 2019-03-11 14:28:02.822353542 +0100 @@ -0,0 +1,1414 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +// The jcod classes in this file were derived from this Java value type: +// +// final value class Value { +// static final Value VT = makeValue(0x01234567); +// final int int_v; +// Value() { +// int_v = 1; +// } +// static Value makeValue(int x) { +// Value v = Value.default; +// v = __WithField(v.int_v, x); +// return v; +// } +// } +// +// The changes for each test were made to the bytecodes for method makeValue(int x). +// Its bytecodes are: +// +// static Value makeValue(int); descriptor: (I)LValue; flags: (0x0008) ACC_STATIC +// Code: +// stack=2, locals=2, args_size=1 +// 0: defaultvalue #3 // class Value +// 3: astore_1 +// 4: aload_1 +// 5: iload_0 +// 6: withfield #2 // Field int_v:I +// 9: astore_1 +// 10: aload_1 +// 11: areturn + + +// The constant pool index of the defaultvalue opcode (0xCB) in the Code +// attribute was changed to 0x93. Since this index is outside the range of +// the constant pool, a VerifyError exception should get thrown. +// +class defValBadCP { + 0xCAFEBABE; + 0; // minor version + 57; // version + [27] { // Constant Pool + ; // first element is empty + Method #7 #21; // #1 at 0x0A + Field #3 #22; // #2 at 0x0F + class #23; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #24; // #5 at 0x1C + Field #3 #25; // #6 at 0x21 + class #26; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "LdefValBadCP;"; // #9 at 0x2E + Utf8 "int_v"; // #10 at 0x38 + Utf8 "I"; // #11 at 0x40 + Utf8 ""; // #12 at 0x44 + Utf8 "()V"; // #13 at 0x4D + Utf8 "Code"; // #14 at 0x53 + Utf8 "LineNumberTable"; // #15 at 0x5A + Utf8 "makeValue"; // #16 at 0x6C + Utf8 "(I)LdefValBadCP;"; // #17 at 0x78 + Utf8 ""; // #18 at 0x85 + Utf8 "SourceFile"; // #19 at 0x90 + Utf8 "defValBadCP.java"; // #20 at 0x9D + NameAndType #12 #13; // #21 at 0xAA + NameAndType #10 #11; // #22 at 0xAF + Utf8 "defValBadCP"; // #23 at 0xB4 + NameAndType #16 #17; // #24 at 0xBC + NameAndType #8 #9; // #25 at 0xC1 + Utf8 "java/lang/Object"; // #26 at 0xC6 + } // 
Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xE3 + 0x0018; // access + #8; // name_cpx + #9; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xEB + 0x0010; // access + #10; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // methods + { // Member at 0x012D + 0x0008; // access + #16; // name_cpx + #17; // sig_cpx + [1] { // Attributes + Attr(#14, 44) { // Code at 0x0135 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00934C2B1ACC00; // Changed CP index from 3 to 0x93 for opcode 0xCB (defaultvalue) + 0x024C2BB0; // so that the index is outside of the range of the constant pool. + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#15, 14) { // LineNumberTable at 0x0153 + [3] { // LineNumberTable + 0 8; // at 0x015F + 4 9; // at 0x0163 + 10 10; // at 0x0167 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [1] { // Attributes + Attr(#19, 2) { // SourceFile at 0x0198 + #20; + } // end SourceFile + } // Attributes +} // end class defValBadCP + +/////////////////////////////////////////////////////////// + +// The class's major version was changed to 54. Since this class has a +// defaultvalue opcode (0xCB), this should cause a ClassFormatError +// exception to get thrown. 
+// +class defValBadMajorVersion { + 0xCAFEBABE; + 0; // minor version + 54; // version + [27] { // Constant Pool + ; // first element is empty + Method #7 #21; // #1 at 0x0A + Field #3 #22; // #2 at 0x0F + class #23; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #24; // #5 at 0x1C + Field #3 #25; // #6 at 0x21 + class #26; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "LdefValBadMajorVersion;"; // #9 at 0x2E + Utf8 "int_v"; // #10 at 0x38 + Utf8 "I"; // #11 at 0x40 + Utf8 ""; // #12 at 0x44 + Utf8 "()V"; // #13 at 0x4D + Utf8 "Code"; // #14 at 0x53 + Utf8 "LineNumberTable"; // #15 at 0x5A + Utf8 "makeValue"; // #16 at 0x6C + Utf8 "(I)LdefValBadMajorVersion;"; // #17 at 0x78 + Utf8 ""; // #18 at 0x85 + Utf8 "SourceFile"; // #19 at 0x90 + Utf8 "defValBadMajorVersion.java"; // #20 at 0x9D + NameAndType #12 #13; // #21 at 0xAA + NameAndType #10 #11; // #22 at 0xAF + Utf8 "defValBadMajorVersion"; // #23 at 0xB4 + NameAndType #16 #17; // #24 at 0xBC + NameAndType #8 #9; // #25 at 0xC1 + Utf8 "java/lang/Object"; // #26 at 0xC6 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xE3 + 0x0018; // access + #8; // name_cpx + #9; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xEB + 0x0010; // access + #10; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // methods + { // Member at 0x012D + 0x0008; // access + #16; // name_cpx + #17; // sig_cpx + [1] { // Attributes + Attr(#14, 44) { // Code at 0x0135 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#15, 14) { // LineNumberTable at 0x0153 + [3] { // LineNumberTable + 0 8; // at 0x015F + 4 9; // at 0x0163 + 10 10; // at 0x0167 + } + } // end LineNumberTable + } // Attributes 
+ } // end Code + } // Attributes + } // Member + } // methods + + [1] { // Attributes + Attr(#19, 2) { // SourceFile at 0x0198 + #20; + } // end SourceFile + } // Attributes +} // end class defValBadMajorVersion + +/////////////////////////////////////////////////////////// + +// The constant pool index of a defaultvalue opcode (0xCB) in the Code +// attribute was changed to 2. Since this index now points to a Field +// entry instead of a Class entry, a VerifyError exception should get thrown. +// +class defValWrongCPType { + 0xCAFEBABE; + 0; // minor version + 57; // version + [27] { // Constant Pool + ; // first element is empty + Method #7 #21; // #1 at 0x0A + Field #3 #22; // #2 at 0x0F + class #23; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #24; // #5 at 0x1C + Field #3 #25; // #6 at 0x21 + class #26; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "LdefValWrongCPType;"; // #9 at 0x2E + Utf8 "int_v"; // #10 at 0x38 + Utf8 "I"; // #11 at 0x40 + Utf8 ""; // #12 at 0x44 + Utf8 "()V"; // #13 at 0x4D + Utf8 "Code"; // #14 at 0x53 + Utf8 "LineNumberTable"; // #15 at 0x5A + Utf8 "makeValue"; // #16 at 0x6C + Utf8 "(I)LdefValWrongCPType;"; // #17 at 0x78 + Utf8 ""; // #18 at 0x85 + Utf8 "SourceFile"; // #19 at 0x90 + Utf8 "defValWrongCPType.java"; // #20 at 0x9D + NameAndType #12 #13; // #21 at 0xAA + NameAndType #10 #11; // #22 at 0xAF + Utf8 "defValWrongCPType"; // #23 at 0xB4 + NameAndType #16 #17; // #24 at 0xBC + NameAndType #8 #9; // #25 at 0xC1 + Utf8 "java/lang/Object"; // #26 at 0xC6 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [2] { // fields + { // Member at 0xE3 + 0x0018; // access + #8; // name_cpx + #9; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xEB + 0x0010; // access + #10; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // 
methods + { // Member at 0x012D + 0x0008; // access + #16; // name_cpx + #17; // sig_cpx + [1] { // Attributes + Attr(#14, 44) { // Code at 0x0135 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00024C2B1ACC00; // Changed CP index from 3 to 2 for opcode 0xCB (defaultvalue) + 0x024C2BB0; // so that the cp index no longer points to a cp Class entry. + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#15, 14) { // LineNumberTable at 0x0153 + [3] { // LineNumberTable + 0 8; // at 0x015F + 4 9; // at 0x0163 + 10 10; // at 0x0167 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [1] { // Attributes + Attr(#19, 2) { // SourceFile at 0x0198 + #20; + } // end SourceFile + } // Attributes +} // end class defValWrongCPType + +/////////////////////////////////////////////////////////// + +// The constant pool index of the withfield opcode (0xCC) in the Code +// attribute was changed to 0x82. Since this index is outside the range of +// the constant pool, a VerifyError exception should get thrown. 
+// +class wthFldBadCP { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makewthFldBadCP"; // #11 at 0x4E + Utf8 "wthFldBadCP"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LwthFldBadCP;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "wthFldBadCP.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 4; // at 0x010F + 4 5; // at 0x0113 + 9 6; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; // Changed CP index from 2 to 0x82 for opcode 0xCC (withfield) + 0x824C2BB0; // so that the index is outside of the range of the constant pool. 
+ }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 8; // at 0x0149 + 4 9; // at 0x014D + 10 10; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class wthFldBadCP + +/////////////////////////////////////////////////////////// + +// The opcode at bytecode position 5 in the Code array was changed to aload_1 +// (0x2B). This should cause a VerifyError because now the first operand on the +// stack for the withfield opcode (0xCC at bytecode position 6) does not match +// the type (int) of the field being assigned to. +// +class wthFldBadFldVal { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makewthFldBadFldVal"; // #11 at 0x4E + Utf8 "wthFldBadFldVal"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LwthFldBadFldVal;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "wthFldBadFldVal.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] 
{ // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 4; // at 0x010F + 4 5; // at 0x0113 + 9 6; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B2BCC00; // Changed opcode at bytecode 5 from iload_0 to aload_1 + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 8; // at 0x0149 + 4 9; // at 0x014D + 10 10; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class wthFldBadFldVal + +/////////////////////////////////////////////////////////// + +// The opcode at bytecode position 4 in the Code array was changed to iload_1 +// (0x1A). This should cause a VerifyError because the second operand on the stack +// for the withfield opcode (0xCC at bytecode position 6) must be a reference. 
+// +class wthFldBadFldRef { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makewthFldBadFldRef"; // #11 at 0x4E + Utf8 "wthFldBadFldRef"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LwthFldBadFldRef;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "wthFldBadFldRef.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 4; // at 0x010F + 4 5; // at 0x0113 + 9 6; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C1A1ACC00; // Changed opcode at bytecode 4 from aload_1 to iload_0 + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // 
LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 8; // at 0x0149 + 4 9; // at 0x014D + 10 10; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class wthFldBadFldRef + +/////////////////////////////////////////////////////////// + +// The class's major version was changed to 54 and the first opcode in the Code +// attribute was changed to a withfield (0xCC).. Since withfield opcodes are not +// allowed in classes with major version 54, this should cause a ClassFormatError +// exception to get thrown. +// +class wthFldBadMajorVersion { + 0xCAFEBABE; + 0; // minor version + 54; // version + [27] { // Constant Pool + ; // first element is empty + Method #7 #21; // #1 at 0x0A + Field #3 #22; // #2 at 0x0F + class #23; // #3 at 0x14 + int 0x01234567; // #4 at 0x17 + Method #3 #24; // #5 at 0x1C + Field #3 #25; // #6 at 0x21 + class #26; // #7 at 0x26 + Utf8 "VT"; // #8 at 0x29 + Utf8 "LwthFldBadMajorVersion;"; // #9 at 0x2E + Utf8 "int_v"; // #10 at 0x38 + Utf8 "I"; // #11 at 0x40 + Utf8 ""; // #12 at 0x44 + Utf8 "()V"; // #13 at 0x4D + Utf8 "Code"; // #14 at 0x53 + Utf8 "LineNumberTable"; // #15 at 0x5A + Utf8 "makeValue"; // #16 at 0x6C + Utf8 "(I)LwthFldBadMajorVersion;"; // #17 at 0x78 + Utf8 ""; // #18 at 0x85 + Utf8 "SourceFile"; // #19 at 0x90 + Utf8 "wthFldBadMajorVersion.java"; // #20 at 0x9D + NameAndType #12 #13; // #21 at 0xAA + NameAndType #10 #11; // #22 at 0xAF + Utf8 "wthFldBadMajorVersion"; // #23 at 0xB4 + NameAndType #16 #17; // #24 at 0xBC + NameAndType #8 #9; // #25 at 0xC1 + Utf8 "java/lang/Object"; // #26 at 0xC6 + } // Constant Pool + + 0x0130; // access [ ACC_VALUE ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #7;// super_cpx + + [0] { // Interfaces + } // Interfaces + 
+ [2] { // fields + { // Member at 0xE3 + 0x0018; // access + #8; // name_cpx + #9; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + ; + { // Member at 0xEB + 0x0010; // access + #10; // name_cpx + #11; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [1] { // methods + { // Member at 0x012D + 0x0008; // access + #16; // name_cpx + #17; // sig_cpx + [1] { // Attributes + Attr(#14, 44) { // Code at 0x0135 + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCC00034C2B1ACC00; // Changed the first opcode to 0xCC (withfield) in order to + 0x024C2BB0; // test withfield opcode with an illegal major version. + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#15, 14) { // LineNumberTable at 0x0153 + [3] { // LineNumberTable + 0 8; // at 0x015F + 4 9; // at 0x0163 + 10 10; // at 0x0167 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [1] { // Attributes + Attr(#19, 2) { // SourceFile at 0x0198 + #20; + } // end SourceFile + } // Attributes +} // end class wthFldBadMajorVersion + +/////////////////////////////////////////////////////////// + +// The constant pool index of a withfield opcode (0xCC) in the Code +// attribute was changed to 1. Since this index now points to a Method +// entry instead of a Field entry, a VerifyError exception should get thrown. 
+// +class wthFldWrongCPType { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makewthFldWrongCPType"; // #11 at 0x4E + Utf8 "wthFldWrongCPType"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LwthFldWrongCPType;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "wthFldWrongCPType.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 4; // at 0x010F + 4 5; // at 0x0113 + 9 6; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00034C2B1ACC00; // Changed CP index from 2 to 1 for opcode 0xCC (withfield) + 0x014C2BB0; // so that the cp index no longer points to a cp Field entry. 
+ }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 8; // at 0x0149 + 4 9; // at 0x014D + 10 10; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class wthFldWrongCPType + +/////////////////////////////////////////////////////////// + +// A new fieldRef was added to the constant pool to point to a field in +// java/lang/Object. The withfield opcode (0XCC) was changed to use this new +// cp entry. This should cause a VerifyError because even though value types +// are assignable to java.lang.Object, withfield requires that the cp field class +// and the value type on the stack, in this case 'wthFldObject', be identical. +// +class wthFldObject { + 0xCAFEBABE; + 0; // minor version + 57; // version + [22] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makewthFldObject"; // #11 at 0x4E + Utf8 "wthFldObject"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LwthFldObject;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "wthFldObject.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + class #19; // #20 // NEW ClassRef + Field #20 #18; // #21 // New FieldRef + + } // Constant Pool + + 0x0130; // access [ ACC_VALUE, ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // 
Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 4; // at 0x010F + 4 5; // at 0x0113 + 9 6; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 47) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[15]{ + 0xCB00034C2B1ACC00; // Changed withfield (0xCC) cp index at byte 8 to point to a field in + 0x154C2BCB0003B0; // in j.l.Object. Also, added a defaultvalue opcode at byte 11 to push + }; // a Value ref so that the verifier won't complain if it sees the areturn. + + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 8; // at 0x0149 + 4 9; // at 0x014D + 10 10; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class wthFldObject + +/////////////////////////////////////////////////////////// + +// The astore_1 opcode (0x4C) was changed to a monitorenter (0xC2) opcode but +// the operand on the stack is a value type. +// This should cause a VerifyError because the operand for opcode monitorenter +// cannot be a value type. 
+// +class monEnterVT { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makemonEnterVT"; // #11 at 0x4E + Utf8 "monEnterVT"; // #12 at 0x5F + Utf8 "ValueTypes"; // #13 at 0x6C + Utf8 "(I)LmonEnterVT;"; // #14 at 0x79 + Utf8 "SourceFile"; // #15 at 0x8B + Utf8 "monEnterVT.java"; // #16 at 0x98 + NameAndType #7 #8; // #17 at 0xAA + NameAndType #5 #6; // #18 at 0xAF + Utf8 "java/lang/Object"; // #19 at 0xB4 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD1 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDB + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx + [1] { // Attributes + Attr(#9, 42) { // Code at 0xE3 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0xFF + [3] { // LineNumberTable + 0 3; // at 0x010B + 4 4; // at 0x010F + 9 5; // at 0x0113 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0113 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011B + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB0003C22B1ACC00; // Changed bytecode at byte 3 from astore_1 to monitorenter. 
+ 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0139 + [3] { // LineNumberTable + 0 7; // at 0x0145 + 4 8; // at 0x0149 + 10 9; // at 0x014D + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x014F + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x0157 + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class monEnterVT + +/////////////////////////////////////////////////////////// + +// The cp entry for the defaultvalue opcode was changed to a reference that +// is not a value type. +// This should cause a VerifyError because the cp entry for opcode defaultvalue +// must be a value type. +// +class defValueObj { + 0xCAFEBABE; + 0; // minor version + 57; // version + [20] { // Constant Pool + ; // first element is empty + Method #4 #17; // #1 at 0x0A + Field #3 #18; // #2 at 0x0F + class #12; // #3 at 0x14 + class #19; // #4 at 0x17 + Utf8 "int_v"; // #5 at 0x1A + Utf8 "I"; // #6 at 0x22 + Utf8 ""; // #7 at 0x26 + Utf8 "()V"; // #8 at 0x2F + Utf8 "Code"; // #9 at 0x35 + Utf8 "LineNumberTable"; // #10 at 0x3C + Utf8 "makedefValueObj"; // #11 at 0x4E + Utf8 "defValueObj"; // #12 at 0x60 + Utf8 "ValueTypes"; // #13 at 0x6E + Utf8 "(I)LdefValueObj;"; // #14 at 0x7B + Utf8 "SourceFile"; // #15 at 0x8E + Utf8 "defValueObj.java"; // #16 at 0x9B + NameAndType #7 #8; // #17 at 0xAE + NameAndType #5 #6; // #18 at 0xB3 + Utf8 "java/lang/Object"; // #19 at 0xB8 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #4;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0xD5 + 0x0010; // access + #5; // name_cpx + #6; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0xDF + 0x0000; // access + #7; // name_cpx + #8; // sig_cpx 
+ [1] { // Attributes + Attr(#9, 42) { // Code at 0xE7 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x0103 + [3] { // LineNumberTable + 0 3; // at 0x010F + 4 4; // at 0x0113 + 9 5; // at 0x0117 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0117 + 0x0008; // access + #11; // name_cpx + #14; // sig_cpx + [1] { // Attributes + Attr(#9, 44) { // Code at 0x011F + 2; // max_stack + 2; // max_locals + Bytes[12]{ + 0xCB00044C2B1ACC00; // Changed defaultvalue's cp index at byte 3 from 3 to 4. + 0x024C2BB0; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#10, 14) { // LineNumberTable at 0x013D + [3] { // LineNumberTable + 0 7; // at 0x0149 + 4 8; // at 0x014D + 10 9; // at 0x0151 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#15, 2) { // SourceFile at 0x0153 + #16; + } // end SourceFile + ; + Attr(#13, 4) { // ValueTypes at 0x015B + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class defValueObj + +/////////////////////////////////////////////////////////// + +// The operand for the value type for the defaultvalue opcode was changed to a +// reference that is not a value type. +// This should cause a VerifyError because the reference operand for a withfield +// opcode must be a value type. 
+// +class withfieldObj { + 0xCAFEBABE; + 0; // minor version + 57; // version + [23] { // Constant Pool + ; // first element is empty + Method #5 #19; // #1 at 0x0A + Field #3 #20; // #2 at 0x0F + class #13; // #3 at 0x14 + String #21; // #4 at 0x17 + class #22; // #5 at 0x1A + Utf8 "int_v"; // #6 at 0x1D + Utf8 "I"; // #7 at 0x25 + Utf8 ""; // #8 at 0x29 + Utf8 "()V"; // #9 at 0x32 + Utf8 "Code"; // #10 at 0x38 + Utf8 "LineNumberTable"; // #11 at 0x3F + Utf8 "makewithfieldObj"; // #12 at 0x51 + Utf8 "withfieldObj"; // #13 at 0x64 + Utf8 "ValueTypes"; // #14 at 0x73 + Utf8 "(ILjava/lang/String;)LwithfieldObj;"; // #15 at 0x80 + Utf8 "StackMapTable"; // #16 at 0xA6 + Utf8 "SourceFile"; // #17 at 0xB6 + Utf8 "withfieldObj.java"; // #18 at 0xC3 + NameAndType #8 #9; // #19 at 0xD7 + NameAndType #6 #7; // #20 at 0xDC + Utf8 "CDE"; // #21 at 0xE1 + Utf8 "java/lang/Object"; // #22 at 0xE7 + } // Constant Pool + + 0x0130; // access [ ACC_SUPER ACC_FINAL ] + #3;// this_cpx + #5;// super_cpx + + [0] { // Interfaces + } // Interfaces + + [1] { // fields + { // Member at 0x0104 + 0x0010; // access + #6; // name_cpx + #7; // sig_cpx + [0] { // Attributes + } // Attributes + } // Member + } // fields + + [2] { // methods + { // Member at 0x010E + 0x0000; // access + #8; // name_cpx + #9; // sig_cpx + [1] { // Attributes + Attr(#10, 42) { // Code at 0x0116 + 2; // max_stack + 1; // max_locals + Bytes[10]{ + 0x2AB700012A04B500; + 0x02B1; + }; + [0] { // Traps + } // end Traps + [1] { // Attributes + Attr(#11, 14) { // LineNumberTable at 0x0132 + [3] { // LineNumberTable + 0 3; // at 0x013E + 4 4; // at 0x0142 + 9 5; // at 0x0146 + } + } // end LineNumberTable + } // Attributes + } // end Code + } // Attributes + } // Member + ; + { // Member at 0x0146 + 0x0008; // access + #12; // name_cpx + #15; // sig_cpx + [1] { // Attributes + Attr(#10, 68) { // Code at 0x014E + 2; // max_stack + 3; // max_locals + Bytes[18]{ + 0xCB00034D2B1204A5; + 0x00092B1ACC00024D; // Changed aload_2 
(0x2C) to aload_1 (0x2B) at byte 3. + 0x2CB0; + }; + [0] { // Traps + } // end Traps + [2] { // Attributes + Attr(#11, 18) { // LineNumberTable at 0x0172 + [4] { // LineNumberTable + 0 7; // at 0x017E + 4 8; // at 0x0182 + 10 9; // at 0x0186 + 16 11; // at 0x018A + } + } // end LineNumberTable + ; + Attr(#16, 8) { // StackMapTable at 0x018A + [1] { // + 252b, 16, [1]z{7b,3}; // append_frame 1 + } + } // end StackMapTable + } // Attributes + } // end Code + } // Attributes + } // Member + } // methods + + [2] { // Attributes + Attr(#17, 2) { // SourceFile at 0x019A + #18; + } // end SourceFile + ; + Attr(#14, 4) { // ValueTypes at 0x01A2 + 0x00010003; + } // end ValueTypes + } // Attributes +} // end class withfieldObj --- old/test/hotspot/jtreg/testlibrary_tests/TestPlatformIsTieredSupported.java 2019-03-11 14:28:03.506353532 +0100 +++ new/test/hotspot/jtreg/testlibrary_tests/TestPlatformIsTieredSupported.java 2019-03-11 14:28:03.290353535 +0100 @@ -28,6 +28,7 @@ /** * @test * @summary Verifies that Platform::isTieredSupported returns correct value. + * @requires vm.opt.final.TieredCompilation * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/vmTestbase/jit/tiered/TestDescription.java 2019-03-11 14:28:03.942353526 +0100 +++ new/test/hotspot/jtreg/vmTestbase/jit/tiered/TestDescription.java 2019-03-11 14:28:03.730353529 +0100 @@ -34,6 +34,7 @@ * If tiered compilation is explicitly disabled the test verifies that there are no * output from PrintTieredEvents. * + * @requires vm.opt.final.TieredCompilation * @library /vmTestbase * /test/lib * @run driver jdk.test.lib.FileInstaller . .